// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
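/*
 * A minimal userspace sketch of the CQ-side pairing described above. The
 * field names (khead, ktail, kring_mask, cqes) follow liburing's ring layout
 * and are assumptions for illustration only, not definitions from this file.
 * The acquire on the tail pairs with the kernel's tail store, and the release
 * on the head publishes the consumed entries back to the kernel:
 *
 *	unsigned head = *cq->khead;
 *	unsigned tail = smp_load_acquire(cq->ktail);
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cq->cqes[head & *cq->kring_mask];
 *
 *		handle_cqe(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq->khead, head);
 */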
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
				IOSQE_BUFFER_SELECT)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
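/*
 * Illustrative userspace view of the layout above (not part of this file):
 * the application mmap()s the rings at IORING_OFF_SQ_RING/IORING_OFF_CQ_RING
 * and indexes them through the published masks. A submission sketch, with
 * liburing-style names (ktail, kring_mask, array) assumed for the example:
 *
 *	unsigned tail = *sq->ktail;
 *	unsigned index = tail & *sq->kring_mask;
 *
 *	sq->array[index] = sqe_index;
 *	smp_store_release(sq->ktail, tail + 1);
 *
 * The SQE slot is filled before the tail is published with a release store,
 * matching the SQ-side ordering rules in the comment at the top of this file.
 * The actual field offsets come from struct io_sqring_offsets returned by
 * io_uring_setup(2).
 */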

enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct bio_vec	*bvec;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
};

struct io_ring_ctx;

struct io_overflow_cqe {
	struct io_uring_cqe cqe;
	struct list_head list;
};

struct io_rsrc_put {
	struct list_head list;
	union {
		void *rsrc;
		struct file *file;
	};
};

struct fixed_rsrc_table {
	struct file		**files;
};

struct fixed_rsrc_ref_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct fixed_rsrc_data		*rsrc_data;
	void				(*rsrc_put)(struct io_ring_ctx *ctx,
						    struct io_rsrc_put *prsrc);
	struct llist_node		llist;
	bool				done;
};

struct fixed_rsrc_data {
	struct fixed_rsrc_table		*table;
	struct io_ring_ctx		*ctx;

	struct fixed_rsrc_ref_node	*node;
	struct percpu_ref		refs;
	struct completion		done;
	bool				quiesce;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

struct io_sq_data {
	refcount_t		refs;
	atomic_t		park_pending;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
	int			sq_cpu;
	pid_t			task_pid;
	pid_t			task_tgid;

	unsigned long		state;
	struct completion	exited;
	struct callback_head	*park_task_work;
};

#define IO_IOPOLL_BATCH			8
#define IO_COMPL_BATCH			32
#define IO_REQ_CACHE_SIZE		32
#define IO_REQ_ALLOC_BATCH		8

struct io_comp_state {
	struct io_kiocb		*reqs[IO_COMPL_BATCH];
	unsigned int		nr;
	unsigned int		locked_free_nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head	free_list;
	/* IRQ completion list, under ->completion_lock */
	struct list_head	locked_free_list;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	struct blk_plug		plug;
	struct io_submit_link	link;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_REQ_CACHE_SIZE];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state	comp;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		file_refs;
	unsigned int		ios_left;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		cq_overflow_flushed: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		unsigned		cached_cq_overflow;
		unsigned long		sq_check_overflow;

		/* hashed buffered write serialization */
		struct io_wq_hash	*hash_map;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		struct io_uring_sqe	*sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct io_submit_state		submit_state;

	struct io_rings	*rings;

	/* Only used for accounting purposes */
	struct mm_struct	*mm_account;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_rsrc_data	*file_data;
	unsigned		nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	struct completion	ref_comp;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct xarray		io_buffers;

	struct xarray		personalities;
	u32			pers_next;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		unsigned		cq_last_tm_flush;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work		rsrc_put_work;
	struct llist_head		rsrc_put_llist;
	struct list_head		rsrc_ref_list;
	spinlock_t			rsrc_ref_lock;
	struct fixed_rsrc_ref_node	*rsrc_backup_node;

	struct io_restriction		restrictions;

	/* exit task_work */
	struct callback_head		*exit_task_work;

	struct wait_queue_head		hash_wait;

	/* Keep this last, we don't need it for the fast path */
	struct work_struct		exit_work;
	struct list_head		tctx_list;
};

struct io_uring_task {
	/* submission side */
	struct xarray		xa;
	struct wait_queue_head	wait;
	const struct io_ring_ctx *last;
	struct io_wq		*io_wq;
	struct percpu_counter	inflight;
	atomic_t		in_idle;

	spinlock_t		task_lock;
	struct io_wq_work_list	task_list;
	unsigned long		task_state;
	struct callback_head	task_work;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_poll_remove {
	struct file			*file;
	u64				addr;
};

struct io_close {
	struct file			*file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	unsigned long			nofile;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct user_msghdr __user *umsg;
		void __user		*buf;
	};
	int				msg_flags;
	int				bgid;
	size_t				len;
	struct io_buffer		*kbuf;
};

struct io_open {
	struct file			*file;
	int				dfd;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__s32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	const char __user		*filename;
	struct statx __user		*buffer;
};

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

struct io_completion {
	struct file			*file;
	struct list_head		list;
	u32				cflags;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
	size_t				bytes_done;
	struct wait_page_queue		wpq;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,

	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_LTIMEOUT_ACTIVE_BIT,
	REQ_F_COMPLETE_INLINE_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_DONT_REISSUE_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_ASYNC_READ_BIT,
	REQ_F_ASYNC_WRITE_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};
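/*
 * Because the first REQ_F_* bits above are defined to equal the corresponding
 * IOSQE_* bits, validated submission flags can be copied straight into
 * req->flags. A rough sketch of that idea (hypothetical snippet, not the
 * actual request-init path in this file):
 *
 *	unsigned int sqe_flags = READ_ONCE(sqe->flags);
 *
 *	if (sqe_flags & ~SQE_VALID_FLAGS)
 *		return -EINVAL;
 *	req->flags = sqe_flags;
 */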

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),

	/* fail rest of links */
	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* don't attempt request reissue, see io_rw_reissue() */
	REQ_F_DONT_REISSUE	= BIT(REQ_F_DONT_REISSUE_BIT),
	/* supports async reads */
	REQ_F_ASYNC_READ	= BIT(REQ_F_ASYNC_READ_BIT),
	/* supports async writes */
	REQ_F_ASYNC_WRITE	= BIT(REQ_F_ASYNC_WRITE_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
};

struct async_poll {
	struct io_poll_iocb	poll;
	struct io_poll_iocb	*double_poll;
};

struct io_task_work {
	struct io_wq_work_node	node;
	task_work_func_t	func;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_remove	poll_remove;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	atomic_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/*
	 * 1. used with ctx->iopoll_list with reads/writes
	 * 2. to track reqs with ->files (see io_op_def::file_table)
	 */
	struct list_head		inflight_entry;
	union {
		struct io_task_work	io_task_work;
		struct callback_head	task_work;
	};
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
};

struct io_tctx_node {
	struct list_head	ctx_node;
	struct task_struct	*task;
	struct io_ring_ctx	*ctx;
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
	/* do prep async if it is going to be punted */
	unsigned		needs_async_setup : 1;
	/* should block plug */
	unsigned		plug : 1;
	/* size of async data needed, if any */
	unsigned short		async_size;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITEV] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_RECVMSG] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
	},
	[IORING_OP_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
	},
	[IORING_OP_ACCEPT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_size		= sizeof(struct io_timeout_data),
	},
	[IORING_OP_CONNECT] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.needs_async_setup	= 1,
		.async_size		= sizeof(struct io_async_connect),
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {},
	[IORING_OP_CLOSE] = {},
	[IORING_OP_FILES_UPDATE] = {},
	[IORING_OP_STATX] = {},
	[IORING_OP_READ] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_WRITE] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
		.plug			= 1,
		.async_size		= sizeof(struct io_async_rw),
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {},
	[IORING_OP_SEND] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollout		= 1,
	},
	[IORING_OP_RECV] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.pollin			= 1,
		.buffer_select		= 1,
	},
	[IORING_OP_OPENAT2] = {
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
	[IORING_OP_REMOVE_BUFFERS] = {},
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
	[IORING_OP_RENAMEAT] = {},
	[IORING_OP_UNLINKAT] = {},
};
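/*
 * The table above is indexed directly by the IORING_OP_* opcode, so looking
 * up the per-opcode properties is a single array access, e.g. (illustrative
 * only):
 *
 *	const struct io_op_def *def = &io_op_defs[req->opcode];
 *
 *	if (def->needs_async_setup)
 *		... allocate def->async_size bytes of async data up front ...
 */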

static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_task_file(unsigned long index);
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 struct files_struct *files);
static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
			struct io_ring_ctx *ctx);
static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_dismantle_req(struct io_kiocb *req);
static void io_put_task(struct task_struct *task, int nr);
static void io_queue_next(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update *ip,
				 unsigned nr_args);
static void io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);

static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_comp_state *cs,
					 struct io_ring_ctx *ctx);
static int io_req_prep_async(struct io_kiocb *req);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline void io_set_resource_node(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->fixed_rsrc_refs) {
		req->fixed_rsrc_refs = &ctx->file_data->node->refs;
		percpu_ref_get(req->fixed_rsrc_refs);
	}
}

static bool io_match_task(struct io_kiocb *head,
			  struct task_struct *task,
			  struct files_struct *files)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (!files)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

static inline void req_set_fail_links(struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
		req->flags |= REQ_F_FAIL_LINK;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	return !req->timeout.off;
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
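	/*
	 * Worked example (illustrative): with cq_entries == 4096,
	 * ilog2(4096) == 12, so hash_bits becomes 7 and the table gets
	 * 1 << 7 == 128 buckets; 4096 / 128 == 32 entries per bucket if the
	 * hash is completely full and uniformly spread.
	 */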
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
					GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	init_waitqueue_head(&ctx->cq_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->ref_comp);
	xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	spin_lock_init(&ctx->inflight_lock);
	INIT_LIST_HEAD(&ctx->inflight_list);
	spin_lock_init(&ctx->rsrc_ref_lock);
	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
	init_llist_head(&ctx->rsrc_put_llist);
	INIT_LIST_HEAD(&ctx->tctx_list);
	INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
	INIT_LIST_HEAD(&ctx->submit_state.comp.locked_free_list);
	return ctx;
err:
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq != ctx->cached_cq_tail
				+ READ_ONCE(ctx->cached_cq_overflow);
	}

	return false;
}
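/*
 * Illustrative reading of the check above: a drained request is assigned a
 * sequence number at submission time, and it stays deferred until the sum of
 * posted completions (cached_cq_tail) and overflowed completions
 * (cached_cq_overflow) reaches that number. E.g. a drained request submitted
 * after 10 others only runs once those 10 have posted (or overflowed) their
 * completions.
 */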

static void io_req_track_inflight(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_INFLIGHT)) {
		req->flags |= REQ_F_INFLIGHT;

		spin_lock_irq(&ctx->inflight_lock);
		list_add(&req->inflight_entry, &ctx->inflight_list);
		spin_unlock_irq(&ctx->inflight_lock);
	}
}

static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->work.creds)
		req->work.creds = get_current_cred();

	req->work.list.next = NULL;
	req->work.flags = 0;
	if (req->flags & REQ_F_FORCE_ASYNC)
		req->work.flags |= IO_WQ_WORK_CONCURRENT;

	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	switch (req->opcode) {
	case IORING_OP_SPLICE:
	case IORING_OP_TEE:
		/*
 1223 * Splice operations will be punted async, and we need to modify
 1224 * io_wq_work.flags here, so initialize io_wq_work first.
1225 */
1226 if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
1227 req->work.flags |= IO_WQ_WORK_UNBOUND;
1228 break;
1229 }
Jens Axboe561fb042019-10-24 07:25:42 -06001230}
1231
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001232static void io_prep_async_link(struct io_kiocb *req)
1233{
1234 struct io_kiocb *cur;
1235
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001236 io_for_each_link(cur, req)
1237 io_prep_async_work(cur);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001238}
1239
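/*
 * Hand a request off to the submitting task's io-wq pool. ->work is
 * initialised for the whole link chain first, and any linked timeout is
 * armed only after the work has been enqueued.
 */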
Pavel Begunkovebf93662021-03-01 18:20:47 +00001240static void io_queue_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001241{
Jackie Liua197f662019-11-08 08:09:12 -07001242 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001243 struct io_kiocb *link = io_prep_linked_timeout(req);
Jens Axboe5aa75ed2021-02-16 12:56:50 -07001244 struct io_uring_task *tctx = req->task->io_uring;
Jens Axboe561fb042019-10-24 07:25:42 -06001245
Jens Axboe3bfe6102021-02-16 14:15:30 -07001246 BUG_ON(!tctx);
1247 BUG_ON(!tctx->io_wq);
Jens Axboe561fb042019-10-24 07:25:42 -06001248
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001249 /* init ->work of the whole link before punting */
1250 io_prep_async_link(req);
Pavel Begunkovd07f1e8a2021-03-22 01:45:58 +00001251 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1252 &req->work, req->flags);
Pavel Begunkovebf93662021-03-01 18:20:47 +00001253 io_wq_enqueue(tctx->io_wq, &req->work);
Jens Axboe7271ef32020-08-10 09:55:22 -06001254 if (link)
1255 io_queue_linked_timeout(link);
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03001256}
1257
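/*
 * Cancel a pending timeout: if the hrtimer had not fired yet, bump
 * cq_timeouts, post a CQE with the given status and drop the request's
 * reference via a deferred put.
 */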
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001258static void io_kill_timeout(struct io_kiocb *req, int status)
Jens Axboe5262f562019-09-17 12:26:57 -06001259{
Jens Axboee8c2bc12020-08-15 18:44:09 -07001260 struct io_timeout_data *io = req->async_data;
Jens Axboe5262f562019-09-17 12:26:57 -06001261 int ret;
1262
Jens Axboee8c2bc12020-08-15 18:44:09 -07001263 ret = hrtimer_try_to_cancel(&io->timer);
Jens Axboe5262f562019-09-17 12:26:57 -06001264 if (ret != -1) {
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03001265 atomic_set(&req->ctx->cq_timeouts,
1266 atomic_read(&req->ctx->cq_timeouts) + 1);
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001267 list_del_init(&req->timeout.list);
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001268 io_cqring_fill_event(req, status);
Pavel Begunkov216578e2020-10-13 09:44:00 +01001269 io_put_req_deferred(req, 1);
Jens Axboe5262f562019-09-17 12:26:57 -06001270 }
1271}
1272
Pavel Begunkov04518942020-05-26 20:34:05 +03001273static void __io_queue_deferred(struct io_ring_ctx *ctx)
1274{
1275 do {
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001276 struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1277 struct io_defer_entry, list);
Pavel Begunkov04518942020-05-26 20:34:05 +03001278
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03001279 if (req_need_defer(de->req, de->seq))
Pavel Begunkov04518942020-05-26 20:34:05 +03001280 break;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001281 list_del_init(&de->list);
Pavel Begunkov907d1df2021-01-26 23:35:10 +00001282 io_req_task_queue(de->req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03001283 kfree(de);
Pavel Begunkov04518942020-05-26 20:34:05 +03001284 } while (!list_empty(&ctx->defer_list));
1285}
1286
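/*
 * Complete all timeouts whose target completion count has been reached.
 * The comparison below is wrap-safe: e.g. with cq_last_tm_flush ==
 * 0xfffffffe and seq == 1, events_got still evaluates to 3.
 */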
Pavel Begunkov360428f2020-05-30 14:54:17 +03001287static void io_flush_timeouts(struct io_ring_ctx *ctx)
1288{
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001289 u32 seq;
1290
1291 if (list_empty(&ctx->timeout_list))
1292 return;
1293
1294 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
1295
1296 do {
1297 u32 events_needed, events_got;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001298 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001299 struct io_kiocb, timeout.list);
Pavel Begunkov360428f2020-05-30 14:54:17 +03001300
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03001301 if (io_is_timeout_noseq(req))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001302 break;
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001303
1304 /*
1305 * Since seq can easily wrap around over time, subtract
1306 * the last seq at which timeouts were flushed before comparing.
1307 * Assuming not more than 2^31-1 events have happened since,
1308 * these subtractions won't have wrapped, so we can check if
1309 * target is in [last_seq, current_seq] by comparing the two.
1310 */
1311 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1312 events_got = seq - ctx->cq_last_tm_flush;
1313 if (events_got < events_needed)
Pavel Begunkov360428f2020-05-30 14:54:17 +03001314 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001315
Pavel Begunkov135fcde2020-07-13 23:37:12 +03001316 list_del_init(&req->timeout.list);
Pavel Begunkov1ee41602021-03-25 18:32:42 +00001317 io_kill_timeout(req, 0);
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05001318 } while (!list_empty(&ctx->timeout_list));
1319
1320 ctx->cq_last_tm_flush = seq;
Pavel Begunkov360428f2020-05-30 14:54:17 +03001321}
1322
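/*
 * Publish pending completions: flush expired timeouts, make the new CQ tail
 * visible to userspace with release semantics, then queue any deferred
 * (drained) requests that are now allowed to run.
 */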
Jens Axboede0617e2019-04-06 21:51:27 -06001323static void io_commit_cqring(struct io_ring_ctx *ctx)
1324{
Pavel Begunkov360428f2020-05-30 14:54:17 +03001325 io_flush_timeouts(ctx);
Pavel Begunkovec30e042021-01-19 13:32:38 +00001326
1327 /* order cqe stores with ring update */
1328 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
Jens Axboede0617e2019-04-06 21:51:27 -06001329
Pavel Begunkov04518942020-05-26 20:34:05 +03001330 if (unlikely(!list_empty(&ctx->defer_list)))
1331 __io_queue_deferred(ctx);
Jens Axboede0617e2019-04-06 21:51:27 -06001332}
1333
Jens Axboe90554202020-09-03 12:12:41 -06001334static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1335{
1336 struct io_rings *r = ctx->rings;
1337
1338 return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
1339}
1340
Pavel Begunkov888aae22021-01-19 13:32:39 +00001341static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
1342{
1343 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
1344}
1345
Jens Axboe2b188cc2019-01-07 10:46:33 -07001346static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
1347{
Hristo Venev75b28af2019-08-26 17:23:46 +00001348 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001349 unsigned tail;
1350
Stefan Bühler115e12e2019-04-24 23:54:18 +02001351 /*
1352 * writes to the cq entry need to come after reading head; the
1353 * control dependency is enough as we're using WRITE_ONCE to
1354 * fill the cq entry
1355 */
Pavel Begunkov888aae22021-01-19 13:32:39 +00001356 if (__io_cqring_events(ctx) == rings->cq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001357 return NULL;
1358
Pavel Begunkov888aae22021-01-19 13:32:39 +00001359 tail = ctx->cached_cq_tail++;
Hristo Venev75b28af2019-08-26 17:23:46 +00001360 return &rings->cqes[tail & ctx->cq_mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001361}
1362
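/*
 * Whether this completion should signal the registered eventfd: not if none
 * is registered or it is disabled, and for eventfds registered as
 * async-only, only when running from io-wq worker context.
 */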
Jens Axboef2842ab2020-01-08 11:04:00 -07001363static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1364{
Jens Axboef0b493e2020-02-01 21:30:11 -07001365 if (!ctx->cq_ev_fd)
1366 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001367 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1368 return false;
Jens Axboef2842ab2020-01-08 11:04:00 -07001369 if (!ctx->eventfd_async)
1370 return true;
Jens Axboeb41e9852020-02-17 09:52:41 -07001371 return io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001372}
1373
Jens Axboeb41e9852020-02-17 09:52:41 -07001374static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001375{
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001376 /* see waitqueue_active() comment */
1377 smp_mb();
1378
Jens Axboe8c838782019-03-12 15:48:16 -06001379 if (waitqueue_active(&ctx->wait))
1380 wake_up(&ctx->wait);
Jens Axboe534ca6d2020-09-02 13:52:19 -06001381 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1382 wake_up(&ctx->sq_data->wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001383 if (io_should_trigger_evfd(ctx))
Jens Axboe9b402842019-04-11 11:45:41 -06001384 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001385 if (waitqueue_active(&ctx->cq_wait)) {
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001386 wake_up_interruptible(&ctx->cq_wait);
1387 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1388 }
Jens Axboe8c838782019-03-12 15:48:16 -06001389}
1390
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001391static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
1392{
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001393 /* see waitqueue_active() comment */
1394 smp_mb();
1395
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001396 if (ctx->flags & IORING_SETUP_SQPOLL) {
1397 if (waitqueue_active(&ctx->wait))
1398 wake_up(&ctx->wait);
1399 }
1400 if (io_should_trigger_evfd(ctx))
1401 eventfd_signal(ctx->cq_ev_fd, 1);
Pavel Begunkovb1445e52021-01-07 03:15:43 +00001402 if (waitqueue_active(&ctx->cq_wait)) {
Pavel Begunkov4aa84f22021-01-07 03:15:42 +00001403 wake_up_interruptible(&ctx->cq_wait);
1404 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1405 }
Pavel Begunkov80c18e42021-01-07 03:15:41 +00001406}
1407
Jens Axboec4a2ed72019-11-21 21:01:26 -07001408/* Returns true if there are no backlogged entries after the flush */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001409static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001410{
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001411 struct io_rings *rings = ctx->rings;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001412 unsigned long flags;
Jens Axboeb18032b2021-01-24 16:58:56 -07001413 bool all_flushed, posted;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001414
Pavel Begunkove23de152020-12-17 00:24:37 +00001415 if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
1416 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001417
Jens Axboeb18032b2021-01-24 16:58:56 -07001418 posted = false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001419 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001420 while (!list_empty(&ctx->cq_overflow_list)) {
1421 struct io_uring_cqe *cqe = io_get_cqring(ctx);
1422 struct io_overflow_cqe *ocqe;
Jens Axboee6c8aa92020-09-28 13:10:13 -06001423
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001424 if (!cqe && !force)
1425 break;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001426 ocqe = list_first_entry(&ctx->cq_overflow_list,
1427 struct io_overflow_cqe, list);
1428 if (cqe)
1429 memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
1430 else
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001431 WRITE_ONCE(ctx->rings->cq_overflow,
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001432 ++ctx->cached_cq_overflow);
Jens Axboeb18032b2021-01-24 16:58:56 -07001433 posted = true;
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001434 list_del(&ocqe->list);
1435 kfree(ocqe);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001436 }
1437
Pavel Begunkov09e88402020-12-17 00:24:38 +00001438 all_flushed = list_empty(&ctx->cq_overflow_list);
1439 if (all_flushed) {
1440 clear_bit(0, &ctx->sq_check_overflow);
1441 clear_bit(0, &ctx->cq_check_overflow);
1442 ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1443 }
Pavel Begunkov46930142020-07-30 18:43:49 +03001444
Jens Axboeb18032b2021-01-24 16:58:56 -07001445 if (posted)
1446 io_commit_cqring(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001447 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Jens Axboeb18032b2021-01-24 16:58:56 -07001448 if (posted)
1449 io_cqring_ev_posted(ctx);
Pavel Begunkov09e88402020-12-17 00:24:38 +00001450 return all_flushed;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001451}
1452
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001453static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Pavel Begunkov6c503152021-01-04 20:36:36 +00001454{
Jens Axboeca0a2652021-03-04 17:15:48 -07001455 bool ret = true;
1456
Pavel Begunkov6c503152021-01-04 20:36:36 +00001457 if (test_bit(0, &ctx->cq_check_overflow)) {
1458 /* iopoll syncs against uring_lock, not completion_lock */
1459 if (ctx->flags & IORING_SETUP_IOPOLL)
1460 mutex_lock(&ctx->uring_lock);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001461 ret = __io_cqring_overflow_flush(ctx, force);
Pavel Begunkov6c503152021-01-04 20:36:36 +00001462 if (ctx->flags & IORING_SETUP_IOPOLL)
1463 mutex_unlock(&ctx->uring_lock);
1464 }
Jens Axboeca0a2652021-03-04 17:15:48 -07001465
1466 return ret;
Pavel Begunkov6c503152021-01-04 20:36:36 +00001467}
1468
Jens Axboeabc54d62021-02-24 13:32:30 -07001469/*
1470 * Shamelessly stolen from the mm implementation of page reference checking,
1471 * see commit f958d7b528b1 for details.
1472 */
1473#define req_ref_zero_or_close_to_overflow(req) \
1474 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
1475
Jens Axboede9b4cc2021-02-24 13:28:27 -07001476static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
1477{
Jens Axboeabc54d62021-02-24 13:32:30 -07001478 return atomic_inc_not_zero(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001479}
1480
1481static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs)
1482{
Jens Axboeabc54d62021-02-24 13:32:30 -07001483 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1484 return atomic_sub_and_test(refs, &req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001485}
1486
1487static inline bool req_ref_put_and_test(struct io_kiocb *req)
1488{
Jens Axboeabc54d62021-02-24 13:32:30 -07001489 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1490 return atomic_dec_and_test(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001491}
1492
1493static inline void req_ref_put(struct io_kiocb *req)
1494{
Jens Axboeabc54d62021-02-24 13:32:30 -07001495 WARN_ON_ONCE(req_ref_put_and_test(req));
Jens Axboede9b4cc2021-02-24 13:28:27 -07001496}
1497
1498static inline void req_ref_get(struct io_kiocb *req)
1499{
Jens Axboeabc54d62021-02-24 13:32:30 -07001500 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
1501 atomic_inc(&req->refs);
Jens Axboede9b4cc2021-02-24 13:28:27 -07001502}
1503
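/*
 * Post a CQE for @req. If the CQ ring is full the entry is stashed on the
 * overflow list (GFP_ATOMIC) to be flushed later; only when that allocation
 * fails, or the ring/task is being torn down, is the completion dropped and
 * the overflow counter bumped instead.
 */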
Pavel Begunkov8c3f9cd2021-02-28 22:35:15 +00001504static void __io_cqring_fill_event(struct io_kiocb *req, long res,
1505 unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001506{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001507 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001508 struct io_uring_cqe *cqe;
1509
Jens Axboe78e19bb2019-11-06 15:21:34 -07001510 trace_io_uring_complete(ctx, req->user_data, res);
Jens Axboe51c3ff62019-11-03 06:52:50 -07001511
Jens Axboe2b188cc2019-01-07 10:46:33 -07001512 /*
1513 * If we can't get a cq entry, userspace overflowed the
1514 * submission (by quite a lot). Increment the overflow count in
1515 * the ring.
1516 */
1517 cqe = io_get_cqring(ctx);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001518 if (likely(cqe)) {
Jens Axboe78e19bb2019-11-06 15:21:34 -07001519 WRITE_ONCE(cqe->user_data, req->user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001520 WRITE_ONCE(cqe->res, res);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001521 WRITE_ONCE(cqe->flags, cflags);
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001522 return;
1523 }
1524 if (!ctx->cq_overflow_flushed &&
1525 !atomic_read(&req->task->io_uring->in_idle)) {
1526 struct io_overflow_cqe *ocqe;
1527
1528 ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
1529 if (!ocqe)
1530 goto overflow;
Jens Axboead3eb2c2019-12-18 17:12:20 -07001531 if (list_empty(&ctx->cq_overflow_list)) {
1532 set_bit(0, &ctx->sq_check_overflow);
1533 set_bit(0, &ctx->cq_check_overflow);
Xiaoguang Wang6d5f9042020-07-09 09:15:29 +08001534 ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
Jens Axboead3eb2c2019-12-18 17:12:20 -07001535 }
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001536 ocqe->cqe.user_data = req->user_data;
1537 ocqe->cqe.res = res;
1538 ocqe->cqe.flags = cflags;
1539 list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
1540 return;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001541 }
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00001542overflow:
1543 /*
1544 * If we're in ring overflow flush mode, or in task cancel mode,
1545 * or cannot allocate an overflow entry, then we need to drop it
1546 * on the floor.
1547 */
1548 WRITE_ONCE(ctx->rings->cq_overflow, ++ctx->cached_cq_overflow);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001549}
1550
Jens Axboebcda7ba2020-02-23 16:42:51 -07001551static void io_cqring_fill_event(struct io_kiocb *req, long res)
1552{
1553 __io_cqring_fill_event(req, res, 0);
1554}
1555
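/*
 * Fill the CQE under completion_lock and, if that drops the last reference,
 * disarm/queue any linked requests and recycle the request straight into
 * the locked free-list cache.
 */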
Pavel Begunkov7a612352021-03-09 00:37:59 +00001556static void io_req_complete_post(struct io_kiocb *req, long res,
1557 unsigned int cflags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001558{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001559 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001560 unsigned long flags;
1561
1562 spin_lock_irqsave(&ctx->completion_lock, flags);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001563 __io_cqring_fill_event(req, res, cflags);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001564 /*
1565 * If we're the last reference to this request, add to our locked
1566 * free_list cache.
1567 */
Jens Axboede9b4cc2021-02-24 13:28:27 -07001568 if (req_ref_put_and_test(req)) {
Jens Axboec7dae4b2021-02-09 19:53:37 -07001569 struct io_comp_state *cs = &ctx->submit_state.comp;
1570
Pavel Begunkov7a612352021-03-09 00:37:59 +00001571 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
1572 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK))
1573 io_disarm_next(req);
1574 if (req->link) {
1575 io_req_task_queue(req->link);
1576 req->link = NULL;
1577 }
1578 }
Jens Axboec7dae4b2021-02-09 19:53:37 -07001579 io_dismantle_req(req);
1580 io_put_task(req->task, 1);
1581 list_add(&req->compl.list, &cs->locked_free_list);
1582 cs->locked_free_nr++;
Pavel Begunkov180f8292021-03-14 20:57:09 +00001583 } else {
1584 if (!percpu_ref_tryget(&ctx->refs))
1585 req = NULL;
1586 }
Pavel Begunkov7a612352021-03-09 00:37:59 +00001587 io_commit_cqring(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001588 spin_unlock_irqrestore(&ctx->completion_lock, flags);
Pavel Begunkov7a612352021-03-09 00:37:59 +00001589
Pavel Begunkov180f8292021-03-14 20:57:09 +00001590 if (req) {
1591 io_cqring_ev_posted(ctx);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001592 percpu_ref_put(&ctx->refs);
Pavel Begunkov180f8292021-03-14 20:57:09 +00001593 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001594}
1595
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001596static void io_req_complete_state(struct io_kiocb *req, long res,
Pavel Begunkov889fca72021-02-10 00:03:09 +00001597 unsigned int cflags)
Jens Axboebcda7ba2020-02-23 16:42:51 -07001598{
Pavel Begunkov68fb8972021-03-19 17:22:41 +00001599 if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
1600 io_clean_op(req);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001601 req->result = res;
1602 req->compl.cflags = cflags;
Pavel Begunkove342c802021-01-19 13:32:47 +00001603 req->flags |= REQ_F_COMPLETE_INLINE;
Jens Axboee1e16092020-06-22 09:17:17 -06001604}
Jens Axboe2b188cc2019-01-07 10:46:33 -07001605
Pavel Begunkov889fca72021-02-10 00:03:09 +00001606static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
1607 long res, unsigned cflags)
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001608{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001609 if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1610 io_req_complete_state(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001611 else
Jens Axboec7dae4b2021-02-09 19:53:37 -07001612 io_req_complete_post(req, res, cflags);
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001613}
Jens Axboebcda7ba2020-02-23 16:42:51 -07001614
Pavel Begunkova38d68d2021-01-19 13:32:45 +00001615static inline void io_req_complete(struct io_kiocb *req, long res)
Jens Axboee1e16092020-06-22 09:17:17 -06001616{
Pavel Begunkov889fca72021-02-10 00:03:09 +00001617 __io_req_complete(req, 0, res, 0);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001618}
1619
Pavel Begunkovf41db2732021-02-28 22:35:12 +00001620static void io_req_complete_failed(struct io_kiocb *req, long res)
1621{
1622 req_set_fail_links(req);
1623 io_put_req(req);
1624 io_req_complete_post(req, res, 0);
1625}
1626
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001627static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
1628 struct io_comp_state *cs)
1629{
1630 spin_lock_irq(&ctx->completion_lock);
1631 list_splice_init(&cs->locked_free_list, &cs->free_list);
1632 cs->locked_free_nr = 0;
1633 spin_unlock_irq(&ctx->completion_lock);
1634}
1635
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001636/* Returns true IFF there are requests in the cache */
Jens Axboec7dae4b2021-02-09 19:53:37 -07001637static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001638{
Jens Axboec7dae4b2021-02-09 19:53:37 -07001639 struct io_submit_state *state = &ctx->submit_state;
1640 struct io_comp_state *cs = &state->comp;
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001641 int nr;
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001642
Jens Axboec7dae4b2021-02-09 19:53:37 -07001643 /*
1644 * If we have more than a batch's worth of requests in our IRQ side
1645 * locked cache, grab the lock and move them over to our submission
1646 * side cache.
1647 */
Pavel Begunkovdac7a092021-03-19 17:22:39 +00001648 if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH)
1649 io_flush_cached_locked_reqs(ctx, cs);
Jens Axboec7dae4b2021-02-09 19:53:37 -07001650
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001651 nr = state->free_reqs;
Jens Axboec7dae4b2021-02-09 19:53:37 -07001652 while (!list_empty(&cs->free_list)) {
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001653 struct io_kiocb *req = list_first_entry(&cs->free_list,
1654 struct io_kiocb, compl.list);
1655
Jens Axboe2b188cc2019-01-07 10:46:33 -07001656 list_del(&req->compl.list);
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001657 state->reqs[nr++] = req;
1658 if (nr == ARRAY_SIZE(state->reqs))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001659 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001660 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001661
Pavel Begunkovdd78f492021-03-19 17:22:35 +00001662 state->free_reqs = nr;
1663 return nr != 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001664}
1665
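/*
 * Allocate a request, preferring the per-ring caches: the submission-state
 * array first, then requests recycled onto the completion free list, and
 * only then a batched slab allocation with a single-object fallback.
 */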
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001666static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001667{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00001668 struct io_submit_state *state = &ctx->submit_state;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001669
Pavel Begunkovbf019da2021-02-10 00:03:17 +00001670 BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
Jens Axboe2b188cc2019-01-07 10:46:33 -07001671
Pavel Begunkovf6b6c7d2020-06-21 13:09:53 +03001672 if (!state->free_reqs) {
Pavel Begunkov291b2822020-09-30 22:57:01 +03001673 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
Jens Axboe2579f912019-01-09 09:10:43 -07001674 int ret;
1675
Jens Axboec7dae4b2021-02-09 19:53:37 -07001676 if (io_flush_cached_reqs(ctx))
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001677 goto got_req;
1678
Pavel Begunkovbf019da2021-02-10 00:03:17 +00001679 ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
1680 state->reqs);
Jens Axboefd6fab22019-03-14 16:30:06 -06001681
1682 /*
1683 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1684 * retry single alloc to be on the safe side.
1685 */
1686 if (unlikely(ret <= 0)) {
1687 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1688 if (!state->reqs[0])
Pavel Begunkov3893f392021-02-10 00:03:15 +00001689 return NULL;
Jens Axboefd6fab22019-03-14 16:30:06 -06001690 ret = 1;
1691 }
Pavel Begunkov291b2822020-09-30 22:57:01 +03001692 state->free_reqs = ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001693 }
Pavel Begunkove5d1bc02021-02-10 00:03:23 +00001694got_req:
Pavel Begunkov291b2822020-09-30 22:57:01 +03001695 state->free_reqs--;
1696 return state->reqs[state->free_reqs];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001697}
1698
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001699static inline void io_put_file(struct file *file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001700{
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001701 if (file)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03001702 fput(file);
1703}
1704
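/*
 * Drop everything a request holds except its own memory and task reference:
 * the file, per-opcode cleanup state, inflight tracking, fixed resource
 * refs, async data and pinned credentials.
 */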
Pavel Begunkov4edf20f2020-10-13 09:43:59 +01001705static void io_dismantle_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001706{
Pavel Begunkov094bae42021-03-19 17:22:42 +00001707 unsigned int flags = req->flags;
1708
Pavel Begunkove1d767f2021-03-19 17:22:43 +00001709 if (!(flags & REQ_F_FIXED_FILE))
1710 io_put_file(req->file);
Pavel Begunkov094bae42021-03-19 17:22:42 +00001711 if (flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
1712 REQ_F_INFLIGHT)) {
1713 io_clean_op(req);
1714
1715 if (req->flags & REQ_F_INFLIGHT) {
1716 struct io_ring_ctx *ctx = req->ctx;
1717 unsigned long flags;
1718
1719 spin_lock_irqsave(&ctx->inflight_lock, flags);
1720 list_del(&req->inflight_entry);
1721 spin_unlock_irqrestore(&ctx->inflight_lock, flags);
1722 req->flags &= ~REQ_F_INFLIGHT;
1723 }
1724 }
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00001725 if (req->fixed_rsrc_refs)
1726 percpu_ref_put(req->fixed_rsrc_refs);
Pavel Begunkov094bae42021-03-19 17:22:42 +00001727 if (req->async_data)
1728 kfree(req->async_data);
Jens Axboe003e8dc2021-03-06 09:22:27 -07001729 if (req->work.creds) {
1730 put_cred(req->work.creds);
1731 req->work.creds = NULL;
1732 }
Pavel Begunkove6543a82020-06-28 12:52:30 +03001733}
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03001734
Pavel Begunkovb23fcf42021-03-01 18:20:48 +00001735/* must be called shortly after putting a request */
Pavel Begunkov7c660732021-01-25 11:42:21 +00001736static inline void io_put_task(struct task_struct *task, int nr)
1737{
1738 struct io_uring_task *tctx = task->io_uring;
1739
1740 percpu_counter_sub(&tctx->inflight, nr);
1741 if (unlikely(atomic_read(&tctx->in_idle)))
1742 wake_up(&tctx->wait);
1743 put_task_struct_many(task, nr);
1744}
1745
Pavel Begunkov216578e2020-10-13 09:44:00 +01001746static void __io_free_req(struct io_kiocb *req)
Pavel Begunkove6543a82020-06-28 12:52:30 +03001747{
Jens Axboe51a4cc12020-08-10 10:55:56 -06001748 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001749
Pavel Begunkov216578e2020-10-13 09:44:00 +01001750 io_dismantle_req(req);
Pavel Begunkov7c660732021-01-25 11:42:21 +00001751 io_put_task(req->task, 1);
Pavel Begunkove6543a82020-06-28 12:52:30 +03001752
Pavel Begunkov3893f392021-02-10 00:03:15 +00001753 kmem_cache_free(req_cachep, req);
Pavel Begunkovecfc5172020-06-29 13:13:03 +03001754 percpu_ref_put(&ctx->refs);
Jens Axboee65ef562019-03-12 10:16:44 -06001755}
1756
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001757static inline void io_remove_next_linked(struct io_kiocb *req)
1758{
1759 struct io_kiocb *nxt = req->link;
1760
1761 req->link = nxt->link;
1762 nxt->link = NULL;
1763}
1764
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001765static bool io_kill_linked_timeout(struct io_kiocb *req)
1766 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001767{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001768 struct io_kiocb *link = req->link;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001769
Pavel Begunkov900fad42020-10-19 16:39:16 +01001770 /*
 1771 * This can happen if a linked timeout fired and the link chain looked like
1772 * req -> link t-out -> link t-out [-> ...]
1773 */
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001774 if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1775 struct io_timeout_data *io = link->async_data;
1776 int ret;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001777
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001778 io_remove_next_linked(req);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00001779 link->timeout.head = NULL;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001780 ret = hrtimer_try_to_cancel(&io->timer);
1781 if (ret != -1) {
1782 io_cqring_fill_event(link, -ECANCELED);
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001783 io_put_req_deferred(link, 1);
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00001784 return true;
Pavel Begunkovc9abd7a2020-10-22 16:43:11 +01001785 }
1786 }
Pavel Begunkovd4729fb2021-03-22 01:58:24 +00001787 return false;
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001788}
1789
Pavel Begunkovd148ca42020-10-18 10:17:39 +01001790static void io_fail_links(struct io_kiocb *req)
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001791 __must_hold(&req->ctx->completion_lock)
Jens Axboe9e645e112019-05-10 16:07:28 -06001792{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001793 struct io_kiocb *nxt, *link = req->link;
Jens Axboe9e645e112019-05-10 16:07:28 -06001794
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001795 req->link = NULL;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001796 while (link) {
1797 nxt = link->link;
1798 link->link = NULL;
1799
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02001800 trace_io_uring_fail_link(req, link);
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001801 io_cqring_fill_event(link, -ECANCELED);
Jens Axboe1575f212021-02-27 15:20:49 -07001802 io_put_req_deferred(link, 2);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001803 link = nxt;
Jens Axboe9e645e112019-05-10 16:07:28 -06001804 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001805}
Pavel Begunkov7c86ffe2020-06-29 13:12:59 +03001806
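/*
 * Disarm a request's linked timeout and, if the request is marked as
 * failed, cancel the remainder of its link chain. Caller holds
 * completion_lock; returns true if any CQEs were posted and the caller
 * needs to commit the ring.
 */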
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001807static bool io_disarm_next(struct io_kiocb *req)
1808 __must_hold(&req->ctx->completion_lock)
1809{
1810 bool posted = false;
1811
1812 if (likely(req->flags & REQ_F_LINK_TIMEOUT))
1813 posted = io_kill_linked_timeout(req);
1814 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
1815 posted |= (req->link != NULL);
1816 io_fail_links(req);
1817 }
1818 return posted;
Jens Axboe9e645e112019-05-10 16:07:28 -06001819}
1820
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001821static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001822{
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001823 struct io_kiocb *nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07001824
Jens Axboe9e645e112019-05-10 16:07:28 -06001825 /*
1826 * If LINK is set, we have dependent requests in this chain. If we
1827 * didn't fail this request, queue the first one up, moving any other
1828 * dependencies to the next request. In case of failure, fail the rest
1829 * of the chain.
1830 */
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001831 if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK)) {
1832 struct io_ring_ctx *ctx = req->ctx;
1833 unsigned long flags;
1834 bool posted;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001835
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001836 spin_lock_irqsave(&ctx->completion_lock, flags);
1837 posted = io_disarm_next(req);
1838 if (posted)
1839 io_commit_cqring(req->ctx);
1840 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1841 if (posted)
1842 io_cqring_ev_posted(ctx);
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001843 }
Pavel Begunkov33cc89a2021-03-09 00:37:58 +00001844 nxt = req->link;
1845 req->link = NULL;
1846 return nxt;
Jens Axboe4d7dd462019-11-20 13:03:52 -07001847}
Jens Axboe2665abf2019-11-05 12:40:47 -07001848
Pavel Begunkovf2f87372020-10-27 23:25:37 +00001849static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001850{
Pavel Begunkovcdbff982021-02-12 18:41:16 +00001851 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
Pavel Begunkov3fa5e0f2020-06-30 15:20:43 +03001852 return NULL;
1853 return __io_req_find_next(req);
1854}
1855
Pavel Begunkov2c323952021-02-28 22:04:53 +00001856static void ctx_flush_and_put(struct io_ring_ctx *ctx)
1857{
1858 if (!ctx)
1859 return;
1860 if (ctx->submit_state.comp.nr) {
1861 mutex_lock(&ctx->uring_lock);
1862 io_submit_flush_completions(&ctx->submit_state.comp, ctx);
1863 mutex_unlock(&ctx->uring_lock);
1864 }
1865 percpu_ref_put(&ctx->refs);
1866}
1867
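/*
 * Run one batch of task_work for this io_uring task: splice the list off
 * under task_lock, execute each entry, and flush/put each ring context
 * lazily whenever the context changes between entries.
 */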
Jens Axboe7cbf1722021-02-10 00:03:20 +00001868static bool __tctx_task_work(struct io_uring_task *tctx)
1869{
Jens Axboe65453d12021-02-10 00:03:21 +00001870 struct io_ring_ctx *ctx = NULL;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001871 struct io_wq_work_list list;
1872 struct io_wq_work_node *node;
1873
1874 if (wq_list_empty(&tctx->task_list))
1875 return false;
1876
Jens Axboe0b81e802021-02-16 10:33:53 -07001877 spin_lock_irq(&tctx->task_lock);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001878 list = tctx->task_list;
1879 INIT_WQ_LIST(&tctx->task_list);
Jens Axboe0b81e802021-02-16 10:33:53 -07001880 spin_unlock_irq(&tctx->task_lock);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001881
1882 node = list.first;
1883 while (node) {
1884 struct io_wq_work_node *next = node->next;
1885 struct io_kiocb *req;
1886
1887 req = container_of(node, struct io_kiocb, io_task_work.node);
Pavel Begunkov2c323952021-02-28 22:04:53 +00001888 if (req->ctx != ctx) {
1889 ctx_flush_and_put(ctx);
1890 ctx = req->ctx;
1891 percpu_ref_get(&ctx->refs);
1892 }
1893
Jens Axboe7cbf1722021-02-10 00:03:20 +00001894 req->task_work.func(&req->task_work);
1895 node = next;
Jens Axboe65453d12021-02-10 00:03:21 +00001896 }
1897
Pavel Begunkov2c323952021-02-28 22:04:53 +00001898 ctx_flush_and_put(ctx);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001899 return list.first != NULL;
1900}
1901
1902static void tctx_task_work(struct callback_head *cb)
1903{
1904 struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);
1905
Jens Axboe1d5f3602021-02-26 14:54:16 -07001906 clear_bit(0, &tctx->task_state);
1907
Jens Axboe7cbf1722021-02-10 00:03:20 +00001908 while (__tctx_task_work(tctx))
1909 cond_resched();
Jens Axboe7cbf1722021-02-10 00:03:20 +00001910}
1911
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001912static int io_req_task_work_add(struct io_kiocb *req)
Jens Axboe7cbf1722021-02-10 00:03:20 +00001913{
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001914 struct task_struct *tsk = req->task;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001915 struct io_uring_task *tctx = tsk->io_uring;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001916 enum task_work_notify_mode notify;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001917 struct io_wq_work_node *node, *prev;
Jens Axboe0b81e802021-02-16 10:33:53 -07001918 unsigned long flags;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001919 int ret = 0;
1920
1921 if (unlikely(tsk->flags & PF_EXITING))
1922 return -ESRCH;
Jens Axboe7cbf1722021-02-10 00:03:20 +00001923
1924 WARN_ON_ONCE(!tctx);
1925
Jens Axboe0b81e802021-02-16 10:33:53 -07001926 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001927 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
Jens Axboe0b81e802021-02-16 10:33:53 -07001928 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001929
1930 /* task_work already pending, we're done */
1931 if (test_bit(0, &tctx->task_state) ||
1932 test_and_set_bit(0, &tctx->task_state))
1933 return 0;
1934
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001935 /*
1936 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
1937 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
1938 * processing task_work. There's no reliable way to tell if TWA_RESUME
1939 * will do the job.
1940 */
1941 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL;
1942
1943 if (!task_work_add(tsk, &tctx->task_work, notify)) {
1944 wake_up_process(tsk);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001945 return 0;
Pavel Begunkovc15b79d2021-03-19 17:22:44 +00001946 }
Jens Axboe7cbf1722021-02-10 00:03:20 +00001947
1948 /*
 1949 * Slow path - we failed, find and delete work. If the work is not
1950 * in the list, it got run and we're fine.
1951 */
Jens Axboe0b81e802021-02-16 10:33:53 -07001952 spin_lock_irqsave(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001953 wq_list_for_each(node, prev, &tctx->task_list) {
1954 if (&req->io_task_work.node == node) {
1955 wq_list_del(&tctx->task_list, node, prev);
1956 ret = 1;
1957 break;
1958 }
1959 }
Jens Axboe0b81e802021-02-16 10:33:53 -07001960 spin_unlock_irqrestore(&tctx->task_lock, flags);
Jens Axboe7cbf1722021-02-10 00:03:20 +00001961 clear_bit(0, &tctx->task_state);
1962 return ret;
1963}
1964
Pavel Begunkov9b465712021-03-15 14:23:07 +00001965static bool io_run_task_work_head(struct callback_head **work_head)
1966{
1967 struct callback_head *work, *next;
1968 bool executed = false;
1969
1970 do {
1971 work = xchg(work_head, NULL);
1972 if (!work)
1973 break;
1974
1975 do {
1976 next = work->next;
1977 work->func(work);
1978 work = next;
1979 cond_resched();
1980 } while (work);
1981 executed = true;
1982 } while (1);
1983
1984 return executed;
1985}
1986
1987static void io_task_work_add_head(struct callback_head **work_head,
1988 struct callback_head *task_work)
1989{
1990 struct callback_head *head;
1991
1992 do {
1993 head = READ_ONCE(*work_head);
1994 task_work->next = head;
1995 } while (cmpxchg(work_head, head, task_work) != head);
1996}
1997
Pavel Begunkoveab30c42021-01-19 13:32:42 +00001998static void io_req_task_work_add_fallback(struct io_kiocb *req,
Jens Axboe7cbf1722021-02-10 00:03:20 +00001999 task_work_func_t cb)
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002000{
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002001 init_task_work(&req->task_work, cb);
Pavel Begunkov9b465712021-03-15 14:23:07 +00002002 io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002003}
2004
Jens Axboec40f6372020-06-25 15:39:59 -06002005static void io_req_task_cancel(struct callback_head *cb)
2006{
2007 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe87ceb6a2020-09-14 08:20:12 -06002008 struct io_ring_ctx *ctx = req->ctx;
Jens Axboec40f6372020-06-25 15:39:59 -06002009
Pavel Begunkove83acd72021-02-28 22:35:09 +00002010 /* ctx is guaranteed to stay alive while we hold uring_lock */
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00002011 mutex_lock(&ctx->uring_lock);
Pavel Begunkov25935532021-03-19 17:22:40 +00002012 io_req_complete_failed(req, req->result);
Pavel Begunkov792bb6e2021-02-18 22:32:51 +00002013 mutex_unlock(&ctx->uring_lock);
Jens Axboec40f6372020-06-25 15:39:59 -06002014}
2015
2016static void __io_req_task_submit(struct io_kiocb *req)
2017{
2018 struct io_ring_ctx *ctx = req->ctx;
2019
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00002020 /* ctx stays valid until unlock, even if we drop all our ctx->refs */
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002021 mutex_lock(&ctx->uring_lock);
Pavel Begunkov70aacfe2021-03-01 13:02:15 +00002022 if (!(current->flags & PF_EXITING) && !current->in_execve)
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00002023 __io_queue_sqe(req);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002024 else
Pavel Begunkov25935532021-03-19 17:22:40 +00002025 io_req_complete_failed(req, -EFAULT);
Pavel Begunkov81b6d052021-01-04 20:36:35 +00002026 mutex_unlock(&ctx->uring_lock);
Jens Axboe9e645e112019-05-10 16:07:28 -06002027}
2028
Jens Axboec40f6372020-06-25 15:39:59 -06002029static void io_req_task_submit(struct callback_head *cb)
2030{
2031 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2032
2033 __io_req_task_submit(req);
2034}
2035
Pavel Begunkova3df76982021-02-18 22:32:52 +00002036static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
2037{
Pavel Begunkova3df76982021-02-18 22:32:52 +00002038 req->result = ret;
2039 req->task_work.func = io_req_task_cancel;
2040
2041 if (unlikely(io_req_task_work_add(req)))
2042 io_req_task_work_add_fallback(req, io_req_task_cancel);
2043}
2044
Pavel Begunkov2c4b8eb2021-02-28 22:35:10 +00002045static void io_req_task_queue(struct io_kiocb *req)
2046{
2047 req->task_work.func = io_req_task_submit;
2048
2049 if (unlikely(io_req_task_work_add(req)))
2050 io_req_task_queue_fail(req, -ECANCELED);
2051}
2052
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002053static inline void io_queue_next(struct io_kiocb *req)
Jackie Liuc69f8db2019-11-09 11:00:08 +08002054{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002055 struct io_kiocb *nxt = io_req_find_next(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03002056
Pavel Begunkov906a8c32020-06-27 14:04:55 +03002057 if (nxt)
2058 io_req_task_queue(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08002059}
2060
Jens Axboe9e645e112019-05-10 16:07:28 -06002061static void io_free_req(struct io_kiocb *req)
2062{
Pavel Begunkovc3524382020-06-28 12:52:32 +03002063 io_queue_next(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002064 __io_free_req(req);
Jens Axboee65ef562019-03-12 10:16:44 -06002065}
2066
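/*
 * Batches the task and ctx reference drops done while freeing a run of
 * completed requests, so the expensive per-task and per-ctx operations are
 * taken once per batch rather than once per request.
 */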
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002067struct req_batch {
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002068 struct task_struct *task;
2069 int task_refs;
Jens Axboe1b4c3512021-02-10 00:03:19 +00002070 int ctx_refs;
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002071};
2072
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002073static inline void io_init_req_batch(struct req_batch *rb)
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002074{
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002075 rb->task_refs = 0;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002076 rb->ctx_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002077 rb->task = NULL;
2078}
Pavel Begunkov8766dd52020-03-14 00:31:04 +03002079
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002080static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2081 struct req_batch *rb)
2082{
Pavel Begunkov6e833d52021-02-11 18:28:20 +00002083 if (rb->task)
Pavel Begunkov7c660732021-01-25 11:42:21 +00002084 io_put_task(rb->task, rb->task_refs);
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002085 if (rb->ctx_refs)
2086 percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002087}
2088
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002089static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
2090 struct io_submit_state *state)
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002091{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00002092 io_queue_next(req);
Pavel Begunkov96670652021-03-19 17:22:32 +00002093 io_dismantle_req(req);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002094
Jens Axboee3bc8e92020-09-24 08:45:57 -06002095 if (req->task != rb->task) {
Pavel Begunkov7c660732021-01-25 11:42:21 +00002096 if (rb->task)
2097 io_put_task(rb->task, rb->task_refs);
Jens Axboee3bc8e92020-09-24 08:45:57 -06002098 rb->task = req->task;
2099 rb->task_refs = 0;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002100 }
Jens Axboee3bc8e92020-09-24 08:45:57 -06002101 rb->task_refs++;
Pavel Begunkov9ae72462021-02-10 00:03:16 +00002102 rb->ctx_refs++;
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002103
Pavel Begunkovbd759042021-02-12 03:23:50 +00002104 if (state->free_reqs != ARRAY_SIZE(state->reqs))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002105 state->reqs[state->free_reqs++] = req;
Pavel Begunkovbd759042021-02-12 03:23:50 +00002106 else
2107 list_add(&req->compl.list, &state->comp.free_list);
Pavel Begunkov7a743e22020-03-03 21:33:13 +03002108}
2109
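/*
 * Flush the inline completion batch: post all buffered CQEs under a single
 * completion_lock section, signal waiters once, then drop the submission
 * and completion references and recycle requests that hit zero.
 */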
Pavel Begunkov905c1722021-02-10 00:03:14 +00002110static void io_submit_flush_completions(struct io_comp_state *cs,
2111 struct io_ring_ctx *ctx)
2112{
2113 int i, nr = cs->nr;
2114 struct io_kiocb *req;
2115 struct req_batch rb;
2116
2117 io_init_req_batch(&rb);
2118 spin_lock_irq(&ctx->completion_lock);
2119 for (i = 0; i < nr; i++) {
2120 req = cs->reqs[i];
2121 __io_cqring_fill_event(req, req->result, req->compl.cflags);
2122 }
2123 io_commit_cqring(ctx);
2124 spin_unlock_irq(&ctx->completion_lock);
2125
2126 io_cqring_ev_posted(ctx);
2127 for (i = 0; i < nr; i++) {
2128 req = cs->reqs[i];
2129
2130 /* submission and completion refs */
Jens Axboede9b4cc2021-02-24 13:28:27 -07002131 if (req_ref_sub_and_test(req, 2))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002132 io_req_free_batch(&rb, req, &ctx->submit_state);
Pavel Begunkov905c1722021-02-10 00:03:14 +00002133 }
2134
2135 io_req_free_batch_finish(ctx, &rb);
2136 cs->nr = 0;
Jens Axboee65ef562019-03-12 10:16:44 -06002137}
2138
Jens Axboeba816ad2019-09-28 11:36:45 -06002139/*
2140 * Drop reference to request, return next in chain (if there is one) if this
2141 * was the last reference to this request.
2142 */
Pavel Begunkov0d850352021-03-19 17:22:37 +00002143static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
Jens Axboee65ef562019-03-12 10:16:44 -06002144{
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002145 struct io_kiocb *nxt = NULL;
2146
Jens Axboede9b4cc2021-02-24 13:28:27 -07002147 if (req_ref_put_and_test(req)) {
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002148 nxt = io_req_find_next(req);
Jens Axboe4d7dd462019-11-20 13:03:52 -07002149 __io_free_req(req);
Jens Axboe2a44f462020-02-25 13:25:41 -07002150 }
Pavel Begunkov9b5f7bd2020-06-29 13:13:00 +03002151 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002152}
2153
Pavel Begunkov0d850352021-03-19 17:22:37 +00002154static inline void io_put_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002155{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002156 if (req_ref_put_and_test(req))
Jens Axboedef596e2019-01-09 08:59:42 -07002157 io_free_req(req);
2158}
2159
Pavel Begunkov216578e2020-10-13 09:44:00 +01002160static void io_put_req_deferred_cb(struct callback_head *cb)
2161{
2162 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2163
2164 io_free_req(req);
2165}
2166
2167static void io_free_req_deferred(struct io_kiocb *req)
2168{
Jens Axboe7cbf1722021-02-10 00:03:20 +00002169 req->task_work.func = io_put_req_deferred_cb;
Pavel Begunkova05432f2021-03-19 17:22:38 +00002170 if (unlikely(io_req_task_work_add(req)))
Pavel Begunkoveab30c42021-01-19 13:32:42 +00002171 io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
Pavel Begunkov216578e2020-10-13 09:44:00 +01002172}
2173
2174static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2175{
Jens Axboede9b4cc2021-02-24 13:28:27 -07002176 if (req_ref_sub_and_test(req, refs))
Pavel Begunkov216578e2020-10-13 09:44:00 +01002177 io_free_req_deferred(req);
2178}
2179
Pavel Begunkov6c503152021-01-04 20:36:36 +00002180static unsigned io_cqring_events(struct io_ring_ctx *ctx)
Jens Axboea3a0e432019-08-20 11:03:11 -06002181{
2182 /* See comment at the top of this file */
2183 smp_rmb();
Pavel Begunkove23de152020-12-17 00:24:37 +00002184 return __io_cqring_events(ctx);
Jens Axboea3a0e432019-08-20 11:03:11 -06002185}
2186
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002187static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2188{
2189 struct io_rings *rings = ctx->rings;
2190
2191 /* make sure SQ entry isn't read before tail */
2192 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2193}
2194
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002195static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
Jens Axboee94f1412019-12-19 12:06:02 -07002196{
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002197 unsigned int cflags;
Jens Axboee94f1412019-12-19 12:06:02 -07002198
Jens Axboebcda7ba2020-02-23 16:42:51 -07002199 cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2200 cflags |= IORING_CQE_F_BUFFER;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03002201 req->flags &= ~REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002202 kfree(kbuf);
2203 return cflags;
2204}
2205
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002206static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2207{
2208 struct io_buffer *kbuf;
2209
2210 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2211 return io_put_kbuf(req, kbuf);
2212}
2213
Jens Axboe4c6e2772020-07-01 11:29:10 -06002214static inline bool io_run_task_work(void)
2215{
Jens Axboe6200b0a2020-09-13 14:38:30 -06002216 /*
 2217 * Not safe to run on an exiting task, and the task_work handling will
2218 * not add work to such a task.
2219 */
2220 if (unlikely(current->flags & PF_EXITING))
2221 return false;
Jens Axboe4c6e2772020-07-01 11:29:10 -06002222 if (current->task_works) {
2223 __set_current_state(TASK_RUNNING);
2224 task_work_run();
2225 return true;
2226 }
2227
2228 return false;
2229}
2230
Jens Axboedef596e2019-01-09 08:59:42 -07002231/*
2232 * Find and free completed poll iocbs
2233 */
2234static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2235 struct list_head *done)
2236{
Jens Axboe8237e042019-12-28 10:48:22 -07002237 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07002238 struct io_kiocb *req;
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002239
2240 /* order with ->result store in io_complete_rw_iopoll() */
2241 smp_rmb();
Jens Axboedef596e2019-01-09 08:59:42 -07002242
Pavel Begunkov5af1d132020-07-18 11:32:52 +03002243 io_init_req_batch(&rb);
Jens Axboedef596e2019-01-09 08:59:42 -07002244 while (!list_empty(done)) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002245 int cflags = 0;
2246
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002247 req = list_first_entry(done, struct io_kiocb, inflight_entry);
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002248 list_del(&req->inflight_entry);
Jens Axboedef596e2019-01-09 08:59:42 -07002249
Pavel Begunkov8c130822021-03-22 01:58:32 +00002250 if (READ_ONCE(req->result) == -EAGAIN &&
2251 !(req->flags & REQ_F_DONT_REISSUE)) {
Pavel Begunkovf1613402021-02-11 18:28:21 +00002252 req->iopoll_completed = 0;
Pavel Begunkov8c130822021-03-22 01:58:32 +00002253 req_ref_get(req);
2254 io_queue_async_work(req);
2255 continue;
Pavel Begunkovf1613402021-02-11 18:28:21 +00002256 }
2257
Jens Axboebcda7ba2020-02-23 16:42:51 -07002258 if (req->flags & REQ_F_BUFFER_SELECTED)
Pavel Begunkov8ff069b2020-07-16 23:28:04 +03002259 cflags = io_put_rw_kbuf(req);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002260
2261 __io_cqring_fill_event(req, req->result, cflags);
Jens Axboedef596e2019-01-09 08:59:42 -07002262 (*nr_events)++;
2263
Jens Axboede9b4cc2021-02-24 13:28:27 -07002264 if (req_ref_put_and_test(req))
Pavel Begunkov6ff119a2021-02-10 00:03:18 +00002265 io_req_free_batch(&rb, req, &ctx->submit_state);
Jens Axboedef596e2019-01-09 08:59:42 -07002266 }
Jens Axboedef596e2019-01-09 08:59:42 -07002267
Jens Axboe09bb8392019-03-13 12:39:28 -06002268 io_commit_cqring(ctx);
Pavel Begunkov80c18e42021-01-07 03:15:41 +00002269 io_cqring_ev_posted_iopoll(ctx);
Pavel Begunkov2d6500d2020-06-28 12:52:33 +03002270 io_req_free_batch_finish(ctx, &rb);
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002271}
2272
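/*
 * One pass of the IOPOLL loop: harvest already-completed requests, then
 * poll the remaining kiocbs, only letting the driver spin while nothing
 * has completed, we're under the requested amount, and the poll list
 * doesn't span multiple devices.
 */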
Jens Axboedef596e2019-01-09 08:59:42 -07002273static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2274 long min)
2275{
2276 struct io_kiocb *req, *tmp;
2277 LIST_HEAD(done);
2278 bool spin;
2279 int ret;
2280
2281 /*
2282 * Only spin for completions if we don't have multiple devices hanging
2283 * off our complete list, and we're under the requested amount.
2284 */
2285 spin = !ctx->poll_multi_file && *nr_events < min;
2286
2287 ret = 0;
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002288 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002289 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboedef596e2019-01-09 08:59:42 -07002290
2291 /*
Bijan Mottahedeh581f9812020-04-03 13:51:33 -07002292 * Move completed and retryable entries to our local lists.
2293 * If we find a request that requires polling, break out
2294 * and complete those lists first, if we have entries there.
Jens Axboedef596e2019-01-09 08:59:42 -07002295 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002296 if (READ_ONCE(req->iopoll_completed)) {
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002297 list_move_tail(&req->inflight_entry, &done);
Jens Axboedef596e2019-01-09 08:59:42 -07002298 continue;
2299 }
2300 if (!list_empty(&done))
2301 break;
2302
2303 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2304 if (ret < 0)
2305 break;
2306
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002307 /* iopoll may have completed current req */
2308 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002309 list_move_tail(&req->inflight_entry, &done);
Pavel Begunkov3aadc232020-07-06 17:59:29 +03002310
Jens Axboedef596e2019-01-09 08:59:42 -07002311 if (ret && spin)
2312 spin = false;
2313 ret = 0;
2314 }
2315
2316 if (!list_empty(&done))
2317 io_iopoll_complete(ctx, nr_events, &done);
2318
2319 return ret;
2320}
2321
2322/*
Brian Gianforcarod195a662019-12-13 03:09:50 -08002323 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
Jens Axboedef596e2019-01-09 08:59:42 -07002324 * non-spinning poll check - we'll still enter the driver poll loop, but only
2325 * as a non-spinning completion check.
2326 */
2327static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
2328 long min)
2329{
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002330 while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
Jens Axboedef596e2019-01-09 08:59:42 -07002331 int ret;
2332
2333 ret = io_do_iopoll(ctx, nr_events, min);
2334 if (ret < 0)
2335 return ret;
Pavel Begunkoveba0a4d2020-07-06 17:59:30 +03002336 if (*nr_events >= min)
Jens Axboedef596e2019-01-09 08:59:42 -07002337 return 0;
2338 }
2339
2340 return 1;
2341}
2342
2343/*
2344 * We can't just wait for polled events to come to us, we have to actively
2345 * find and complete them.
2346 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002347static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
Jens Axboedef596e2019-01-09 08:59:42 -07002348{
2349 if (!(ctx->flags & IORING_SETUP_IOPOLL))
2350 return;
2351
2352 mutex_lock(&ctx->uring_lock);
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002353 while (!list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002354 unsigned int nr_events = 0;
2355
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002356 io_do_iopoll(ctx, &nr_events, 0);
Jens Axboe08f54392019-08-21 22:19:11 -06002357
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03002358 /* let it sleep and repeat later if can't complete a request */
2359 if (nr_events == 0)
2360 break;
Jens Axboe08f54392019-08-21 22:19:11 -06002361 /*
 2362 * Ensure we allow local-to-the-cpu processing to take place;
 2363 * in this case we need to ensure that we reap all events.
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002364 * Also let task_work, etc. make progress by releasing the mutex
Jens Axboe08f54392019-08-21 22:19:11 -06002365 */
Pavel Begunkov3fcee5a2020-07-06 17:59:31 +03002366 if (need_resched()) {
2367 mutex_unlock(&ctx->uring_lock);
2368 cond_resched();
2369 mutex_lock(&ctx->uring_lock);
2370 }
Jens Axboedef596e2019-01-09 08:59:42 -07002371 }
2372 mutex_unlock(&ctx->uring_lock);
2373}
2374
Pavel Begunkov7668b922020-07-07 16:36:21 +03002375static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
Jens Axboedef596e2019-01-09 08:59:42 -07002376{
Pavel Begunkov7668b922020-07-07 16:36:21 +03002377 unsigned int nr_events = 0;
Jens Axboe2b2ed972019-10-25 10:06:15 -06002378 int iters = 0, ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002379
Xiaoguang Wangc7849be2020-02-22 14:46:05 +08002380 /*
2381 * We disallow the app entering submit/complete with polling, but we
2382 * still need to lock the ring to prevent racing with polled issue
2383 * that got punted to a workqueue.
2384 */
2385 mutex_lock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002386 do {
Jens Axboe500f9fb2019-08-19 12:15:59 -06002387 /*
Jens Axboea3a0e432019-08-20 11:03:11 -06002388 * Don't enter poll loop if we already have events pending.
2389 * If we do, we can potentially be spinning for commands that
2390 * already triggered a CQE (eg in error).
2391 */
Pavel Begunkov6c503152021-01-04 20:36:36 +00002392 if (test_bit(0, &ctx->cq_check_overflow))
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00002393 __io_cqring_overflow_flush(ctx, false);
Pavel Begunkov6c503152021-01-04 20:36:36 +00002394 if (io_cqring_events(ctx))
Jens Axboea3a0e432019-08-20 11:03:11 -06002395 break;
2396
2397 /*
Jens Axboe500f9fb2019-08-19 12:15:59 -06002398 * If a submit got punted to a workqueue, we can have the
2399 * application entering polling for a command before it gets
2400 * issued. That app will hold the uring_lock for the duration
2401 * of the poll right here, so we need to take a breather every
2402 * now and then to ensure that the issue has a chance to add
2403 * the poll to the issued list. Otherwise we can spin here
2404 * forever, while the workqueue is stuck trying to acquire the
2405 * very same mutex.
2406 */
2407 if (!(++iters & 7)) {
2408 mutex_unlock(&ctx->uring_lock);
Jens Axboe4c6e2772020-07-01 11:29:10 -06002409 io_run_task_work();
Jens Axboe500f9fb2019-08-19 12:15:59 -06002410 mutex_lock(&ctx->uring_lock);
2411 }
2412
Pavel Begunkov7668b922020-07-07 16:36:21 +03002413 ret = io_iopoll_getevents(ctx, &nr_events, min);
Jens Axboedef596e2019-01-09 08:59:42 -07002414 if (ret <= 0)
2415 break;
2416 ret = 0;
Pavel Begunkov7668b922020-07-07 16:36:21 +03002417 } while (min && !nr_events && !need_resched());
Jens Axboedef596e2019-01-09 08:59:42 -07002418
Jens Axboe500f9fb2019-08-19 12:15:59 -06002419 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07002420 return ret;
2421}
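
/*
 * A minimal userspace sketch of what drives the reaping above, assuming the
 * liburing helpers (io_uring_queue_init() and friends): with
 * IORING_SETUP_IOPOLL there are no completion interrupts, so the application
 * has to (re)enter the kernel and let io_iopoll_check() find completions.
 * 'buf' must be suitably aligned for O_DIRECT; error handling is elided.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <liburing.h>
 *
 *	static int iopoll_read(const char *path, void *buf, unsigned len)
 *	{
 *		struct io_uring ring;
 *		struct io_uring_sqe *sqe;
 *		struct io_uring_cqe *cqe;
 *		int fd, ret;
 *
 *		io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
 *		fd = open(path, O_RDONLY | O_DIRECT);	// IOPOLL requires IOCB_DIRECT
 *		sqe = io_uring_get_sqe(&ring);
 *		io_uring_prep_read(sqe, fd, buf, len, 0);
 *		io_uring_submit(&ring);
 *		// waiting enters the kernel, which polls the device for completions
 *		ret = io_uring_wait_cqe(&ring, &cqe);
 *		if (!ret)
 *			io_uring_cqe_seen(&ring, cqe);
 *		close(fd);
 *		io_uring_queue_exit(&ring);
 *		return ret;
 *	}
 */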
2422
Jens Axboe491381ce2019-10-17 09:20:46 -06002423static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002424{
Jens Axboe491381ce2019-10-17 09:20:46 -06002425 /*
2426 * Tell lockdep we inherited freeze protection from submission
2427 * thread.
2428 */
2429 if (req->flags & REQ_F_ISREG) {
Pavel Begunkov1c986792021-03-22 01:58:31 +00002430 struct super_block *sb = file_inode(req->file)->i_sb;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002431
Pavel Begunkov1c986792021-03-22 01:58:31 +00002432 __sb_writers_acquired(sb, SB_FREEZE_WRITE);
2433 sb_end_write(sb);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002434 }
2435}
2436
Jens Axboeb63534c2020-06-04 11:28:00 -06002437#ifdef CONFIG_BLOCK
Pavel Begunkovdc2a6e92021-01-19 13:32:35 +00002438static bool io_resubmit_prep(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002439{
Pavel Begunkovab454432021-03-22 01:58:33 +00002440 struct io_async_rw *rw = req->async_data;
2441
2442 if (!rw)
2443 return !io_req_prep_async(req);
2444 /* may have left rw->iter inconsistent on -EIOCBQUEUED */
2445 iov_iter_revert(&rw->iter, req->result - iov_iter_count(&rw->iter));
2446 return true;
Jens Axboeb63534c2020-06-04 11:28:00 -06002447}
Jens Axboeb63534c2020-06-04 11:28:00 -06002448
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002449static bool io_rw_should_reissue(struct io_kiocb *req)
Jens Axboeb63534c2020-06-04 11:28:00 -06002450{
Jens Axboe355afae2020-09-02 09:30:31 -06002451 umode_t mode = file_inode(req->file)->i_mode;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002452 struct io_ring_ctx *ctx = req->ctx;
Jens Axboeb63534c2020-06-04 11:28:00 -06002453
Jens Axboe355afae2020-09-02 09:30:31 -06002454 if (!S_ISBLK(mode) && !S_ISREG(mode))
2455 return false;
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002456 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
2457 !(ctx->flags & IORING_SETUP_IOPOLL)))
Jens Axboeb63534c2020-06-04 11:28:00 -06002458 return false;
Jens Axboe7c977a52021-02-23 19:17:35 -07002459 /*
2460 * If ref is dying, we might be running poll reap from the exit work.
2461 * Don't attempt to reissue from that path, just let it fail with
2462 * -EAGAIN.
2463 */
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002464 if (percpu_ref_is_dying(&ctx->refs))
2465 return false;
2466 return true;
2467}
Jens Axboee82ad482021-04-02 19:45:34 -06002468#else
2469static bool io_rw_should_reissue(struct io_kiocb *req)
2470{
2471 return false;
2472}
Jens Axboe3e6a0d32021-03-01 13:56:00 -07002473#endif
2474
Jens Axboea1d7c392020-06-22 11:09:46 -06002475static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002476 unsigned int issue_flags)
Jens Axboea1d7c392020-06-22 11:09:46 -06002477{
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002478 int cflags = 0;
2479
Pavel Begunkovb65c1282021-03-22 01:45:59 +00002480 if (req->rw.kiocb.ki_flags & IOCB_WRITE)
2481 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002482 if (res != req->result) {
2483 if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
2484 io_rw_should_reissue(req)) {
2485 req->flags |= REQ_F_REISSUE;
2486 return;
2487 }
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002488 req_set_fail_links(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002489 }
Pavel Begunkov2f8e45f2021-02-11 18:28:23 +00002490 if (req->flags & REQ_F_BUFFER_SELECTED)
2491 cflags = io_put_rw_kbuf(req);
2492 __io_req_complete(req, issue_flags, res, cflags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002493}
2494
2495static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2496{
Jens Axboe9adbd452019-12-20 08:45:55 -07002497 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06002498
Pavel Begunkov889fca72021-02-10 00:03:09 +00002499 __io_complete_rw(req, res, res2, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002500}
2501
Jens Axboedef596e2019-01-09 08:59:42 -07002502static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2503{
Jens Axboe9adbd452019-12-20 08:45:55 -07002504 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07002505
Jens Axboe491381ce2019-10-17 09:20:46 -06002506 if (kiocb->ki_flags & IOCB_WRITE)
2507 kiocb_end_write(req);
Pavel Begunkov9532b992021-03-22 01:58:34 +00002508 if (unlikely(res != req->result)) {
2509 bool fail = true;
Jens Axboedef596e2019-01-09 08:59:42 -07002510
Pavel Begunkov9532b992021-03-22 01:58:34 +00002511#ifdef CONFIG_BLOCK
2512 if (res == -EAGAIN && io_rw_should_reissue(req) &&
2513 io_resubmit_prep(req))
2514 fail = false;
2515#endif
2516 if (fail) {
2517 req_set_fail_links(req);
2518 req->flags |= REQ_F_DONT_REISSUE;
2519 }
Pavel Begunkov8c130822021-03-22 01:58:32 +00002520 }
Xiaoguang Wangbbde0172020-06-16 02:06:38 +08002521
2522 WRITE_ONCE(req->result, res);
Jens Axboeb9b0e0d2021-02-23 08:18:36 -07002523 /* order with io_iopoll_complete() checking ->result */
Pavel Begunkovcd664b02020-06-25 12:37:10 +03002524 smp_wmb();
2525 WRITE_ONCE(req->iopoll_completed, 1);
Jens Axboedef596e2019-01-09 08:59:42 -07002526}
2527
2528/*
2529 * After the iocb has been issued, it's safe to be found on the poll list.
2530 * Adding the kiocb to the list AFTER submission ensures that we don't
2531 * find it from an io_iopoll_getevents() thread before the issuer is done
2532 * accessing the kiocb cookie.
2533 */
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08002534static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
Jens Axboedef596e2019-01-09 08:59:42 -07002535{
2536 struct io_ring_ctx *ctx = req->ctx;
2537
2538 /*
2539 * Track whether we have multiple files in our lists. This will impact
2540 * how we do polling eventually - we won't spin if we're on potentially
2541 * different devices.
2542 */
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002543 if (list_empty(&ctx->iopoll_list)) {
Jens Axboedef596e2019-01-09 08:59:42 -07002544 ctx->poll_multi_file = false;
2545 } else if (!ctx->poll_multi_file) {
2546 struct io_kiocb *list_req;
2547
Pavel Begunkov540e32a2020-07-13 23:37:09 +03002548 list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002549 inflight_entry);
Jens Axboe9adbd452019-12-20 08:45:55 -07002550 if (list_req->file != req->file)
Jens Axboedef596e2019-01-09 08:59:42 -07002551 ctx->poll_multi_file = true;
2552 }
2553
2554 /*
2555 * For fast devices, IO may have already completed. If it has, add
2556 * it to the front so we find it first.
2557 */
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002558 if (READ_ONCE(req->iopoll_completed))
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002559 list_add(&req->inflight_entry, &ctx->iopoll_list);
Jens Axboedef596e2019-01-09 08:59:42 -07002560 else
Pavel Begunkovd21ffe72020-07-13 23:37:10 +03002561 list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08002562
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08002563 /*
2564 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in sq thread
2565 * task context or in io worker task context. If the current task context
2566 * is the sq thread, we don't need to check whether we should wake it up.
2567 */
2568 if (in_async && (ctx->flags & IORING_SETUP_SQPOLL) &&
Jens Axboe534ca6d2020-09-02 13:52:19 -06002569 wq_has_sleeper(&ctx->sq_data->wait))
2570 wake_up(&ctx->sq_data->wait);
Jens Axboedef596e2019-01-09 08:59:42 -07002571}
2572
Pavel Begunkov9f13c352020-05-17 14:13:41 +03002573static inline void io_state_file_put(struct io_submit_state *state)
2574{
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002575 if (state->file_refs) {
2576 fput_many(state->file, state->file_refs);
2577 state->file_refs = 0;
2578 }
Jens Axboe9a56a232019-01-09 09:06:50 -07002579}
2580
2581/*
2582 * Get as many references to a file as we have IOs left in this submission,
2583 * assuming most submissions are for one file, or at least that each file
2584 * has more than one submission.
2585 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03002586static struct file *__io_file_get(struct io_submit_state *state, int fd)
Jens Axboe9a56a232019-01-09 09:06:50 -07002587{
2588 if (!state)
2589 return fget(fd);
2590
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002591 if (state->file_refs) {
Jens Axboe9a56a232019-01-09 09:06:50 -07002592 if (state->fd == fd) {
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002593 state->file_refs--;
Jens Axboe9a56a232019-01-09 09:06:50 -07002594 return state->file;
2595 }
Pavel Begunkov02b23a92021-01-19 13:32:41 +00002596 io_state_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07002597 }
2598 state->file = fget_many(fd, state->ios_left);
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002599 if (unlikely(!state->file))
Jens Axboe9a56a232019-01-09 09:06:50 -07002600 return NULL;
2601
2602 state->fd = fd;
Pavel Begunkov6e1271e2020-11-20 15:50:50 +00002603 state->file_refs = state->ios_left - 1;
Jens Axboe9a56a232019-01-09 09:06:50 -07002604 return state->file;
2605}
2606
Jens Axboe4503b762020-06-01 10:00:27 -06002607static bool io_bdev_nowait(struct block_device *bdev)
2608{
Jeffle Xu9ba0d0c2020-10-19 16:59:42 +08002609 return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
Jens Axboe4503b762020-06-01 10:00:27 -06002610}
2611
Jens Axboe2b188cc2019-01-07 10:46:33 -07002612/*
2613 * If we tracked the file through the SCM inflight mechanism, we could support
2614 * any file. For now, just ensure that anything potentially problematic is done
2615 * inline.
2616 */
Jens Axboe7b29f922021-03-12 08:30:14 -07002617static bool __io_file_supports_async(struct file *file, int rw)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002618{
2619 umode_t mode = file_inode(file)->i_mode;
2620
Jens Axboe4503b762020-06-01 10:00:27 -06002621 if (S_ISBLK(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002622 if (IS_ENABLED(CONFIG_BLOCK) &&
2623 io_bdev_nowait(I_BDEV(file->f_mapping->host)))
Jens Axboe4503b762020-06-01 10:00:27 -06002624 return true;
2625 return false;
2626 }
2627 if (S_ISCHR(mode) || S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002628 return true;
Jens Axboe4503b762020-06-01 10:00:27 -06002629 if (S_ISREG(mode)) {
Christoph Hellwig4e7b5672020-11-23 13:38:40 +01002630 if (IS_ENABLED(CONFIG_BLOCK) &&
2631 io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
Jens Axboe4503b762020-06-01 10:00:27 -06002632 file->f_op != &io_uring_fops)
2633 return true;
2634 return false;
2635 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002636
Jens Axboec5b85622020-06-09 19:23:05 -06002637 /* any ->read/write should understand O_NONBLOCK */
2638 if (file->f_flags & O_NONBLOCK)
2639 return true;
2640
Jens Axboeaf197f52020-04-28 13:15:06 -06002641 if (!(file->f_mode & FMODE_NOWAIT))
2642 return false;
2643
2644 if (rw == READ)
2645 return file->f_op->read_iter != NULL;
2646
2647 return file->f_op->write_iter != NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002648}
2649
Jens Axboe7b29f922021-03-12 08:30:14 -07002650static bool io_file_supports_async(struct io_kiocb *req, int rw)
2651{
2652 if (rw == READ && (req->flags & REQ_F_ASYNC_READ))
2653 return true;
2654 else if (rw == WRITE && (req->flags & REQ_F_ASYNC_WRITE))
2655 return true;
2656
2657 return __io_file_supports_async(req->file, rw);
2658}
2659
Pavel Begunkova88fc402020-09-30 22:57:53 +03002660static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002661{
Jens Axboedef596e2019-01-09 08:59:42 -07002662 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07002663 struct kiocb *kiocb = &req->rw.kiocb;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002664 struct file *file = req->file;
Jens Axboe09bb8392019-03-13 12:39:28 -06002665 unsigned ioprio;
2666 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002667
Jens Axboe7b29f922021-03-12 08:30:14 -07002668 if (!(req->flags & REQ_F_ISREG) && S_ISREG(file_inode(file)->i_mode))
Jens Axboe491381ce2019-10-17 09:20:46 -06002669 req->flags |= REQ_F_ISREG;
2670
Jens Axboe2b188cc2019-01-07 10:46:33 -07002671 kiocb->ki_pos = READ_ONCE(sqe->off);
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002672 if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
Jens Axboeba042912019-12-25 16:33:42 -07002673 req->flags |= REQ_F_CUR_POS;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002674 kiocb->ki_pos = file->f_pos;
Jens Axboeba042912019-12-25 16:33:42 -07002675 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002676 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03002677 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2678 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2679 if (unlikely(ret))
2680 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002681
Pavel Begunkov75c668c2021-02-04 13:52:05 +00002682 /* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
2683 if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
2684 req->flags |= REQ_F_NOWAIT;
2685
Jens Axboe2b188cc2019-01-07 10:46:33 -07002686 ioprio = READ_ONCE(sqe->ioprio);
2687 if (ioprio) {
2688 ret = ioprio_check_cap(ioprio);
2689 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06002690 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002691
2692 kiocb->ki_ioprio = ioprio;
2693 } else
2694 kiocb->ki_ioprio = get_current_ioprio();
2695
Jens Axboedef596e2019-01-09 08:59:42 -07002696 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07002697 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2698 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06002699 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002700
Jens Axboedef596e2019-01-09 08:59:42 -07002701 kiocb->ki_flags |= IOCB_HIPRI;
2702 kiocb->ki_complete = io_complete_rw_iopoll;
Xiaoguang Wang65a65432020-06-11 23:39:36 +08002703 req->iopoll_completed = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07002704 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06002705 if (kiocb->ki_flags & IOCB_HIPRI)
2706 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07002707 kiocb->ki_complete = io_complete_rw;
2708 }
Jens Axboe9adbd452019-12-20 08:45:55 -07002709
Jens Axboe3529d8c2019-12-19 18:24:38 -07002710 req->rw.addr = READ_ONCE(sqe->addr);
2711 req->rw.len = READ_ONCE(sqe->len);
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002712 req->buf_index = READ_ONCE(sqe->buf_index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002713 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002714}
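
/*
 * Sketch of how the SQE fields consumed above are typically set from
 * userspace (illustration only; assumes an initialized ring and uses a
 * liburing prep helper; the RWF_NOWAIT and ioprio settings are optional
 * extras):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_read(sqe, fd, buf, len, offset);	// fills fd, addr, len, off
 *	sqe->rw_flags = RWF_NOWAIT;	// picked up by kiocb_set_rw_flags()
 *	sqe->ioprio = 0;		// non-zero values go through ioprio_check_cap()
 *	// sqe->buf_index only matters for the *_FIXED opcodes
 */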
2715
2716static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2717{
2718 switch (ret) {
2719 case -EIOCBQUEUED:
2720 break;
2721 case -ERESTARTSYS:
2722 case -ERESTARTNOINTR:
2723 case -ERESTARTNOHAND:
2724 case -ERESTART_RESTARTBLOCK:
2725 /*
2726 * We can't just restart the syscall, since previously
2727 * submitted sqes may already be in progress. Just fail this
2728 * IO with EINTR.
2729 */
2730 ret = -EINTR;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05002731 fallthrough;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002732 default:
2733 kiocb->ki_complete(kiocb, ret, 0);
2734 }
2735}
2736
Jens Axboea1d7c392020-06-22 11:09:46 -06002737static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
Pavel Begunkov889fca72021-02-10 00:03:09 +00002738 unsigned int issue_flags)
Jens Axboeba816ad2019-09-28 11:36:45 -06002739{
Jens Axboeba042912019-12-25 16:33:42 -07002740 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboee8c2bc12020-08-15 18:44:09 -07002741 struct io_async_rw *io = req->async_data;
Pavel Begunkov97284632021-04-08 19:28:03 +01002742 bool check_reissue = kiocb->ki_complete == io_complete_rw;
Jens Axboeba042912019-12-25 16:33:42 -07002743
Jens Axboe227c0c92020-08-13 11:51:40 -06002744 /* add previously done IO, if any */
Jens Axboee8c2bc12020-08-15 18:44:09 -07002745 if (io && io->bytes_done > 0) {
Jens Axboe227c0c92020-08-13 11:51:40 -06002746 if (ret < 0)
Jens Axboee8c2bc12020-08-15 18:44:09 -07002747 ret = io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002748 else
Jens Axboee8c2bc12020-08-15 18:44:09 -07002749 ret += io->bytes_done;
Jens Axboe227c0c92020-08-13 11:51:40 -06002750 }
2751
Jens Axboeba042912019-12-25 16:33:42 -07002752 if (req->flags & REQ_F_CUR_POS)
2753 req->file->f_pos = kiocb->ki_pos;
Pavel Begunkovbcaec082020-02-24 11:30:18 +03002754 if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
Pavel Begunkov889fca72021-02-10 00:03:09 +00002755 __io_complete_rw(req, ret, 0, issue_flags);
Jens Axboeba816ad2019-09-28 11:36:45 -06002756 else
2757 io_rw_done(kiocb, ret);
Pavel Begunkov97284632021-04-08 19:28:03 +01002758
2759 if (check_reissue && req->flags & REQ_F_REISSUE) {
2760 req->flags &= ~REQ_F_REISSUE;
Pavel Begunkov8c130822021-03-22 01:58:32 +00002761 if (!io_resubmit_prep(req)) {
2762 req_ref_get(req);
2763 io_queue_async_work(req);
2764 } else {
Pavel Begunkov97284632021-04-08 19:28:03 +01002765 int cflags = 0;
2766
2767 req_set_fail_links(req);
2768 if (req->flags & REQ_F_BUFFER_SELECTED)
2769 cflags = io_put_rw_kbuf(req);
2770 __io_req_complete(req, issue_flags, ret, cflags);
2771 }
2772 }
Jens Axboeba816ad2019-09-28 11:36:45 -06002773}
2774
Pavel Begunkov847595d2021-02-04 13:52:06 +00002775static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
Jens Axboeedafcce2019-01-09 09:16:05 -07002776{
Jens Axboe9adbd452019-12-20 08:45:55 -07002777 struct io_ring_ctx *ctx = req->ctx;
2778 size_t len = req->rw.len;
Jens Axboeedafcce2019-01-09 09:16:05 -07002779 struct io_mapped_ubuf *imu;
Pavel Begunkov4be1c612020-09-06 00:45:48 +03002780 u16 index, buf_index = req->buf_index;
Jens Axboeedafcce2019-01-09 09:16:05 -07002781 size_t offset;
2782 u64 buf_addr;
2783
Jens Axboeedafcce2019-01-09 09:16:05 -07002784 if (unlikely(buf_index >= ctx->nr_user_bufs))
2785 return -EFAULT;
Jens Axboeedafcce2019-01-09 09:16:05 -07002786 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2787 imu = &ctx->user_bufs[index];
Jens Axboe9adbd452019-12-20 08:45:55 -07002788 buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07002789
2790 /* overflow */
2791 if (buf_addr + len < buf_addr)
2792 return -EFAULT;
2793 /* not inside the mapped region */
2794 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2795 return -EFAULT;
2796
2797 /*
2798 * May not be the start of the buffer; set the size appropriately
2799 * and advance us to the beginning.
2800 */
2801 offset = buf_addr - imu->ubuf;
2802 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06002803
2804 if (offset) {
2805 /*
2806 * Don't use iov_iter_advance() here, as it's really slow for
2807 * using the latter parts of a big fixed buffer - it iterates
2808 * over each segment manually. We can cheat a bit here, because
2809 * we know that:
2810 *
2811 * 1) it's a BVEC iter, we set it up
2812 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2813 * first and last bvec
2814 *
2815 * So just find our index, and adjust the iterator afterwards.
2816 * If the offset is within the first bvec (or is the whole first
2817 * bvec), just use iov_iter_advance(). This makes it easier
2818 * since we can just skip the first segment, which may not
2819 * be PAGE_SIZE aligned.
2820 */
2821 const struct bio_vec *bvec = imu->bvec;
2822
2823 if (offset <= bvec->bv_len) {
2824 iov_iter_advance(iter, offset);
2825 } else {
2826 unsigned long seg_skip;
2827
2828 /* skip first vec */
2829 offset -= bvec->bv_len;
2830 seg_skip = 1 + (offset >> PAGE_SHIFT);
2831
2832 iter->bvec = bvec + seg_skip;
2833 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02002834 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002835 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002836 }
2837 }
2838
Pavel Begunkov847595d2021-02-04 13:52:06 +00002839 return 0;
Jens Axboeedafcce2019-01-09 09:16:05 -07002840}
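
/*
 * Userspace side of the fixed-buffer import above, assuming liburing and an
 * initialized ring with an open fd: the buffer is registered (pinned and
 * mapped) once, and later requests reference it by buf_index so no per-IO
 * page pinning is needed.
 *
 *	static char buf[BUF_SIZE];	// BUF_SIZE is an assumed constant
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *
 *	io_uring_register_buffers(&ring, &iov, 1);	// becomes index 0
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	// addr/len may point anywhere inside the registered region
 *	io_uring_prep_read_fixed(sqe, fd, buf + 4096, 8192, 0, 0);
 *	io_uring_submit(&ring);
 */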
2841
Jens Axboebcda7ba2020-02-23 16:42:51 -07002842static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2843{
2844 if (needs_lock)
2845 mutex_unlock(&ctx->uring_lock);
2846}
2847
2848static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2849{
2850 /*
2851 * "Normal" inline submissions always hold the uring_lock, since we
2852 * grab it from the system call. Same is true for the SQPOLL offload.
2853 * The only exception is when we've detached the request and issue it
2854 * from an async worker thread, grab the lock for that case.
2855 */
2856 if (needs_lock)
2857 mutex_lock(&ctx->uring_lock);
2858}
2859
2860static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2861 int bgid, struct io_buffer *kbuf,
2862 bool needs_lock)
2863{
2864 struct io_buffer *head;
2865
2866 if (req->flags & REQ_F_BUFFER_SELECTED)
2867 return kbuf;
2868
2869 io_ring_submit_lock(req->ctx, needs_lock);
2870
2871 lockdep_assert_held(&req->ctx->uring_lock);
2872
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002873 head = xa_load(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002874 if (head) {
2875 if (!list_empty(&head->list)) {
2876 kbuf = list_last_entry(&head->list, struct io_buffer,
2877 list);
2878 list_del(&kbuf->list);
2879 } else {
2880 kbuf = head;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07002881 xa_erase(&req->ctx->io_buffers, bgid);
Jens Axboebcda7ba2020-02-23 16:42:51 -07002882 }
2883 if (*len > kbuf->len)
2884 *len = kbuf->len;
2885 } else {
2886 kbuf = ERR_PTR(-ENOBUFS);
2887 }
2888
2889 io_ring_submit_unlock(req->ctx, needs_lock);
2890
2891 return kbuf;
2892}
2893
Jens Axboe4d954c22020-02-27 07:31:19 -07002894static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2895 bool needs_lock)
2896{
2897 struct io_buffer *kbuf;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002898 u16 bgid;
Jens Axboe4d954c22020-02-27 07:31:19 -07002899
2900 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002901 bgid = req->buf_index;
Jens Axboe4d954c22020-02-27 07:31:19 -07002902 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
2903 if (IS_ERR(kbuf))
2904 return kbuf;
2905 req->rw.addr = (u64) (unsigned long) kbuf;
2906 req->flags |= REQ_F_BUFFER_SELECTED;
2907 return u64_to_user_ptr(kbuf->addr);
2908}
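
/*
 * Sketch of the provided-buffer flow feeding io_rw_buffer_select(), assuming
 * liburing, an initialized ring, an open fd and a 'pool' allocation: the
 * application donates a pool of buffers under a group id, the kernel picks
 * one per request, and the chosen buffer id comes back in the CQE flags.
 *
 *	// donate 8 buffers of 4KiB each: group id 1, first buffer id 0
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_provide_buffers(sqe, pool, 4096, 8, 1, 0);
 *	io_uring_submit(&ring);
 *
 *	// a read that lets the kernel pick a buffer from group 1
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, NULL, 4096, 0);	// no addr supplied
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 1;
 *	io_uring_submit(&ring);
 *
 *	io_uring_wait_cqe(&ring, &cqe);
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;	// buffer that was used
 */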
2909
2910#ifdef CONFIG_COMPAT
2911static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
2912 bool needs_lock)
2913{
2914 struct compat_iovec __user *uiov;
2915 compat_ssize_t clen;
2916 void __user *buf;
2917 ssize_t len;
2918
2919 uiov = u64_to_user_ptr(req->rw.addr);
2920 if (!access_ok(uiov, sizeof(*uiov)))
2921 return -EFAULT;
2922 if (__get_user(clen, &uiov->iov_len))
2923 return -EFAULT;
2924 if (clen < 0)
2925 return -EINVAL;
2926
2927 len = clen;
2928 buf = io_rw_buffer_select(req, &len, needs_lock);
2929 if (IS_ERR(buf))
2930 return PTR_ERR(buf);
2931 iov[0].iov_base = buf;
2932 iov[0].iov_len = (compat_size_t) len;
2933 return 0;
2934}
2935#endif
2936
2937static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2938 bool needs_lock)
2939{
2940 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
2941 void __user *buf;
2942 ssize_t len;
2943
2944 if (copy_from_user(iov, uiov, sizeof(*uiov)))
2945 return -EFAULT;
2946
2947 len = iov[0].iov_len;
2948 if (len < 0)
2949 return -EINVAL;
2950 buf = io_rw_buffer_select(req, &len, needs_lock);
2951 if (IS_ERR(buf))
2952 return PTR_ERR(buf);
2953 iov[0].iov_base = buf;
2954 iov[0].iov_len = len;
2955 return 0;
2956}
2957
2958static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
2959 bool needs_lock)
2960{
Jens Axboedddb3e22020-06-04 11:27:01 -06002961 if (req->flags & REQ_F_BUFFER_SELECTED) {
2962 struct io_buffer *kbuf;
2963
2964 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2965 iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
2966 iov[0].iov_len = kbuf->len;
Jens Axboe4d954c22020-02-27 07:31:19 -07002967 return 0;
Jens Axboedddb3e22020-06-04 11:27:01 -06002968 }
Pavel Begunkovdd201662020-12-19 03:15:43 +00002969 if (req->rw.len != 1)
Jens Axboe4d954c22020-02-27 07:31:19 -07002970 return -EINVAL;
2971
2972#ifdef CONFIG_COMPAT
2973 if (req->ctx->compat)
2974 return io_compat_import(req, iov, needs_lock);
2975#endif
2976
2977 return __io_iov_buffer_select(req, iov, needs_lock);
2978}
2979
Pavel Begunkov847595d2021-02-04 13:52:06 +00002980static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
2981 struct iov_iter *iter, bool needs_lock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002982{
Jens Axboe9adbd452019-12-20 08:45:55 -07002983 void __user *buf = u64_to_user_ptr(req->rw.addr);
2984 size_t sqe_len = req->rw.len;
Pavel Begunkov847595d2021-02-04 13:52:06 +00002985 u8 opcode = req->opcode;
Jens Axboe4d954c22020-02-27 07:31:19 -07002986 ssize_t ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07002987
Pavel Begunkov7d009162019-11-25 23:14:40 +03002988 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07002989 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07002990 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07002991 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002992
Jens Axboebcda7ba2020-02-23 16:42:51 -07002993 /* buffer index only valid with fixed read/write, or buffer select */
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -07002994 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
Jens Axboe9adbd452019-12-20 08:45:55 -07002995 return -EINVAL;
2996
Jens Axboe3a6820f2019-12-22 15:19:35 -07002997 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
Jens Axboebcda7ba2020-02-23 16:42:51 -07002998 if (req->flags & REQ_F_BUFFER_SELECT) {
Jens Axboe4d954c22020-02-27 07:31:19 -07002999 buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
Pavel Begunkov867a23e2020-08-20 11:34:39 +03003000 if (IS_ERR(buf))
Jens Axboe4d954c22020-02-27 07:31:19 -07003001 return PTR_ERR(buf);
Jens Axboe3f9d6442020-03-11 12:27:04 -06003002 req->rw.len = sqe_len;
Jens Axboebcda7ba2020-02-23 16:42:51 -07003003 }
3004
Jens Axboe3a6820f2019-12-22 15:19:35 -07003005 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3006 *iovec = NULL;
David Laight10fc72e2020-11-07 13:16:25 +00003007 return ret;
Jens Axboe3a6820f2019-12-22 15:19:35 -07003008 }
3009
Jens Axboe4d954c22020-02-27 07:31:19 -07003010 if (req->flags & REQ_F_BUFFER_SELECT) {
3011 ret = io_iov_buffer_select(req, *iovec, needs_lock);
Pavel Begunkov847595d2021-02-04 13:52:06 +00003012 if (!ret)
3013 iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
Jens Axboe4d954c22020-02-27 07:31:19 -07003014 *iovec = NULL;
3015 return ret;
3016 }
3017
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02003018 return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3019 req->ctx->compat);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003020}
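
/*
 * The three import paths above correspond to different opcodes; roughly, on
 * the liburing side (hedged sketch, assuming an initialized ring):
 *
 *	io_uring_prep_readv(sqe, fd, iovs, nr, off);	     // IORING_OP_READV, iovec array
 *	io_uring_prep_read(sqe, fd, buf, len, off);	     // IORING_OP_READ, plain buffer
 *	io_uring_prep_read_fixed(sqe, fd, buf, len, off, 0); // IORING_OP_READ_FIXED, registered
 *
 * With IOSQE_BUFFER_SELECT, READV is limited to a single iovec (see
 * io_iov_buffer_select() above), while READ/WRITE take the length directly.
 */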
3021
Jens Axboe0fef9482020-08-26 10:36:20 -06003022static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3023{
Pavel Begunkov5b09e372020-09-30 22:57:15 +03003024 return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
Jens Axboe0fef9482020-08-26 10:36:20 -06003025}
3026
Jens Axboe32960612019-09-23 11:05:34 -06003027/*
3028 * For files that don't have ->read_iter() and ->write_iter(), handle them
3029 * by looping over ->read() or ->write() manually.
3030 */
Jens Axboe4017eb92020-10-22 14:14:12 -06003031static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
Jens Axboe32960612019-09-23 11:05:34 -06003032{
Jens Axboe4017eb92020-10-22 14:14:12 -06003033 struct kiocb *kiocb = &req->rw.kiocb;
3034 struct file *file = req->file;
Jens Axboe32960612019-09-23 11:05:34 -06003035 ssize_t ret = 0;
3036
3037 /*
3038 * Don't support polled IO through this interface, and we can't
3039 * support non-blocking either. For the latter, this just causes
3040 * the kiocb to be handled from an async context.
3041 */
3042 if (kiocb->ki_flags & IOCB_HIPRI)
3043 return -EOPNOTSUPP;
3044 if (kiocb->ki_flags & IOCB_NOWAIT)
3045 return -EAGAIN;
3046
3047 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003048 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06003049 ssize_t nr;
3050
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003051 if (!iov_iter_is_bvec(iter)) {
3052 iovec = iov_iter_iovec(iter);
3053 } else {
Jens Axboe4017eb92020-10-22 14:14:12 -06003054 iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3055 iovec.iov_len = req->rw.len;
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03003056 }
3057
Jens Axboe32960612019-09-23 11:05:34 -06003058 if (rw == READ) {
3059 nr = file->f_op->read(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003060 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003061 } else {
3062 nr = file->f_op->write(file, iovec.iov_base,
Jens Axboe0fef9482020-08-26 10:36:20 -06003063 iovec.iov_len, io_kiocb_ppos(kiocb));
Jens Axboe32960612019-09-23 11:05:34 -06003064 }
3065
3066 if (nr < 0) {
3067 if (!ret)
3068 ret = nr;
3069 break;
3070 }
3071 ret += nr;
3072 if (nr != iovec.iov_len)
3073 break;
Jens Axboe4017eb92020-10-22 14:14:12 -06003074 req->rw.len -= nr;
3075 req->rw.addr += nr;
Jens Axboe32960612019-09-23 11:05:34 -06003076 iov_iter_advance(iter, nr);
3077 }
3078
3079 return ret;
3080}
3081
Jens Axboeff6165b2020-08-13 09:47:43 -06003082static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3083 const struct iovec *fast_iov, struct iov_iter *iter)
Jens Axboef67676d2019-12-02 11:03:47 -07003084{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003085 struct io_async_rw *rw = req->async_data;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003086
Jens Axboeff6165b2020-08-13 09:47:43 -06003087 memcpy(&rw->iter, iter, sizeof(*iter));
Pavel Begunkovafb87652020-09-06 00:45:46 +03003088 rw->free_iovec = iovec;
Jens Axboe227c0c92020-08-13 11:51:40 -06003089 rw->bytes_done = 0;
Jens Axboeff6165b2020-08-13 09:47:43 -06003090 /* can only be fixed buffers, no need to do anything */
Pavel Begunkov9c3a2052020-11-23 23:20:27 +00003091 if (iov_iter_is_bvec(iter))
Jens Axboeff6165b2020-08-13 09:47:43 -06003092 return;
Pavel Begunkovb64e3442020-07-13 22:59:18 +03003093 if (!iovec) {
Jens Axboeff6165b2020-08-13 09:47:43 -06003094 unsigned iov_off = 0;
3095
3096 rw->iter.iov = rw->fast_iov;
3097 if (iter->iov != fast_iov) {
3098 iov_off = iter->iov - fast_iov;
3099 rw->iter.iov += iov_off;
3100 }
3101 if (rw->fast_iov != fast_iov)
3102 memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
Xiaoguang Wang45097da2020-04-08 22:29:58 +08003103 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003104 } else {
3105 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07003106 }
3107}
3108
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003109static inline int io_alloc_async_data(struct io_kiocb *req)
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003110{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003111 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3112 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3113 return req->async_data == NULL;
Xiaoguang Wang3d9932a2020-03-27 15:36:52 +08003114}
3115
Jens Axboeff6165b2020-08-13 09:47:43 -06003116static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3117 const struct iovec *fast_iov,
Jens Axboe227c0c92020-08-13 11:51:40 -06003118 struct iov_iter *iter, bool force)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003119{
Pavel Begunkov26f05052021-02-28 22:35:18 +00003120 if (!force && !io_op_defs[req->opcode].needs_async_setup)
Jens Axboe74566df2020-01-13 19:23:24 -07003121 return 0;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003122 if (!req->async_data) {
Pavel Begunkov6cb78682021-02-28 22:35:17 +00003123 if (io_alloc_async_data(req)) {
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003124 kfree(iovec);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003125 return -ENOMEM;
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003126 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003127
Jens Axboeff6165b2020-08-13 09:47:43 -06003128 io_req_map_rw(req, iovec, fast_iov, iter);
Jens Axboe5d204bc2020-01-31 12:06:52 -07003129 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003130 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07003131}
3132
Pavel Begunkov73debe62020-09-30 22:57:54 +03003133static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003134{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003135 struct io_async_rw *iorw = req->async_data;
Pavel Begunkovf4bff102020-09-06 00:45:45 +03003136 struct iovec *iov = iorw->fast_iov;
Pavel Begunkov847595d2021-02-04 13:52:06 +00003137 int ret;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003138
Pavel Begunkov2846c482020-11-07 13:16:27 +00003139 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003140 if (unlikely(ret < 0))
3141 return ret;
3142
Pavel Begunkovab0b1962020-09-06 00:45:47 +03003143 iorw->bytes_done = 0;
3144 iorw->free_iovec = iov;
3145 if (iov)
3146 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkovc3e330a2020-07-13 22:59:19 +03003147 return 0;
3148}
3149
Pavel Begunkov73debe62020-09-30 22:57:54 +03003150static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003151{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003152 if (unlikely(!(req->file->f_mode & FMODE_READ)))
3153 return -EBADF;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00003154 return io_prep_rw(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07003155}
3156
Jens Axboec1dd91d2020-08-03 16:43:59 -06003157/*
3158 * This is our waitqueue callback handler, registered through lock_page_async()
3159 * when we initially tried to do the IO with our waitqueue armed in the iocb.
3160 * This gets called when the page is unlocked, and we generally expect that to
3161 * happen when the page IO is completed and the page is now uptodate. This will
3162 * queue a task_work based retry of the operation, attempting to copy the data
3163 * again. If the latter fails because the page was NOT uptodate, then we will
3164 * do a thread based blocking retry of the operation. That's the unexpected
3165 * slow path.
3166 */
Jens Axboebcf5a062020-05-22 09:24:42 -06003167static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3168 int sync, void *arg)
3169{
3170 struct wait_page_queue *wpq;
3171 struct io_kiocb *req = wait->private;
Jens Axboebcf5a062020-05-22 09:24:42 -06003172 struct wait_page_key *key = arg;
Jens Axboebcf5a062020-05-22 09:24:42 -06003173
3174 wpq = container_of(wait, struct wait_page_queue, wait);
3175
Linus Torvaldscdc8fcb2020-08-03 13:01:22 -07003176 if (!wake_page_match(wpq, key))
3177 return 0;
3178
Hao Xuc8d317a2020-09-29 20:00:45 +08003179 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
Jens Axboebcf5a062020-05-22 09:24:42 -06003180 list_del_init(&wait->entry);
3181
Jens Axboebcf5a062020-05-22 09:24:42 -06003182 /* submit ref gets dropped, acquire a new one */
Jens Axboede9b4cc2021-02-24 13:28:27 -07003183 req_ref_get(req);
Pavel Begunkov921b9052021-02-12 03:23:53 +00003184 io_req_task_queue(req);
Jens Axboebcf5a062020-05-22 09:24:42 -06003185 return 1;
3186}
3187
Jens Axboec1dd91d2020-08-03 16:43:59 -06003188/*
3189 * This controls whether a given IO request should be armed for async page
3190 * based retry. If we return false here, the request is handed to the async
3191 * worker threads for retry. If we're doing buffered reads on a regular file,
3192 * we prepare a private wait_page_queue entry and retry the operation. This
3193 * will either succeed because the page is now uptodate and unlocked, or it
3194 * will register a callback when the page is unlocked at IO completion. Through
3195 * that callback, io_uring uses task_work to setup a retry of the operation.
3196 * That retry will attempt the buffered read again. The retry will generally
3197 * succeed, or in rare cases where it fails, we then fall back to using the
3198 * async worker threads for a blocking retry.
3199 */
Jens Axboe227c0c92020-08-13 11:51:40 -06003200static bool io_rw_should_retry(struct io_kiocb *req)
Jens Axboebcf5a062020-05-22 09:24:42 -06003201{
Jens Axboee8c2bc12020-08-15 18:44:09 -07003202 struct io_async_rw *rw = req->async_data;
3203 struct wait_page_queue *wait = &rw->wpq;
Jens Axboebcf5a062020-05-22 09:24:42 -06003204 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboebcf5a062020-05-22 09:24:42 -06003205
3206 /* never retry for NOWAIT, we just complete with -EAGAIN */
3207 if (req->flags & REQ_F_NOWAIT)
3208 return false;
3209
Jens Axboe227c0c92020-08-13 11:51:40 -06003210 /* Only for buffered IO */
Jens Axboe3b2a4432020-08-16 10:58:43 -07003211 if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
Jens Axboebcf5a062020-05-22 09:24:42 -06003212 return false;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003213
Jens Axboebcf5a062020-05-22 09:24:42 -06003214 /*
3215 * just use poll if we can, and don't attempt if the fs doesn't
3216 * support callback based unlocks
3217 */
3218 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3219 return false;
3220
Jens Axboe3b2a4432020-08-16 10:58:43 -07003221 wait->wait.func = io_async_buf_func;
3222 wait->wait.private = req;
3223 wait->wait.flags = 0;
3224 INIT_LIST_HEAD(&wait->wait.entry);
3225 kiocb->ki_flags |= IOCB_WAITQ;
Hao Xuc8d317a2020-09-29 20:00:45 +08003226 kiocb->ki_flags &= ~IOCB_NOWAIT;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003227 kiocb->ki_waitq = wait;
Jens Axboe3b2a4432020-08-16 10:58:43 -07003228 return true;
Jens Axboebcf5a062020-05-22 09:24:42 -06003229}
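
/*
 * For the waitqueue-based retry above to trigger, the file must advertise
 * FMODE_BUF_RASYNC. A minimal sketch of how a filesystem ->open() typically
 * opts in ("myfs" is a hypothetical example, modeled on filesystems that set
 * the flag):
 *
 *	static int myfs_file_open(struct inode *inode, struct file *filp)
 *	{
 *		// buffered reads on this file may be retried via IOCB_WAITQ
 *		filp->f_mode |= FMODE_BUF_RASYNC;
 *		return generic_file_open(inode, filp);
 *	}
 */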
3230
3231static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3232{
3233 if (req->file->f_op->read_iter)
3234 return call_read_iter(req->file, &req->rw.kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003235 else if (req->file->f_op->read)
Jens Axboe4017eb92020-10-22 14:14:12 -06003236 return loop_rw_iter(READ, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003237 else
3238 return -EINVAL;
Jens Axboebcf5a062020-05-22 09:24:42 -06003239}
3240
Pavel Begunkov889fca72021-02-10 00:03:09 +00003241static int io_read(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003242{
3243 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003244 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003245 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003246 struct io_async_rw *rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003247 ssize_t io_size, ret, ret2;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003248 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003249
Pavel Begunkov2846c482020-11-07 13:16:27 +00003250 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003251 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003252 iovec = NULL;
3253 } else {
3254 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3255 if (ret < 0)
3256 return ret;
3257 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003258 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003259 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003260
Jens Axboefd6c2e42019-12-18 12:19:41 -07003261 /* Ensure we clear previously set non-block flag */
3262 if (!force_nonblock)
Jens Axboe29de5f62020-02-20 09:56:08 -07003263 kiocb->ki_flags &= ~IOCB_NOWAIT;
Pavel Begunkova88fc402020-09-30 22:57:53 +03003264 else
3265 kiocb->ki_flags |= IOCB_NOWAIT;
3266
Pavel Begunkov24c74672020-06-21 13:09:51 +03003267 /* If the file doesn't support async, just async punt */
Jens Axboe7b29f922021-03-12 08:30:14 -07003268 if (force_nonblock && !io_file_supports_async(req, READ)) {
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003269 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003270 return ret ?: -EAGAIN;
Pavel Begunkov6713e7a2021-02-04 13:51:59 +00003271 }
Jens Axboe9e645e112019-05-10 16:07:28 -06003272
Pavel Begunkov632546c2020-11-07 13:16:26 +00003273 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003274 if (unlikely(ret)) {
3275 kfree(iovec);
3276 return ret;
3277 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003278
Jens Axboe227c0c92020-08-13 11:51:40 -06003279 ret = io_iter_do_read(req, iter);
Jens Axboe32960612019-09-23 11:05:34 -06003280
Jens Axboe230d50d2021-04-01 20:41:15 -06003281 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003282 req->flags &= ~REQ_F_REISSUE;
Jens Axboeeefdf302020-08-27 16:40:19 -06003283 /* IOPOLL retry should happen for io-wq threads */
3284 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboef91daf52020-08-15 15:58:42 -07003285 goto done;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003286 /* no retry on NONBLOCK or RWF_NOWAIT */
3287 if (req->flags & REQ_F_NOWAIT)
Jens Axboe355afae2020-09-02 09:30:31 -06003288 goto done;
Jens Axboe84216312020-08-24 11:45:26 -06003289 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003290 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboef38c7e32020-09-25 15:23:43 -06003291 ret = 0;
Jens Axboe230d50d2021-04-01 20:41:15 -06003292 } else if (ret == -EIOCBQUEUED) {
3293 goto out_free;
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003294 } else if (ret <= 0 || ret == io_size || !force_nonblock ||
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003295 (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
Pavel Begunkov7335e3b2021-02-04 13:52:02 +00003296 /* read all, failed, already did sync or don't want to retry */
Jens Axboe00d23d52020-08-25 12:59:22 -06003297 goto done;
Jens Axboe227c0c92020-08-13 11:51:40 -06003298 }
3299
Jens Axboe227c0c92020-08-13 11:51:40 -06003300 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003301 if (ret2)
3302 return ret2;
3303
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003304 iovec = NULL;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003305 rw = req->async_data;
Jens Axboe227c0c92020-08-13 11:51:40 -06003306 /* now use our persistent iterator, if we aren't already */
Jens Axboee8c2bc12020-08-15 18:44:09 -07003307 iter = &rw->iter;
Jens Axboe227c0c92020-08-13 11:51:40 -06003308
Pavel Begunkovb23df912021-02-04 13:52:04 +00003309 do {
3310 io_size -= ret;
3311 rw->bytes_done += ret;
3312 /* if we can retry, do so with the callbacks armed */
3313 if (!io_rw_should_retry(req)) {
3314 kiocb->ki_flags &= ~IOCB_WAITQ;
3315 return -EAGAIN;
3316 }
3317
3318 /*
3319 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
3320 * we get -EIOCBQUEUED, then we'll get a notification when the
3321 * desired page gets unlocked. We can also get a partial read
3322 * here, and if we do, then just retry at the new offset.
3323 */
3324 ret = io_iter_do_read(req, iter);
3325 if (ret == -EIOCBQUEUED)
3326 return 0;
Jens Axboe227c0c92020-08-13 11:51:40 -06003327 /* we got some bytes, but not all. retry. */
Jens Axboeb5b0ecb2021-03-04 21:02:58 -07003328 kiocb->ki_flags &= ~IOCB_WAITQ;
Pavel Begunkovb23df912021-02-04 13:52:04 +00003329 } while (ret > 0 && ret < io_size);
Jens Axboe227c0c92020-08-13 11:51:40 -06003330done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003331 kiocb_done(kiocb, ret, issue_flags);
Pavel Begunkovfe1cdd52021-02-17 21:02:36 +00003332out_free:
3333 /* it's faster to check for NULL here than to delegate to kfree */
3334 if (iovec)
3335 kfree(iovec);
Pavel Begunkov5ea5dd42021-02-04 13:52:03 +00003336 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003337}
3338
Pavel Begunkov73debe62020-09-30 22:57:54 +03003339static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07003340{
Jens Axboe3529d8c2019-12-19 18:24:38 -07003341 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3342 return -EBADF;
Pavel Begunkov93642ef2021-02-18 18:29:44 +00003343 return io_prep_rw(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07003344}
3345
Pavel Begunkov889fca72021-02-10 00:03:09 +00003346static int io_write(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003347{
3348 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07003349 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboeff6165b2020-08-13 09:47:43 -06003350 struct iov_iter __iter, *iter = &__iter;
Jens Axboee8c2bc12020-08-15 18:44:09 -07003351 struct io_async_rw *rw = req->async_data;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003352 ssize_t ret, ret2, io_size;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003353 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003354
Pavel Begunkov2846c482020-11-07 13:16:27 +00003355 if (rw) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07003356 iter = &rw->iter;
Pavel Begunkov2846c482020-11-07 13:16:27 +00003357 iovec = NULL;
3358 } else {
3359 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3360 if (ret < 0)
3361 return ret;
3362 }
Pavel Begunkov632546c2020-11-07 13:16:26 +00003363 io_size = iov_iter_count(iter);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003364 req->result = io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003365
Jens Axboefd6c2e42019-12-18 12:19:41 -07003366 /* Ensure we clear previously set non-block flag */
3367 if (!force_nonblock)
Pavel Begunkova88fc402020-09-30 22:57:53 +03003368 kiocb->ki_flags &= ~IOCB_NOWAIT;
3369 else
3370 kiocb->ki_flags |= IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07003371
Pavel Begunkov24c74672020-06-21 13:09:51 +03003372 /* If the file doesn't support async, just async punt */
Jens Axboe7b29f922021-03-12 08:30:14 -07003373 if (force_nonblock && !io_file_supports_async(req, WRITE))
Jens Axboef67676d2019-12-02 11:03:47 -07003374 goto copy_iov;
Jens Axboef67676d2019-12-02 11:03:47 -07003375
Jens Axboe10d59342019-12-09 20:16:22 -07003376 /* file path doesn't support NOWAIT for non-direct_IO */
3377 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3378 (req->flags & REQ_F_ISREG))
Jens Axboef67676d2019-12-02 11:03:47 -07003379 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06003380
Pavel Begunkov632546c2020-11-07 13:16:26 +00003381 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003382 if (unlikely(ret))
3383 goto out_free;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003384
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003385 /*
3386 * Open-code file_start_write here to grab freeze protection,
3387 * which will be released by another thread in
3388 * io_complete_rw(). Fool lockdep by telling it the lock got
3389 * released so that it doesn't complain about the held lock when
3390 * we return to userspace.
3391 */
3392 if (req->flags & REQ_F_ISREG) {
Darrick J. Wong8a3c84b2020-11-10 16:50:21 -08003393 sb_start_write(file_inode(req->file)->i_sb);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003394 __sb_writers_release(file_inode(req->file)->i_sb,
3395 SB_FREEZE_WRITE);
3396 }
3397 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01003398
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003399 if (req->file->f_op->write_iter)
Jens Axboeff6165b2020-08-13 09:47:43 -06003400 ret2 = call_write_iter(req->file, kiocb, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003401 else if (req->file->f_op->write)
Jens Axboe4017eb92020-10-22 14:14:12 -06003402 ret2 = loop_rw_iter(WRITE, req, iter);
Guoyu Huang2dd21112020-08-05 03:53:50 -07003403 else
3404 ret2 = -EINVAL;
Jens Axboe4ed734b2020-03-20 11:23:41 -06003405
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003406 if (req->flags & REQ_F_REISSUE) {
3407 req->flags &= ~REQ_F_REISSUE;
Jens Axboe230d50d2021-04-01 20:41:15 -06003408 ret2 = -EAGAIN;
Pavel Begunkov6ad7f232021-04-08 01:54:39 +01003409 }
Jens Axboe230d50d2021-04-01 20:41:15 -06003410
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003411 /*
3412 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3413 * retry them without IOCB_NOWAIT.
3414 */
3415 if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3416 ret2 = -EAGAIN;
Pavel Begunkov75c668c2021-02-04 13:52:05 +00003417 /* no retry on NONBLOCK or RWF_NOWAIT */
3418 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
Jens Axboe355afae2020-09-02 09:30:31 -06003419 goto done;
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003420 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboeeefdf302020-08-27 16:40:19 -06003421 /* IOPOLL retry should happen for io-wq threads */
3422 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3423 goto copy_iov;
Jens Axboe355afae2020-09-02 09:30:31 -06003424done:
Pavel Begunkov889fca72021-02-10 00:03:09 +00003425 kiocb_done(kiocb, ret2, issue_flags);
Pavel Begunkovfa15baf2020-08-01 13:50:02 +03003426 } else {
Jens Axboef67676d2019-12-02 11:03:47 -07003427copy_iov:
Jens Axboe84216312020-08-24 11:45:26 -06003428 /* some cases will consume bytes even on error returns */
Pavel Begunkov632546c2020-11-07 13:16:26 +00003429 iov_iter_revert(iter, io_size - iov_iter_count(iter));
Jens Axboe227c0c92020-08-13 11:51:40 -06003430 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
Pavel Begunkov6bf985d2021-02-04 13:52:01 +00003431 return ret ?: -EAGAIN;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003432 }
Jens Axboe31b51512019-01-18 22:56:34 -07003433out_free:
Pavel Begunkovf261c162020-08-20 11:34:10 +03003434 /* it's reportedly faster than delegating the null check to kfree() */
Pavel Begunkov252917c2020-07-13 22:59:20 +03003435 if (iovec)
Xiaoguang Wang6f2cc162020-06-18 15:01:56 +08003436 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003437 return ret;
3438}
3439
Jens Axboe80a261f2020-09-28 14:23:58 -06003440static int io_renameat_prep(struct io_kiocb *req,
3441 const struct io_uring_sqe *sqe)
3442{
3443 struct io_rename *ren = &req->rename;
3444 const char __user *oldf, *newf;
3445
3446 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3447 return -EBADF;
3448
3449 ren->old_dfd = READ_ONCE(sqe->fd);
3450 oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
3451 newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3452 ren->new_dfd = READ_ONCE(sqe->len);
3453 ren->flags = READ_ONCE(sqe->rename_flags);
3454
3455 ren->oldpath = getname(oldf);
3456 if (IS_ERR(ren->oldpath))
3457 return PTR_ERR(ren->oldpath);
3458
3459 ren->newpath = getname(newf);
3460 if (IS_ERR(ren->newpath)) {
3461 putname(ren->oldpath);
3462 return PTR_ERR(ren->newpath);
3463 }
3464
3465 req->flags |= REQ_F_NEED_CLEANUP;
3466 return 0;
3467}
3468
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003469static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe80a261f2020-09-28 14:23:58 -06003470{
3471 struct io_rename *ren = &req->rename;
3472 int ret;
3473
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003474 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe80a261f2020-09-28 14:23:58 -06003475 return -EAGAIN;
3476
3477 ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
3478 ren->newpath, ren->flags);
3479
3480 req->flags &= ~REQ_F_NEED_CLEANUP;
3481 if (ret < 0)
3482 req_set_fail_links(req);
3483 io_req_complete(req, ret);
3484 return 0;
3485}
3486
Jens Axboe14a11432020-09-28 14:27:37 -06003487static int io_unlinkat_prep(struct io_kiocb *req,
3488 const struct io_uring_sqe *sqe)
3489{
3490 struct io_unlink *un = &req->unlink;
3491 const char __user *fname;
3492
3493 if (unlikely(req->flags & REQ_F_FIXED_FILE))
3494 return -EBADF;
3495
3496 un->dfd = READ_ONCE(sqe->fd);
3497
3498 un->flags = READ_ONCE(sqe->unlink_flags);
3499 if (un->flags & ~AT_REMOVEDIR)
3500 return -EINVAL;
3501
3502 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3503 un->filename = getname(fname);
3504 if (IS_ERR(un->filename))
3505 return PTR_ERR(un->filename);
3506
3507 req->flags |= REQ_F_NEED_CLEANUP;
3508 return 0;
3509}
3510
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003511static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe14a11432020-09-28 14:27:37 -06003512{
3513 struct io_unlink *un = &req->unlink;
3514 int ret;
3515
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003516 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe14a11432020-09-28 14:27:37 -06003517 return -EAGAIN;
3518
3519 if (un->flags & AT_REMOVEDIR)
3520 ret = do_rmdir(un->dfd, un->filename);
3521 else
3522 ret = do_unlinkat(un->dfd, un->filename);
3523
3524 req->flags &= ~REQ_F_NEED_CLEANUP;
3525 if (ret < 0)
3526 req_set_fail_links(req);
3527 io_req_complete(req, ret);
3528 return 0;
3529}
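
/*
 * Userspace sketch for the two ops above, assuming liburing's prep helpers
 * for them and an initialized ring; results land in cqe->res just like the
 * renameat2(2) and unlinkat(2) syscalls:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_renameat(sqe, AT_FDCWD, "old.txt", AT_FDCWD, "new.txt", 0);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_unlinkat(sqe, AT_FDCWD, "stale.txt", 0);
 *
 *	io_uring_submit(&ring);
 */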
3530
Jens Axboe36f4fa62020-09-05 11:14:22 -06003531static int io_shutdown_prep(struct io_kiocb *req,
3532 const struct io_uring_sqe *sqe)
3533{
3534#if defined(CONFIG_NET)
3535 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3536 return -EINVAL;
3537 if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
3538 sqe->buf_index)
3539 return -EINVAL;
3540
3541 req->shutdown.how = READ_ONCE(sqe->len);
3542 return 0;
3543#else
3544 return -EOPNOTSUPP;
3545#endif
3546}
3547
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003548static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003549{
3550#if defined(CONFIG_NET)
3551 struct socket *sock;
3552 int ret;
3553
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003554 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe36f4fa62020-09-05 11:14:22 -06003555 return -EAGAIN;
3556
Linus Torvalds48aba792020-12-16 12:44:05 -08003557 sock = sock_from_file(req->file);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003558 if (unlikely(!sock))
Linus Torvalds48aba792020-12-16 12:44:05 -08003559 return -ENOTSOCK;
Jens Axboe36f4fa62020-09-05 11:14:22 -06003560
3561 ret = __sys_shutdown_sock(sock, req->shutdown.how);
Jens Axboea1464682020-12-14 20:57:27 -07003562 if (ret < 0)
3563 req_set_fail_links(req);
Jens Axboe36f4fa62020-09-05 11:14:22 -06003564 io_req_complete(req, ret);
3565 return 0;
3566#else
3567 return -EOPNOTSUPP;
3568#endif
3569}
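
/*
 * Submission-side sketch, assuming liburing exposes a prep helper that
 * mirrors shutdown(2) and that 'sockfd' is a connected socket:
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);	// same 'how' values as shutdown(2)
 *	io_uring_submit(&ring);
 */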
3570
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003571static int __io_splice_prep(struct io_kiocb *req,
3572 const struct io_uring_sqe *sqe)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003573{
3574 struct io_splice* sp = &req->splice;
3575 unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003576
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003577 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3578 return -EINVAL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003579
3580 sp->file_in = NULL;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003581 sp->len = READ_ONCE(sqe->len);
3582 sp->flags = READ_ONCE(sqe->splice_flags);
3583
3584 if (unlikely(sp->flags & ~valid_flags))
3585 return -EINVAL;
3586
Pavel Begunkov8371adf2020-10-10 18:34:08 +01003587 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3588 (sp->flags & SPLICE_F_FD_IN_FIXED));
3589 if (!sp->file_in)
3590 return -EBADF;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003591 req->flags |= REQ_F_NEED_CLEANUP;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003592 return 0;
3593}
3594
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003595static int io_tee_prep(struct io_kiocb *req,
3596 const struct io_uring_sqe *sqe)
3597{
3598 if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3599 return -EINVAL;
3600 return __io_splice_prep(req, sqe);
3601}
3602
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003603static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003604{
3605 struct io_splice *sp = &req->splice;
3606 struct file *in = sp->file_in;
3607 struct file *out = sp->file_out;
3608 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3609 long ret = 0;
3610
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003611 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003612 return -EAGAIN;
3613 if (sp->len)
3614 ret = do_tee(in, out, sp->len, flags);
3615
Pavel Begunkove1d767f2021-03-19 17:22:43 +00003616 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3617 io_put_file(in);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003618 req->flags &= ~REQ_F_NEED_CLEANUP;
3619
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003620 if (ret != sp->len)
3621 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003622 io_req_complete(req, ret);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03003623 return 0;
3624}
3625
3626static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3627{
3628	struct io_splice *sp = &req->splice;
3629
3630 sp->off_in = READ_ONCE(sqe->splice_off_in);
3631 sp->off_out = READ_ONCE(sqe->off);
3632 return __io_splice_prep(req, sqe);
3633}
3634
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003635static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003636{
3637 struct io_splice *sp = &req->splice;
3638 struct file *in = sp->file_in;
3639 struct file *out = sp->file_out;
3640 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3641 loff_t *poff_in, *poff_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003642 long ret = 0;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003643
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003644 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov2fb3e822020-05-01 17:09:38 +03003645 return -EAGAIN;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003646
3647 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3648 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
Pavel Begunkovc9687422020-05-04 23:00:54 +03003649
Jens Axboe948a7742020-05-17 14:21:38 -06003650 if (sp->len)
Pavel Begunkovc9687422020-05-04 23:00:54 +03003651 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003652
Pavel Begunkove1d767f2021-03-19 17:22:43 +00003653 if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
3654 io_put_file(in);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003655 req->flags &= ~REQ_F_NEED_CLEANUP;
3656
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003657 if (ret != sp->len)
3658 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003659 io_req_complete(req, ret);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03003660 return 0;
3661}
3662
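/*
 * Editorial usage sketch (assumes liburing's io_uring_prep_splice() and
 * io_uring_prep_tee() helpers plus an initialised ring; the fds are
 * illustrative): an offset of -1 means "use the file's current position",
 * mirroring the poff_in/poff_out handling in io_splice() above, and
 * SPLICE_F_FD_IN_FIXED in splice_flags makes fd_in refer to a registered
 * (fixed) file instead of a normal descriptor.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// move up to 64KiB from a pipe into a file at offset 0
 *	io_uring_prep_splice(sqe, pipe_rd_fd, -1, file_fd, 0, 65536, 0);
 *	io_uring_submit(&ring);
 */
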
Jens Axboe2b188cc2019-01-07 10:46:33 -07003663/*
3664 * IORING_OP_NOP just posts a completion event, nothing else.
3665 */
Pavel Begunkov889fca72021-02-10 00:03:09 +00003666static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003667{
3668 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003669
Jens Axboedef596e2019-01-09 08:59:42 -07003670 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3671 return -EINVAL;
3672
Pavel Begunkov889fca72021-02-10 00:03:09 +00003673 __io_req_complete(req, issue_flags, 0, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003674 return 0;
3675}
3676
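/*
 * Editorial usage sketch, not part of the kernel source: IORING_OP_NOP is
 * handy for verifying that a ring is wired up, since it only posts a
 * completion. A minimal self-contained userspace test (assuming liburing):
 *
 *	#include <liburing.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct io_uring ring;
 *		struct io_uring_sqe *sqe;
 *		struct io_uring_cqe *cqe;
 *
 *		if (io_uring_queue_init(8, &ring, 0) < 0)
 *			return 1;
 *		sqe = io_uring_get_sqe(&ring);
 *		io_uring_prep_nop(sqe);
 *		io_uring_submit(&ring);
 *		io_uring_wait_cqe(&ring, &cqe);
 *		printf("nop res=%d\n", cqe->res);	// expect 0
 *		io_uring_cqe_seen(&ring, cqe);
 *		io_uring_queue_exit(&ring);
 *		return 0;
 *	}
 */
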
Pavel Begunkov1155c762021-02-18 18:29:38 +00003677static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003678{
Jens Axboe6b063142019-01-10 22:13:58 -07003679 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003680
Jens Axboe09bb8392019-03-13 12:39:28 -06003681 if (!req->file)
3682 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003683
Jens Axboe6b063142019-01-10 22:13:58 -07003684 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07003685 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07003686 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003687 return -EINVAL;
3688
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003689 req->sync.flags = READ_ONCE(sqe->fsync_flags);
3690 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3691 return -EINVAL;
3692
3693 req->sync.off = READ_ONCE(sqe->off);
3694 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003695 return 0;
3696}
3697
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003698static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe78912932020-01-14 22:09:06 -07003699{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003700 loff_t end = req->sync.off + req->sync.len;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003701 int ret;
3702
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003703 /* fsync always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003704 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003705 return -EAGAIN;
3706
Jens Axboe9adbd452019-12-20 08:45:55 -07003707 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003708 end > 0 ? end : LLONG_MAX,
3709 req->sync.flags & IORING_FSYNC_DATASYNC);
3710 if (ret < 0)
3711 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003712 io_req_complete(req, ret);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07003713 return 0;
3714}
3715
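/*
 * Editorial usage sketch (assumes liburing's io_uring_prep_fsync() and an
 * initialised ring): the only flag io_fsync_prep() accepts is
 * IORING_FSYNC_DATASYNC, which turns the vfs_fsync_range() call above into a
 * datasync. The application may additionally set sqe->off/sqe->len to bound
 * the synced range.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 *	io_uring_submit(&ring);
 */
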
Jens Axboed63d1b52019-12-10 10:38:56 -07003716static int io_fallocate_prep(struct io_kiocb *req,
3717 const struct io_uring_sqe *sqe)
3718{
3719 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
3720 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03003721 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3722 return -EINVAL;
Jens Axboed63d1b52019-12-10 10:38:56 -07003723
3724 req->sync.off = READ_ONCE(sqe->off);
3725 req->sync.len = READ_ONCE(sqe->addr);
3726 req->sync.mode = READ_ONCE(sqe->len);
3727 return 0;
3728}
3729
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003730static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboed63d1b52019-12-10 10:38:56 -07003731{
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003732 int ret;
Jens Axboed63d1b52019-12-10 10:38:56 -07003733
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003734	/* fallocate always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003735 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003736 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003737 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3738 req->sync.len);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03003739 if (ret < 0)
3740 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003741 io_req_complete(req, ret);
Jens Axboed63d1b52019-12-10 10:38:56 -07003742 return 0;
3743}
3744
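/*
 * Editorial usage sketch (assumes liburing's io_uring_prep_fallocate() and an
 * initialised ring; fd and sizes are illustrative): note the slightly unusual
 * SQE layout that io_fallocate_prep() decodes above -- the length travels in
 * sqe->addr and the fallocate mode in sqe->len.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// preallocate 1 MiB at the start of the file
 *	io_uring_prep_fallocate(sqe, fd, 0, 0, 1024 * 1024);
 *	io_uring_submit(&ring);
 */
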
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003745static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003746{
Jens Axboef8748882020-01-08 17:47:02 -07003747 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003748 int ret;
3749
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003750 if (unlikely(sqe->ioprio || sqe->buf_index))
Jens Axboe15b71ab2019-12-11 11:20:36 -07003751 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003752 if (unlikely(req->flags & REQ_F_FIXED_FILE))
Jens Axboecf3040c2020-02-06 21:31:40 -07003753 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003754
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003755	/* open.how should already be initialised */
3756 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
Jens Axboe08a1d26eb2020-04-08 09:20:54 -06003757 req->open.how.flags |= O_LARGEFILE;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003758
Pavel Begunkov25e72d12020-06-03 18:03:23 +03003759 req->open.dfd = READ_ONCE(sqe->fd);
3760 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboef8748882020-01-08 17:47:02 -07003761 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003762 if (IS_ERR(req->open.filename)) {
3763 ret = PTR_ERR(req->open.filename);
3764 req->open.filename = NULL;
3765 return ret;
3766 }
Jens Axboe4022e7a2020-03-19 19:23:18 -06003767 req->open.nofile = rlimit(RLIMIT_NOFILE);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003768 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003769 return 0;
3770}
3771
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003772static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3773{
3774 u64 flags, mode;
3775
Jens Axboe14587a462020-09-05 11:36:08 -06003776 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003777 return -EINVAL;
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003778 mode = READ_ONCE(sqe->len);
3779 flags = READ_ONCE(sqe->open_flags);
3780 req->open.how = build_open_how(flags, mode);
3781 return __io_openat_prep(req, sqe);
3782}
3783
Jens Axboecebdb982020-01-08 17:59:24 -07003784static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3785{
3786 struct open_how __user *how;
Jens Axboecebdb982020-01-08 17:59:24 -07003787 size_t len;
3788 int ret;
3789
Jens Axboe14587a462020-09-05 11:36:08 -06003790 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe4eb8dde2020-09-18 19:36:24 -06003791 return -EINVAL;
Jens Axboecebdb982020-01-08 17:59:24 -07003792 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3793 len = READ_ONCE(sqe->len);
Jens Axboecebdb982020-01-08 17:59:24 -07003794 if (len < OPEN_HOW_SIZE_VER0)
3795 return -EINVAL;
3796
3797 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3798 len);
3799 if (ret)
3800 return ret;
3801
Pavel Begunkovec65fea2020-06-03 18:03:24 +03003802 return __io_openat_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07003803}
3804
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003805static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe15b71ab2019-12-11 11:20:36 -07003806{
3807 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003808 struct file *file;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003809 bool nonblock_set;
3810 bool resolve_nonblock;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003811 int ret;
3812
Jens Axboecebdb982020-01-08 17:59:24 -07003813 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003814 if (ret)
3815 goto err;
Jens Axboe3a81fd02020-12-10 12:25:36 -07003816 nonblock_set = op.open_flag & O_NONBLOCK;
3817 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003818 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003819 /*
3820 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
3821		 * it'll always return -EAGAIN
3822 */
3823 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
3824 return -EAGAIN;
3825 op.lookup_flags |= LOOKUP_CACHED;
3826 op.open_flag |= O_NONBLOCK;
3827 }
Jens Axboe15b71ab2019-12-11 11:20:36 -07003828
Jens Axboe4022e7a2020-03-19 19:23:18 -06003829 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003830 if (ret < 0)
3831 goto err;
3832
3833 file = do_filp_open(req->open.dfd, req->open.filename, &op);
Jens Axboe3a81fd02020-12-10 12:25:36 -07003834 /* only retry if RESOLVE_CACHED wasn't already set by application */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003835 if ((!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)) &&
3836 file == ERR_PTR(-EAGAIN)) {
Jens Axboe3a81fd02020-12-10 12:25:36 -07003837 /*
3838 * We could hang on to this 'fd', but seems like marginal
3839 * gain for something that is now known to be a slower path.
3840 * So just put it, and we'll get a new one when we retry.
3841 */
3842 put_unused_fd(ret);
3843 return -EAGAIN;
3844 }
3845
Jens Axboe15b71ab2019-12-11 11:20:36 -07003846 if (IS_ERR(file)) {
3847 put_unused_fd(ret);
3848 ret = PTR_ERR(file);
3849 } else {
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003850 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
Jens Axboe3a81fd02020-12-10 12:25:36 -07003851 file->f_flags &= ~O_NONBLOCK;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003852 fsnotify_open(file);
3853 fd_install(ret, file);
3854 }
3855err:
3856 putname(req->open.filename);
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03003857 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe15b71ab2019-12-11 11:20:36 -07003858 if (ret < 0)
3859 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06003860 io_req_complete(req, ret);
Jens Axboe15b71ab2019-12-11 11:20:36 -07003861 return 0;
3862}
3863
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003864static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboecebdb982020-01-08 17:59:24 -07003865{
Pavel Begunkove45cff52021-02-28 22:35:14 +00003866 return io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07003867}
3868
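/*
 * Editorial usage sketch (assumes liburing's io_uring_prep_openat2() helper
 * and an initialised ring; the path is illustrative): io_openat2() above
 * first tries a cached, non-blocking lookup when issued with
 * IO_URING_F_NONBLOCK and only falls back to the async worker on -EAGAIN.
 *
 *	#include <linux/openat2.h>
 *
 *	struct open_how how = { .flags = O_RDONLY };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_openat2(sqe, AT_FDCWD, "data.bin", &how);
 *	io_uring_submit(&ring);
 *	// cqe->res is the new fd on success, or a negative errno
 */
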
Jens Axboe067524e2020-03-02 16:32:28 -07003869static int io_remove_buffers_prep(struct io_kiocb *req,
3870 const struct io_uring_sqe *sqe)
3871{
3872 struct io_provide_buf *p = &req->pbuf;
3873 u64 tmp;
3874
3875 if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
3876 return -EINVAL;
3877
3878 tmp = READ_ONCE(sqe->fd);
3879 if (!tmp || tmp > USHRT_MAX)
3880 return -EINVAL;
3881
3882 memset(p, 0, sizeof(*p));
3883 p->nbufs = tmp;
3884 p->bgid = READ_ONCE(sqe->buf_group);
3885 return 0;
3886}
3887
3888static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3889 int bgid, unsigned nbufs)
3890{
3891 unsigned i = 0;
3892
3893 /* shouldn't happen */
3894 if (!nbufs)
3895 return 0;
3896
3897 /* the head kbuf is the list itself */
3898 while (!list_empty(&buf->list)) {
3899 struct io_buffer *nxt;
3900
3901 nxt = list_first_entry(&buf->list, struct io_buffer, list);
3902 list_del(&nxt->list);
3903 kfree(nxt);
3904 if (++i == nbufs)
3905 return i;
3906 }
3907 i++;
3908 kfree(buf);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003909 xa_erase(&ctx->io_buffers, bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003910
3911 return i;
3912}
3913
Pavel Begunkov889fca72021-02-10 00:03:09 +00003914static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe067524e2020-03-02 16:32:28 -07003915{
3916 struct io_provide_buf *p = &req->pbuf;
3917 struct io_ring_ctx *ctx = req->ctx;
3918 struct io_buffer *head;
3919 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00003920 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe067524e2020-03-02 16:32:28 -07003921
3922 io_ring_submit_lock(ctx, !force_nonblock);
3923
3924 lockdep_assert_held(&ctx->uring_lock);
3925
3926 ret = -ENOENT;
Jens Axboe9e15c3a2021-03-13 12:29:43 -07003927 head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboe067524e2020-03-02 16:32:28 -07003928 if (head)
3929 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
Jens Axboe067524e2020-03-02 16:32:28 -07003930 if (ret < 0)
3931 req_set_fail_links(req);
Pavel Begunkov31bff9a2020-12-06 22:22:43 +00003932
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00003933 /* complete before unlock, IOPOLL may need the lock */
3934 __io_req_complete(req, issue_flags, ret, 0);
3935 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboe067524e2020-03-02 16:32:28 -07003936 return 0;
3937}
3938
Jens Axboeddf0322d2020-02-23 16:41:33 -07003939static int io_provide_buffers_prep(struct io_kiocb *req,
3940 const struct io_uring_sqe *sqe)
3941{
Pavel Begunkovd81269f2021-03-19 10:21:19 +00003942 unsigned long size;
Jens Axboeddf0322d2020-02-23 16:41:33 -07003943 struct io_provide_buf *p = &req->pbuf;
3944 u64 tmp;
3945
3946 if (sqe->ioprio || sqe->rw_flags)
3947 return -EINVAL;
3948
3949 tmp = READ_ONCE(sqe->fd);
3950 if (!tmp || tmp > USHRT_MAX)
3951 return -E2BIG;
3952 p->nbufs = tmp;
3953 p->addr = READ_ONCE(sqe->addr);
3954 p->len = READ_ONCE(sqe->len);
3955
Pavel Begunkovd81269f2021-03-19 10:21:19 +00003956 size = (unsigned long)p->len * p->nbufs;
3957 if (!access_ok(u64_to_user_ptr(p->addr), size))
Jens Axboeddf0322d2020-02-23 16:41:33 -07003958 return -EFAULT;
3959
3960 p->bgid = READ_ONCE(sqe->buf_group);
3961 tmp = READ_ONCE(sqe->off);
3962 if (tmp > USHRT_MAX)
3963 return -E2BIG;
3964 p->bid = tmp;
3965 return 0;
3966}
3967
3968static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
3969{
3970 struct io_buffer *buf;
3971 u64 addr = pbuf->addr;
3972 int i, bid = pbuf->bid;
3973
3974 for (i = 0; i < pbuf->nbufs; i++) {
3975 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
3976 if (!buf)
3977 break;
3978
3979 buf->addr = addr;
3980 buf->len = pbuf->len;
3981 buf->bid = bid;
3982 addr += pbuf->len;
3983 bid++;
3984 if (!*head) {
3985 INIT_LIST_HEAD(&buf->list);
3986 *head = buf;
3987 } else {
3988 list_add_tail(&buf->list, &(*head)->list);
3989 }
3990 }
3991
3992 return i ? i : -ENOMEM;
3993}
3994
Pavel Begunkov889fca72021-02-10 00:03:09 +00003995static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeddf0322d2020-02-23 16:41:33 -07003996{
3997 struct io_provide_buf *p = &req->pbuf;
3998 struct io_ring_ctx *ctx = req->ctx;
3999 struct io_buffer *head, *list;
4000 int ret = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004001 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboeddf0322d2020-02-23 16:41:33 -07004002
4003 io_ring_submit_lock(ctx, !force_nonblock);
4004
4005 lockdep_assert_held(&ctx->uring_lock);
4006
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004007 list = head = xa_load(&ctx->io_buffers, p->bgid);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004008
4009 ret = io_add_buffers(p, &head);
Jens Axboe9e15c3a2021-03-13 12:29:43 -07004010 if (ret >= 0 && !list) {
4011 ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
4012 if (ret < 0)
Jens Axboe067524e2020-03-02 16:32:28 -07004013 __io_remove_buffers(ctx, head, p->bgid, -1U);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004014 }
Jens Axboeddf0322d2020-02-23 16:41:33 -07004015 if (ret < 0)
4016 req_set_fail_links(req);
Pavel Begunkov9fb8cb42021-02-28 22:35:13 +00004017 /* complete before unlock, IOPOLL may need the lock */
4018 __io_req_complete(req, issue_flags, ret, 0);
4019 io_ring_submit_unlock(ctx, !force_nonblock);
Jens Axboeddf0322d2020-02-23 16:41:33 -07004020 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004021}
4022
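/*
 * Editorial usage sketch (assumes liburing helpers and an initialised ring;
 * group and buffer ids are illustrative): IORING_OP_PROVIDE_BUFFERS hands a
 * run of equally sized buffers to the kernel under a buffer group id, and a
 * later request opts into them with IOSQE_BUFFER_SELECT. The completion's
 * flags then carry which buffer id was consumed.
 *
 *	char *pool = malloc(8 * 4096);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// 8 buffers of 4096 bytes each, group id 1, buffer ids starting at 0
 *	io_uring_prep_provide_buffers(sqe, pool, 4096, 8, 1, 0);
 *	io_uring_submit(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv(sqe, sockfd, NULL, 4096, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 1;
 *	io_uring_submit(&ring);
 *	// consumed buffer id: cqe->flags >> IORING_CQE_BUFFER_SHIFT
 */
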
Jens Axboe3e4827b2020-01-08 15:18:09 -07004023static int io_epoll_ctl_prep(struct io_kiocb *req,
4024 const struct io_uring_sqe *sqe)
4025{
4026#if defined(CONFIG_EPOLL)
4027 if (sqe->ioprio || sqe->buf_index)
4028 return -EINVAL;
Jens Axboe6ca56f82020-09-18 16:51:19 -06004029 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004030 return -EINVAL;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004031
4032 req->epoll.epfd = READ_ONCE(sqe->fd);
4033 req->epoll.op = READ_ONCE(sqe->len);
4034 req->epoll.fd = READ_ONCE(sqe->off);
4035
4036 if (ep_op_has_event(req->epoll.op)) {
4037 struct epoll_event __user *ev;
4038
4039 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4040 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4041 return -EFAULT;
4042 }
4043
4044 return 0;
4045#else
4046 return -EOPNOTSUPP;
4047#endif
4048}
4049
Pavel Begunkov889fca72021-02-10 00:03:09 +00004050static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe3e4827b2020-01-08 15:18:09 -07004051{
4052#if defined(CONFIG_EPOLL)
4053 struct io_epoll *ie = &req->epoll;
4054 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004055 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004056
4057 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4058 if (force_nonblock && ret == -EAGAIN)
4059 return -EAGAIN;
4060
4061 if (ret < 0)
4062 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004063 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe3e4827b2020-01-08 15:18:09 -07004064 return 0;
4065#else
4066 return -EOPNOTSUPP;
4067#endif
4068}
4069
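/*
 * Editorial usage sketch (assumes liburing's io_uring_prep_epoll_ctl() and an
 * initialised ring; epfd/fd are illustrative): the async variant mirrors
 * epoll_ctl(2), and io_epoll_ctl() above first attempts a non-blocking
 * do_epoll_ctl() before retrying from the async worker on -EAGAIN.
 *
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_epoll_ctl(sqe, epfd, fd, EPOLL_CTL_ADD, &ev);
 *	io_uring_submit(&ring);
 */
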
Jens Axboec1ca7572019-12-25 22:18:28 -07004070static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4071{
4072#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4073 if (sqe->ioprio || sqe->buf_index || sqe->off)
4074 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004075 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4076 return -EINVAL;
Jens Axboec1ca7572019-12-25 22:18:28 -07004077
4078 req->madvise.addr = READ_ONCE(sqe->addr);
4079 req->madvise.len = READ_ONCE(sqe->len);
4080 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4081 return 0;
4082#else
4083 return -EOPNOTSUPP;
4084#endif
4085}
4086
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004087static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboec1ca7572019-12-25 22:18:28 -07004088{
4089#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4090 struct io_madvise *ma = &req->madvise;
4091 int ret;
4092
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004093 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboec1ca7572019-12-25 22:18:28 -07004094 return -EAGAIN;
4095
Minchan Kim0726b012020-10-17 16:14:50 -07004096 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
Jens Axboec1ca7572019-12-25 22:18:28 -07004097 if (ret < 0)
4098 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004099 io_req_complete(req, ret);
Jens Axboec1ca7572019-12-25 22:18:28 -07004100 return 0;
4101#else
4102 return -EOPNOTSUPP;
4103#endif
4104}
4105
Jens Axboe4840e412019-12-25 22:03:45 -07004106static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4107{
4108 if (sqe->ioprio || sqe->buf_index || sqe->addr)
4109 return -EINVAL;
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004110 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4111 return -EINVAL;
Jens Axboe4840e412019-12-25 22:03:45 -07004112
4113 req->fadvise.offset = READ_ONCE(sqe->off);
4114 req->fadvise.len = READ_ONCE(sqe->len);
4115 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4116 return 0;
4117}
4118
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004119static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe4840e412019-12-25 22:03:45 -07004120{
4121 struct io_fadvise *fa = &req->fadvise;
4122 int ret;
4123
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004124 if (issue_flags & IO_URING_F_NONBLOCK) {
Jens Axboe3e694262020-02-01 09:22:49 -07004125 switch (fa->advice) {
4126 case POSIX_FADV_NORMAL:
4127 case POSIX_FADV_RANDOM:
4128 case POSIX_FADV_SEQUENTIAL:
4129 break;
4130 default:
4131 return -EAGAIN;
4132 }
4133 }
Jens Axboe4840e412019-12-25 22:03:45 -07004134
4135 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4136 if (ret < 0)
4137 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004138 io_req_complete(req, ret);
Jens Axboe4840e412019-12-25 22:03:45 -07004139 return 0;
4140}
4141
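/*
 * Editorial usage sketch (assumes liburing's io_uring_prep_fadvise() and an
 * initialised ring): io_fadvise() above only lets the "cheap" advice values
 * (NORMAL/RANDOM/SEQUENTIAL) run inline in the non-blocking path; anything
 * else, e.g. POSIX_FADV_DONTNEED, is bounced to the async worker first.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_fadvise(sqe, fd, 0, 0, POSIX_FADV_SEQUENTIAL);
 *	io_uring_submit(&ring);
 */
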
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004142static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4143{
Jens Axboe6ca56f82020-09-18 16:51:19 -06004144 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004145 return -EINVAL;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004146 if (sqe->ioprio || sqe->buf_index)
4147 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004148 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004149 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004150
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004151 req->statx.dfd = READ_ONCE(sqe->fd);
4152 req->statx.mask = READ_ONCE(sqe->len);
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004153 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004154 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4155 req->statx.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004156
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004157 return 0;
4158}
4159
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004160static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004161{
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -07004162 struct io_statx *ctx = &req->statx;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004163 int ret;
4164
Pavel Begunkov59d70012021-03-22 01:58:30 +00004165 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004166 return -EAGAIN;
4167
Bijan Mottahedehe62753e2020-05-22 21:31:18 -07004168 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4169 ctx->buffer);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004170
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004171 if (ret < 0)
4172 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004173 io_req_complete(req, ret);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004174 return 0;
4175}
4176
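/*
 * Editorial usage sketch (assumes liburing's io_uring_prep_statx() and an
 * initialised ring; the path is illustrative): the arguments line up with
 * statx(2), and io_statx() above always runs from blocking context.
 *
 *	struct statx stx;
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_statx(sqe, AT_FDCWD, "data.bin", 0, STATX_SIZE, &stx);
 *	io_uring_submit(&ring);
 */
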
Jens Axboeb5dba592019-12-11 14:02:38 -07004177static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4178{
Jens Axboe14587a462020-09-05 11:36:08 -06004179 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Pavel Begunkov3232dd02020-06-03 18:03:22 +03004180 return -EINVAL;
Jens Axboeb5dba592019-12-11 14:02:38 -07004181 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4182 sqe->rw_flags || sqe->buf_index)
4183 return -EINVAL;
Pavel Begunkov9c280f92020-04-08 08:58:46 +03004184 if (req->flags & REQ_F_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07004185 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07004186
4187 req->close.fd = READ_ONCE(sqe->fd);
Jens Axboeb5dba592019-12-11 14:02:38 -07004188 return 0;
4189}
4190
Pavel Begunkov889fca72021-02-10 00:03:09 +00004191static int io_close(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeb5dba592019-12-11 14:02:38 -07004192{
Jens Axboe9eac1902021-01-19 15:50:37 -07004193 struct files_struct *files = current->files;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004194 struct io_close *close = &req->close;
Jens Axboe9eac1902021-01-19 15:50:37 -07004195 struct fdtable *fdt;
4196 struct file *file;
Jens Axboeb5dba592019-12-11 14:02:38 -07004197 int ret;
4198
Jens Axboe9eac1902021-01-19 15:50:37 -07004199 file = NULL;
4200 ret = -EBADF;
4201 spin_lock(&files->file_lock);
4202 fdt = files_fdtable(files);
4203 if (close->fd >= fdt->max_fds) {
4204 spin_unlock(&files->file_lock);
4205 goto err;
4206 }
4207 file = fdt->fd[close->fd];
4208 if (!file) {
4209 spin_unlock(&files->file_lock);
4210 goto err;
4211 }
4212
4213 if (file->f_op == &io_uring_fops) {
4214 spin_unlock(&files->file_lock);
4215 file = NULL;
4216 goto err;
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004217 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004218
4219 /* if the file has a flush method, be safe and punt to async */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004220 if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
Jens Axboe9eac1902021-01-19 15:50:37 -07004221 spin_unlock(&files->file_lock);
Pavel Begunkov0bf0eef2020-05-26 20:34:06 +03004222 return -EAGAIN;
Pavel Begunkova2100672020-03-02 23:45:16 +03004223 }
Jens Axboeb5dba592019-12-11 14:02:38 -07004224
Jens Axboe9eac1902021-01-19 15:50:37 -07004225 ret = __close_fd_get_file(close->fd, &file);
4226 spin_unlock(&files->file_lock);
4227 if (ret < 0) {
4228 if (ret == -ENOENT)
4229 ret = -EBADF;
4230 goto err;
4231 }
4232
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004233 /* No ->flush() or already async, safely close from here */
Jens Axboe9eac1902021-01-19 15:50:37 -07004234 ret = filp_close(file, current->files);
4235err:
Pavel Begunkov3af73b22020-06-08 21:08:17 +03004236 if (ret < 0)
4237 req_set_fail_links(req);
Jens Axboe9eac1902021-01-19 15:50:37 -07004238 if (file)
4239 fput(file);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004240 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe1a417f42020-01-31 17:16:48 -07004241 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07004242}
4243
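/*
 * Editorial usage sketch (assumes liburing's io_uring_prep_close() and an
 * initialised ring): io_close() above detaches the file from the fd table
 * itself rather than going through close(2), and punts to the async worker
 * only when the file has a ->flush() method that could block.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_close(sqe, fd);
 *	io_uring_submit(&ring);
 */
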
Pavel Begunkov1155c762021-02-18 18:29:38 +00004244static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004245{
4246 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004247
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004248 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4249 return -EINVAL;
4250 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
4251 return -EINVAL;
4252
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004253 req->sync.off = READ_ONCE(sqe->off);
4254 req->sync.len = READ_ONCE(sqe->len);
4255 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004256 return 0;
4257}
4258
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004259static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004260{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004261 int ret;
4262
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004263 /* sync_file_range always requires a blocking context */
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004264 if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004265 return -EAGAIN;
4266
Jens Axboe9adbd452019-12-20 08:45:55 -07004267 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004268 req->sync.flags);
4269 if (ret < 0)
4270 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06004271 io_req_complete(req, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004272 return 0;
4273}
4274
YueHaibing469956e2020-03-04 15:53:52 +08004275#if defined(CONFIG_NET)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004276static int io_setup_async_msg(struct io_kiocb *req,
4277 struct io_async_msghdr *kmsg)
4278{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004279 struct io_async_msghdr *async_msg = req->async_data;
4280
4281 if (async_msg)
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004282 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004283 if (io_alloc_async_data(req)) {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004284 kfree(kmsg->free_iov);
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004285 return -ENOMEM;
4286 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004287 async_msg = req->async_data;
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004288 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004289 memcpy(async_msg, kmsg, sizeof(*kmsg));
Pavel Begunkov2a780802021-02-05 00:57:58 +00004290 async_msg->msg.msg_name = &async_msg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004291	/* if we were using fast_iov, set it to the new one */
4292 if (!async_msg->free_iov)
4293 async_msg->msg.msg_iter.iov = async_msg->fast_iov;
4294
Pavel Begunkov02d27d82020-02-28 10:36:36 +03004295 return -EAGAIN;
4296}
4297
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004298static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4299 struct io_async_msghdr *iomsg)
4300{
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004301 iomsg->msg.msg_name = &iomsg->addr;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004302 iomsg->free_iov = iomsg->fast_iov;
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004303 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004304 req->sr_msg.msg_flags, &iomsg->free_iov);
Pavel Begunkov2ae523e2020-07-12 20:41:06 +03004305}
4306
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004307static int io_sendmsg_prep_async(struct io_kiocb *req)
4308{
4309 int ret;
4310
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004311 ret = io_sendmsg_copy_hdr(req, req->async_data);
4312 if (!ret)
4313 req->flags |= REQ_F_NEED_CLEANUP;
4314 return ret;
4315}
4316
Jens Axboe3529d8c2019-12-19 18:24:38 -07004317static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06004318{
Jens Axboee47293f2019-12-20 08:58:21 -07004319 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe03b12302019-12-02 18:50:25 -07004320
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004321 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4322 return -EINVAL;
4323
Jens Axboee47293f2019-12-20 08:58:21 -07004324 sr->msg_flags = READ_ONCE(sqe->msg_flags);
Pavel Begunkov270a5942020-07-12 20:41:04 +03004325 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07004326 sr->len = READ_ONCE(sqe->len);
Jens Axboe3529d8c2019-12-19 18:24:38 -07004327
Jens Axboed8768362020-02-27 14:17:49 -07004328#ifdef CONFIG_COMPAT
4329 if (req->ctx->compat)
4330 sr->msg_flags |= MSG_CMSG_COMPAT;
4331#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004332 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004333}
4334
Pavel Begunkov889fca72021-02-10 00:03:09 +00004335static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004336{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004337 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe03b12302019-12-02 18:50:25 -07004338 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004339 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004340 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004341 int ret;
4342
Florent Revestdba4a922020-12-04 12:36:04 +01004343 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004344 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004345 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004346
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004347 kmsg = req->async_data;
4348 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004349 ret = io_sendmsg_copy_hdr(req, &iomsg);
Jens Axboefddafac2020-01-04 20:19:44 -07004350 if (ret)
4351 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004352 kmsg = &iomsg;
Jens Axboefddafac2020-01-04 20:19:44 -07004353 }
4354
Stefan Metzmacher76cd9792021-03-16 16:33:27 +01004355 flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004356 if (flags & MSG_DONTWAIT)
4357 req->flags |= REQ_F_NOWAIT;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004358 else if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004359 flags |= MSG_DONTWAIT;
4360
Stefan Metzmacher00312752021-03-20 20:33:36 +01004361 if (flags & MSG_WAITALL)
4362 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4363
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004364 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004365 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004366 return io_setup_async_msg(req, kmsg);
4367 if (ret == -ERESTARTSYS)
4368 ret = -EINTR;
4369
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004370 /* fast path, check for non-NULL to avoid function call */
4371 if (kmsg->free_iov)
4372 kfree(kmsg->free_iov);
Jens Axboe03b12302019-12-02 18:50:25 -07004373 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004374 if (ret < min_ret)
Jens Axboefddafac2020-01-04 20:19:44 -07004375 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004376 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboefddafac2020-01-04 20:19:44 -07004377 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004378}
4379
Pavel Begunkov889fca72021-02-10 00:03:09 +00004380static int io_send(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004381{
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004382 struct io_sr_msg *sr = &req->sr_msg;
4383 struct msghdr msg;
4384 struct iovec iov;
Jens Axboe03b12302019-12-02 18:50:25 -07004385 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004386 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004387 int min_ret = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004388 int ret;
4389
Florent Revestdba4a922020-12-04 12:36:04 +01004390 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004391 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004392 return -ENOTSOCK;
Jens Axboe03b12302019-12-02 18:50:25 -07004393
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004394 ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4395 if (unlikely(ret))
Zheng Bin14db8412020-09-09 20:12:37 +08004396 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07004397
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004398 msg.msg_name = NULL;
4399 msg.msg_control = NULL;
4400 msg.msg_controllen = 0;
4401 msg.msg_namelen = 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004402
Stefan Metzmacher76cd9792021-03-16 16:33:27 +01004403 flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004404 if (flags & MSG_DONTWAIT)
4405 req->flags |= REQ_F_NOWAIT;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004406 else if (issue_flags & IO_URING_F_NONBLOCK)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004407 flags |= MSG_DONTWAIT;
Jens Axboe03b12302019-12-02 18:50:25 -07004408
Stefan Metzmacher00312752021-03-20 20:33:36 +01004409 if (flags & MSG_WAITALL)
4410 min_ret = iov_iter_count(&msg.msg_iter);
4411
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004412 msg.msg_flags = flags;
4413 ret = sock_sendmsg(sock, &msg);
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004414 if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004415 return -EAGAIN;
4416 if (ret == -ERESTARTSYS)
4417 ret = -EINTR;
Jens Axboe03b12302019-12-02 18:50:25 -07004418
Stefan Metzmacher00312752021-03-20 20:33:36 +01004419 if (ret < min_ret)
Jens Axboe03b12302019-12-02 18:50:25 -07004420 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004421 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe03b12302019-12-02 18:50:25 -07004422 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004423}
4424
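/*
 * Editorial usage sketch (assumes liburing's io_uring_prep_send() and an
 * initialised ring; sockfd, buf and buflen are illustrative): MSG_NOSIGNAL is
 * always OR'd in by io_send()/io_sendmsg() above, and with MSG_WAITALL a
 * short transfer (ret below the full iterator count) marks the request as
 * failed for link purposes.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_send(sqe, sockfd, buf, buflen, MSG_WAITALL);
 *	io_uring_submit(&ring);
 *	// cqe->res is the number of bytes sent, or a negative errno
 */
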
Pavel Begunkov1400e692020-07-12 20:41:05 +03004425static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4426 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004427{
4428 struct io_sr_msg *sr = &req->sr_msg;
4429 struct iovec __user *uiov;
4430 size_t iov_len;
4431 int ret;
4432
Pavel Begunkov1400e692020-07-12 20:41:05 +03004433 ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4434 &iomsg->uaddr, &uiov, &iov_len);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004435 if (ret)
4436 return ret;
4437
4438 if (req->flags & REQ_F_BUFFER_SELECT) {
4439 if (iov_len > 1)
4440 return -EINVAL;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004441 if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
Jens Axboe52de1fe2020-02-27 10:15:42 -07004442 return -EFAULT;
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004443 sr->len = iomsg->fast_iov[0].iov_len;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004444 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004445 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004446 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004447 ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004448 &iomsg->free_iov, &iomsg->msg.msg_iter,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004449 false);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004450 if (ret > 0)
4451 ret = 0;
4452 }
4453
4454 return ret;
4455}
4456
4457#ifdef CONFIG_COMPAT
4458static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
Pavel Begunkov1400e692020-07-12 20:41:05 +03004459 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004460{
4461 struct compat_msghdr __user *msg_compat;
4462 struct io_sr_msg *sr = &req->sr_msg;
4463 struct compat_iovec __user *uiov;
4464 compat_uptr_t ptr;
4465 compat_size_t len;
4466 int ret;
4467
Pavel Begunkov270a5942020-07-12 20:41:04 +03004468 msg_compat = (struct compat_msghdr __user *) sr->umsg;
Pavel Begunkov1400e692020-07-12 20:41:05 +03004469 ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
Jens Axboe52de1fe2020-02-27 10:15:42 -07004470 &ptr, &len);
4471 if (ret)
4472 return ret;
4473
4474 uiov = compat_ptr(ptr);
4475 if (req->flags & REQ_F_BUFFER_SELECT) {
4476 compat_ssize_t clen;
4477
4478 if (len > 1)
4479 return -EINVAL;
4480 if (!access_ok(uiov, sizeof(*uiov)))
4481 return -EFAULT;
4482 if (__get_user(clen, &uiov->iov_len))
4483 return -EFAULT;
4484 if (clen < 0)
4485 return -EINVAL;
Pavel Begunkov2d280bc2020-11-29 18:33:32 +00004486 sr->len = clen;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004487 iomsg->free_iov = NULL;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004488 } else {
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004489 iomsg->free_iov = iomsg->fast_iov;
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004490 ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004491 UIO_FASTIOV, &iomsg->free_iov,
Christoph Hellwig89cd35c2020-09-25 06:51:41 +02004492 &iomsg->msg.msg_iter, true);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004493 if (ret < 0)
4494 return ret;
4495 }
4496
4497 return 0;
4498}
Jens Axboe03b12302019-12-02 18:50:25 -07004499#endif
Jens Axboe52de1fe2020-02-27 10:15:42 -07004500
Pavel Begunkov1400e692020-07-12 20:41:05 +03004501static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4502 struct io_async_msghdr *iomsg)
Jens Axboe52de1fe2020-02-27 10:15:42 -07004503{
Pavel Begunkov1400e692020-07-12 20:41:05 +03004504 iomsg->msg.msg_name = &iomsg->addr;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004505
4506#ifdef CONFIG_COMPAT
4507 if (req->ctx->compat)
Pavel Begunkov1400e692020-07-12 20:41:05 +03004508 return __io_compat_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004509#endif
4510
Pavel Begunkov1400e692020-07-12 20:41:05 +03004511 return __io_recvmsg_copy_hdr(req, iomsg);
Jens Axboe52de1fe2020-02-27 10:15:42 -07004512}
4513
Jens Axboebcda7ba2020-02-23 16:42:51 -07004514static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004515 bool needs_lock)
Jens Axboebcda7ba2020-02-23 16:42:51 -07004516{
4517 struct io_sr_msg *sr = &req->sr_msg;
4518 struct io_buffer *kbuf;
4519
Jens Axboebcda7ba2020-02-23 16:42:51 -07004520 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4521 if (IS_ERR(kbuf))
4522 return kbuf;
4523
4524 sr->kbuf = kbuf;
4525 req->flags |= REQ_F_BUFFER_SELECTED;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004526 return kbuf;
Jens Axboe03b12302019-12-02 18:50:25 -07004527}
4528
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004529static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4530{
4531 return io_put_kbuf(req, req->sr_msg.kbuf);
4532}
4533
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004534static int io_recvmsg_prep_async(struct io_kiocb *req)
Jens Axboe03b12302019-12-02 18:50:25 -07004535{
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004536 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07004537
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004538 ret = io_recvmsg_copy_hdr(req, req->async_data);
4539 if (!ret)
4540 req->flags |= REQ_F_NEED_CLEANUP;
4541 return ret;
4542}
4543
4544static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4545{
4546 struct io_sr_msg *sr = &req->sr_msg;
4547
Pavel Begunkovd2b6f482020-06-03 18:03:25 +03004548 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4549 return -EINVAL;
4550
Jens Axboe3529d8c2019-12-19 18:24:38 -07004551 sr->msg_flags = READ_ONCE(sqe->msg_flags);
Pavel Begunkov270a5942020-07-12 20:41:04 +03004552 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07004553 sr->len = READ_ONCE(sqe->len);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004554 sr->bgid = READ_ONCE(sqe->buf_group);
Jens Axboe3529d8c2019-12-19 18:24:38 -07004555
Jens Axboed8768362020-02-27 14:17:49 -07004556#ifdef CONFIG_COMPAT
4557 if (req->ctx->compat)
4558 sr->msg_flags |= MSG_CMSG_COMPAT;
4559#endif
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004560 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07004561}
4562
Pavel Begunkov889fca72021-02-10 00:03:09 +00004563static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe03b12302019-12-02 18:50:25 -07004564{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004565 struct io_async_msghdr iomsg, *kmsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004566 struct socket *sock;
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004567 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004568 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004569 int min_ret = 0;
Jens Axboe52de1fe2020-02-27 10:15:42 -07004570 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004571 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004572
Florent Revestdba4a922020-12-04 12:36:04 +01004573 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004574 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004575 return -ENOTSOCK;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004576
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004577 kmsg = req->async_data;
4578 if (!kmsg) {
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004579 ret = io_recvmsg_copy_hdr(req, &iomsg);
4580 if (ret)
Pavel Begunkov681fda82020-07-15 22:20:45 +03004581 return ret;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004582 kmsg = &iomsg;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004583 }
4584
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004585 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004586 kbuf = io_recv_buffer_select(req, !force_nonblock);
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004587 if (IS_ERR(kbuf))
4588 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004589 kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
Pavel Begunkov5476dfe2021-02-05 00:57:59 +00004590 kmsg->fast_iov[0].iov_len = req->sr_msg.len;
4591 iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004592 1, req->sr_msg.len);
4593 }
4594
Stefan Metzmacher76cd9792021-03-16 16:33:27 +01004595 flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004596 if (flags & MSG_DONTWAIT)
4597 req->flags |= REQ_F_NOWAIT;
4598 else if (force_nonblock)
4599 flags |= MSG_DONTWAIT;
4600
Stefan Metzmacher00312752021-03-20 20:33:36 +01004601 if (flags & MSG_WAITALL)
4602 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4603
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004604 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4605 kmsg->uaddr, flags);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004606 if (force_nonblock && ret == -EAGAIN)
4607 return io_setup_async_msg(req, kmsg);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004608 if (ret == -ERESTARTSYS)
4609 ret = -EINTR;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03004610
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004611 if (req->flags & REQ_F_BUFFER_SELECTED)
4612 cflags = io_put_recv_kbuf(req);
Pavel Begunkov257e84a2021-02-05 00:58:00 +00004613 /* fast path, check for non-NULL to avoid function call */
4614 if (kmsg->free_iov)
4615 kfree(kmsg->free_iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004616 req->flags &= ~REQ_F_NEED_CLEANUP;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004617 if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004618 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004619 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06004620 return 0;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004621}
4622
Pavel Begunkov889fca72021-02-10 00:03:09 +00004623static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefddafac2020-01-04 20:19:44 -07004624{
Pavel Begunkov6b754c82020-07-16 23:28:00 +03004625 struct io_buffer *kbuf;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004626 struct io_sr_msg *sr = &req->sr_msg;
4627 struct msghdr msg;
4628 void __user *buf = sr->buf;
Jens Axboefddafac2020-01-04 20:19:44 -07004629 struct socket *sock;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004630 struct iovec iov;
4631 unsigned flags;
Stefan Metzmacher00312752021-03-20 20:33:36 +01004632 int min_ret = 0;
Jens Axboebcda7ba2020-02-23 16:42:51 -07004633 int ret, cflags = 0;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004634 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004635
Florent Revestdba4a922020-12-04 12:36:04 +01004636 sock = sock_from_file(req->file);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004637 if (unlikely(!sock))
Florent Revestdba4a922020-12-04 12:36:04 +01004638 return -ENOTSOCK;
Jens Axboefddafac2020-01-04 20:19:44 -07004639
Pavel Begunkovbc02ef32020-07-16 23:28:03 +03004640 if (req->flags & REQ_F_BUFFER_SELECT) {
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004641 kbuf = io_recv_buffer_select(req, !force_nonblock);
Jens Axboebcda7ba2020-02-23 16:42:51 -07004642 if (IS_ERR(kbuf))
4643 return PTR_ERR(kbuf);
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004644 buf = u64_to_user_ptr(kbuf->addr);
Jens Axboefddafac2020-01-04 20:19:44 -07004645 }
4646
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004647 ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004648 if (unlikely(ret))
4649 goto out_free;
Jens Axboefddafac2020-01-04 20:19:44 -07004650
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004651 msg.msg_name = NULL;
4652 msg.msg_control = NULL;
4653 msg.msg_controllen = 0;
4654 msg.msg_namelen = 0;
4655 msg.msg_iocb = NULL;
4656 msg.msg_flags = 0;
4657
Stefan Metzmacher76cd9792021-03-16 16:33:27 +01004658 flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004659 if (flags & MSG_DONTWAIT)
4660 req->flags |= REQ_F_NOWAIT;
4661 else if (force_nonblock)
4662 flags |= MSG_DONTWAIT;
4663
Stefan Metzmacher00312752021-03-20 20:33:36 +01004664 if (flags & MSG_WAITALL)
4665 min_ret = iov_iter_count(&msg.msg_iter);
4666
Pavel Begunkov7a7cacb2020-07-16 23:27:59 +03004667 ret = sock_recvmsg(sock, &msg, flags);
4668 if (force_nonblock && ret == -EAGAIN)
4669 return -EAGAIN;
4670 if (ret == -ERESTARTSYS)
4671 ret = -EINTR;
Pavel Begunkov14c32ee2020-07-16 23:28:01 +03004672out_free:
Pavel Begunkov7fbb1b52020-07-16 23:28:05 +03004673 if (req->flags & REQ_F_BUFFER_SELECTED)
4674 cflags = io_put_recv_kbuf(req);
Stefan Metzmacher00312752021-03-20 20:33:36 +01004675 if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
Jens Axboefddafac2020-01-04 20:19:44 -07004676 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004677 __io_req_complete(req, issue_flags, ret, cflags);
Jens Axboefddafac2020-01-04 20:19:44 -07004678 return 0;
Jens Axboefddafac2020-01-04 20:19:44 -07004679}
4680
Jens Axboe3529d8c2019-12-19 18:24:38 -07004681static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004682{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004683 struct io_accept *accept = &req->accept;
4684
Jens Axboe14587a462020-09-05 11:36:08 -06004685 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe17f2fe32019-10-17 14:42:58 -06004686 return -EINVAL;
Hrvoje Zeba8042d6c2019-11-25 14:40:22 -05004687 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004688 return -EINVAL;
4689
Jens Axboed55e5f52019-12-11 16:12:15 -07004690 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4691 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004692 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe09952e32020-03-19 20:16:56 -06004693 accept->nofile = rlimit(RLIMIT_NOFILE);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004694 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004695}
Jens Axboe17f2fe32019-10-17 14:42:58 -06004696
Pavel Begunkov889fca72021-02-10 00:03:09 +00004697static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004698{
4699 struct io_accept *accept = &req->accept;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004700 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004701 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004702 int ret;
4703
Jiufei Xuee697dee2020-06-10 13:41:59 +08004704 if (req->file->f_flags & O_NONBLOCK)
4705 req->flags |= REQ_F_NOWAIT;
4706
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004707 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
Jens Axboe09952e32020-03-19 20:16:56 -06004708 accept->addr_len, accept->flags,
4709 accept->nofile);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004710 if (ret == -EAGAIN && force_nonblock)
Jens Axboe17f2fe32019-10-17 14:42:58 -06004711 return -EAGAIN;
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004712 if (ret < 0) {
4713 if (ret == -ERESTARTSYS)
4714 ret = -EINTR;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004715 req_set_fail_links(req);
Pavel Begunkovac45abc2020-06-08 21:08:18 +03004716 }
Pavel Begunkov889fca72021-02-10 00:03:09 +00004717 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe17f2fe32019-10-17 14:42:58 -06004718 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004719}
4720
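/*
 * Editorial usage sketch (assumes liburing's io_uring_prep_accept() and an
 * initialised ring; listen_fd is an illustrative, already-listening socket):
 * io_accept() above returns -EAGAIN when a non-blocking attempt finds no
 * pending connection, and io_uring retries once the socket becomes ready, so
 * a single SQE normally completes only when a connection actually arrives.
 *
 *	struct sockaddr_storage addr;
 *	socklen_t addrlen = sizeof(addr);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)&addr,
 *			     &addrlen, SOCK_CLOEXEC);
 *	io_uring_submit(&ring);
 *	// cqe->res is the accepted fd on success
 */
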
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004721static int io_connect_prep_async(struct io_kiocb *req)
4722{
4723 struct io_async_connect *io = req->async_data;
4724 struct io_connect *conn = &req->connect;
4725
4726 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
4727}
4728
Jens Axboe3529d8c2019-12-19 18:24:38 -07004729static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07004730{
Jens Axboe3529d8c2019-12-19 18:24:38 -07004731 struct io_connect *conn = &req->connect;
Jens Axboef499a022019-12-02 16:28:46 -07004732
Jens Axboe14587a462020-09-05 11:36:08 -06004733 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004734 return -EINVAL;
4735 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
4736 return -EINVAL;
4737
Jens Axboe3529d8c2019-12-19 18:24:38 -07004738 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4739 conn->addr_len = READ_ONCE(sqe->addr2);
Pavel Begunkov93642ef2021-02-18 18:29:44 +00004740 return 0;
Jens Axboef499a022019-12-02 16:28:46 -07004741}
4742
Pavel Begunkov889fca72021-02-10 00:03:09 +00004743static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboef8e85cf2019-11-23 14:24:24 -07004744{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004745 struct io_async_connect __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004746 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004747 int ret;
Pavel Begunkov45d189c2021-02-10 00:03:07 +00004748 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004749
Jens Axboee8c2bc12020-08-15 18:44:09 -07004750 if (req->async_data) {
4751 io = req->async_data;
Jens Axboef499a022019-12-02 16:28:46 -07004752 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004753 ret = move_addr_to_kernel(req->connect.addr,
4754 req->connect.addr_len,
Jens Axboee8c2bc12020-08-15 18:44:09 -07004755 &__io.address);
Jens Axboef499a022019-12-02 16:28:46 -07004756 if (ret)
4757 goto out;
4758 io = &__io;
4759 }
4760
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004761 file_flags = force_nonblock ? O_NONBLOCK : 0;
4762
Jens Axboee8c2bc12020-08-15 18:44:09 -07004763 ret = __sys_connect_file(req->file, &io->address,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07004764 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07004765 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07004766 if (req->async_data)
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004767 return -EAGAIN;
Jens Axboee8c2bc12020-08-15 18:44:09 -07004768 if (io_alloc_async_data(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07004769 ret = -ENOMEM;
4770 goto out;
4771 }
Jens Axboee8c2bc12020-08-15 18:44:09 -07004772 memcpy(req->async_data, &__io, sizeof(__io));
Jens Axboef8e85cf2019-11-23 14:24:24 -07004773 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07004774 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07004775 if (ret == -ERESTARTSYS)
4776 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07004777out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004778 if (ret < 0)
4779 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00004780 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboef8e85cf2019-11-23 14:24:24 -07004781 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004782}
YueHaibing469956e2020-03-04 15:53:52 +08004783#else /* !CONFIG_NET */
Jens Axboe99a10082021-02-19 09:35:19 -07004784#define IO_NETOP_FN(op) \
4785static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
4786{ \
4787 return -EOPNOTSUPP; \
Jens Axboef8e85cf2019-11-23 14:24:24 -07004788}
4789
Jens Axboe99a10082021-02-19 09:35:19 -07004790#define IO_NETOP_PREP(op) \
4791IO_NETOP_FN(op) \
4792static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
4793{ \
4794 return -EOPNOTSUPP; \
4795} \
4796
4797#define IO_NETOP_PREP_ASYNC(op) \
4798IO_NETOP_PREP(op) \
4799static int io_##op##_prep_async(struct io_kiocb *req) \
4800{ \
4801 return -EOPNOTSUPP; \
YueHaibing469956e2020-03-04 15:53:52 +08004802}
4803
Jens Axboe99a10082021-02-19 09:35:19 -07004804IO_NETOP_PREP_ASYNC(sendmsg);
4805IO_NETOP_PREP_ASYNC(recvmsg);
4806IO_NETOP_PREP_ASYNC(connect);
4807IO_NETOP_PREP(accept);
4808IO_NETOP_FN(send);
4809IO_NETOP_FN(recv);
YueHaibing469956e2020-03-04 15:53:52 +08004810#endif /* CONFIG_NET */
Jens Axboe17f2fe32019-10-17 14:42:58 -06004811
Jens Axboed7718a92020-02-14 22:23:12 -07004812struct io_poll_table {
4813 struct poll_table_struct pt;
4814 struct io_kiocb *req;
4815 int error;
4816};
4817
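/*
 * Common wake handler bottom half for poll and internally armed (async)
 * poll: check that the woken event matches what we armed for, detach the
 * wait entry, record the mask and queue the request's task_work. If the
 * task is exiting, mark the poll canceled and use the fallback path.
 */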
Jens Axboed7718a92020-02-14 22:23:12 -07004818static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4819 __poll_t mask, task_work_func_t func)
4820{
Jens Axboeaa96bf82020-04-03 11:26:26 -06004821 int ret;
Jens Axboed7718a92020-02-14 22:23:12 -07004822
4823	/* for instances that support it, check for an event match first: */
4824 if (mask && !(mask & poll->events))
4825 return 0;
4826
4827 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4828
4829 list_del_init(&poll->wait.entry);
4830
Jens Axboed7718a92020-02-14 22:23:12 -07004831 req->result = mask;
Jens Axboe7cbf1722021-02-10 00:03:20 +00004832 req->task_work.func = func;
Jens Axboe6d816e02020-08-11 08:04:14 -06004833
Jens Axboed7718a92020-02-14 22:23:12 -07004834 /*
Jens Axboee3aabf92020-05-18 11:04:17 -06004835 * If this fails, then the task is exiting. When a task exits, the
4836 * work gets canceled, so just cancel this request as well instead
4837 * of executing it. We can't safely execute it anyway, as we may not
4838	 * have the state it needs.
Jens Axboed7718a92020-02-14 22:23:12 -07004839 */
Jens Axboe355fb9e2020-10-22 20:19:35 -06004840 ret = io_req_task_work_add(req);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004841 if (unlikely(ret)) {
Jens Axboee3aabf92020-05-18 11:04:17 -06004842 WRITE_ONCE(poll->canceled, true);
Pavel Begunkoveab30c42021-01-19 13:32:42 +00004843 io_req_task_work_add_fallback(req, func);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004844 }
Jens Axboed7718a92020-02-14 22:23:12 -07004845 return 1;
4846}
4847
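/*
 * Re-evaluate poll state from task_work. If we have no result yet, re-check
 * the file with vfs_poll(); if there is still nothing to report and the poll
 * wasn't canceled, re-add the wait entry and return true so the caller keeps
 * waiting. Returns with completion_lock held either way.
 */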
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004848static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4849 __acquires(&req->ctx->completion_lock)
4850{
4851 struct io_ring_ctx *ctx = req->ctx;
4852
4853 if (!req->result && !READ_ONCE(poll->canceled)) {
4854 struct poll_table_struct pt = { ._key = poll->events };
4855
4856 req->result = vfs_poll(req->file, &pt) & poll->events;
4857 }
4858
4859 spin_lock_irq(&ctx->completion_lock);
4860 if (!req->result && !READ_ONCE(poll->canceled)) {
4861 add_wait_queue(poll->head, &poll->wait);
4862 return true;
4863 }
4864
4865 return false;
4866}
4867
Jens Axboed4e7cd32020-08-15 11:44:50 -07004868static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
Jens Axboe18bceab2020-05-15 11:56:54 -06004869{
Jens Axboee8c2bc12020-08-15 18:44:09 -07004870 /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
Jens Axboed4e7cd32020-08-15 11:44:50 -07004871 if (req->opcode == IORING_OP_POLL_ADD)
Jens Axboee8c2bc12020-08-15 18:44:09 -07004872 return req->async_data;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004873 return req->apoll->double_poll;
4874}
4875
4876static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4877{
4878 if (req->opcode == IORING_OP_POLL_ADD)
4879 return &req->poll;
4880 return &req->apoll->poll;
4881}
4882
4883static void io_poll_remove_double(struct io_kiocb *req)
4884{
4885 struct io_poll_iocb *poll = io_poll_get_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004886
4887 lockdep_assert_held(&req->ctx->completion_lock);
4888
4889 if (poll && poll->head) {
4890 struct wait_queue_head *head = poll->head;
4891
4892 spin_lock(&head->lock);
4893 list_del_init(&poll->wait.entry);
4894 if (poll->wait.private)
Jens Axboede9b4cc2021-02-24 13:28:27 -07004895 req_ref_put(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004896 poll->head = NULL;
4897 spin_unlock(&head->lock);
4898 }
4899}
4900
4901static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4902{
4903 struct io_ring_ctx *ctx = req->ctx;
4904
Jens Axboe45ab03b2021-02-23 08:19:33 -07004905 if (!error && req->poll.canceled)
4906 error = -ECANCELED;
4907
Jens Axboed4e7cd32020-08-15 11:44:50 -07004908 io_poll_remove_double(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004909 req->poll.done = true;
4910 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4911 io_commit_cqring(ctx);
4912}
4913
Jens Axboe18bceab2020-05-15 11:56:54 -06004914static void io_poll_task_func(struct callback_head *cb)
4915{
4916 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
Jens Axboe6d816e02020-08-11 08:04:14 -06004917 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004918 struct io_kiocb *nxt;
Jens Axboe18bceab2020-05-15 11:56:54 -06004919
Pavel Begunkovdd221f462020-10-18 10:17:42 +01004920 if (io_poll_rewait(req, &req->poll)) {
4921 spin_unlock_irq(&ctx->completion_lock);
4922 } else {
4923 hash_del(&req->hash_node);
4924 io_poll_complete(req, req->result, 0);
4925 spin_unlock_irq(&ctx->completion_lock);
4926
4927 nxt = io_put_req_find_next(req);
4928 io_cqring_ev_posted(ctx);
4929 if (nxt)
4930 __io_req_task_submit(nxt);
4931 }
Jens Axboe18bceab2020-05-15 11:56:54 -06004932}
4933
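/*
 * Wake handler for the extra poll entry used when a file drives poll through
 * more than one waitqueue: detach this entry, forward the wakeup to the
 * original entry's handler if that one is still queued, and drop the request
 * reference the extra entry held.
 */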
4934static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4935 int sync, void *key)
4936{
4937 struct io_kiocb *req = wait->private;
Jens Axboed4e7cd32020-08-15 11:44:50 -07004938 struct io_poll_iocb *poll = io_poll_get_single(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004939 __poll_t mask = key_to_poll(key);
4940
4941	/* for instances that support it, check for an event match first: */
4942 if (mask && !(mask & poll->events))
4943 return 0;
4944
Jens Axboe8706e042020-09-28 08:38:54 -06004945 list_del_init(&wait->entry);
4946
Jens Axboe807abcb2020-07-17 17:09:27 -06004947 if (poll && poll->head) {
Jens Axboe18bceab2020-05-15 11:56:54 -06004948 bool done;
4949
Jens Axboe807abcb2020-07-17 17:09:27 -06004950 spin_lock(&poll->head->lock);
4951 done = list_empty(&poll->wait.entry);
Jens Axboe18bceab2020-05-15 11:56:54 -06004952 if (!done)
Jens Axboe807abcb2020-07-17 17:09:27 -06004953 list_del_init(&poll->wait.entry);
Jens Axboed4e7cd32020-08-15 11:44:50 -07004954 /* make sure double remove sees this as being gone */
4955 wait->private = NULL;
Jens Axboe807abcb2020-07-17 17:09:27 -06004956 spin_unlock(&poll->head->lock);
Jens Axboec8b5e262020-10-25 13:53:26 -06004957 if (!done) {
4958			/* use the wait func handler, so it matches the request type */
4959 poll->wait.func(&poll->wait, mode, sync, key);
4960 }
Jens Axboe18bceab2020-05-15 11:56:54 -06004961 }
Jens Axboede9b4cc2021-02-24 13:28:27 -07004962 req_ref_put(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06004963 return 1;
4964}
4965
4966static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4967 wait_queue_func_t wake_func)
4968{
4969 poll->head = NULL;
4970 poll->done = false;
4971 poll->canceled = false;
Jens Axboe464dca62021-03-19 14:06:24 -06004972#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
4973 /* mask in events that we always want/need */
4974 poll->events = events | IO_POLL_UNMASK;
Jens Axboe18bceab2020-05-15 11:56:54 -06004975 INIT_LIST_HEAD(&poll->wait.entry);
4976 init_waitqueue_func_entry(&poll->wait, wake_func);
4977}
4978
4979static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
Jens Axboe807abcb2020-07-17 17:09:27 -06004980 struct wait_queue_head *head,
4981 struct io_poll_iocb **poll_ptr)
Jens Axboe18bceab2020-05-15 11:56:54 -06004982{
4983 struct io_kiocb *req = pt->req;
4984
4985 /*
4986 * If poll->head is already set, it's because the file being polled
4987	 * uses multiple waitqueues for poll handling (e.g. one for read, one
4988	 * for write). Set up a separate io_poll_iocb if this happens.
4989 */
4990 if (unlikely(poll->head)) {
Pavel Begunkov58852d42020-10-16 20:55:56 +01004991 struct io_poll_iocb *poll_one = poll;
4992
Jens Axboe18bceab2020-05-15 11:56:54 -06004993 /* already have a 2nd entry, fail a third attempt */
Jens Axboe807abcb2020-07-17 17:09:27 -06004994 if (*poll_ptr) {
Jens Axboe18bceab2020-05-15 11:56:54 -06004995 pt->error = -EINVAL;
4996 return;
4997 }
Jens Axboe1c3b3e62021-02-28 16:07:30 -07004998 /* double add on the same waitqueue head, ignore */
4999 if (poll->head == head)
5000 return;
Jens Axboe18bceab2020-05-15 11:56:54 -06005001 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5002 if (!poll) {
5003 pt->error = -ENOMEM;
5004 return;
5005 }
Pavel Begunkov58852d42020-10-16 20:55:56 +01005006 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
Jens Axboede9b4cc2021-02-24 13:28:27 -07005007 req_ref_get(req);
Jens Axboe18bceab2020-05-15 11:56:54 -06005008 poll->wait.private = req;
Jens Axboe807abcb2020-07-17 17:09:27 -06005009 *poll_ptr = poll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005010 }
5011
5012 pt->error = 0;
5013 poll->head = head;
Jiufei Xuea31eb4a2020-06-17 17:53:56 +08005014
5015 if (poll->events & EPOLLEXCLUSIVE)
5016 add_wait_queue_exclusive(head, &poll->wait);
5017 else
5018 add_wait_queue(head, &poll->wait);
Jens Axboe18bceab2020-05-15 11:56:54 -06005019}
5020
5021static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5022 struct poll_table_struct *p)
5023{
5024 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
Jens Axboe807abcb2020-07-17 17:09:27 -06005025 struct async_poll *apoll = pt->req->apoll;
Jens Axboe18bceab2020-05-15 11:56:54 -06005026
Jens Axboe807abcb2020-07-17 17:09:27 -06005027 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
Jens Axboe18bceab2020-05-15 11:56:54 -06005028}
5029
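/*
 * task_work for internally armed poll: if the rewait check re-armed the
 * request, just unlock and keep waiting. Otherwise unhash it and either
 * resubmit it now that the file is ready, or fail it with -ECANCELED, then
 * free the async poll state.
 */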
Jens Axboed7718a92020-02-14 22:23:12 -07005030static void io_async_task_func(struct callback_head *cb)
5031{
5032 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5033 struct async_poll *apoll = req->apoll;
5034 struct io_ring_ctx *ctx = req->ctx;
5035
5036 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
5037
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005038 if (io_poll_rewait(req, &apoll->poll)) {
Jens Axboed7718a92020-02-14 22:23:12 -07005039 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005040 return;
Jens Axboed7718a92020-02-14 22:23:12 -07005041 }
5042
Jens Axboe31067252020-05-17 17:43:31 -06005043 /* If req is still hashed, it cannot have been canceled. Don't check. */
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005044 if (hash_hashed(&req->hash_node))
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005045 hash_del(&req->hash_node);
Jens Axboe2bae0472020-04-13 11:16:34 -06005046
Jens Axboed4e7cd32020-08-15 11:44:50 -07005047 io_poll_remove_double(req);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06005048 spin_unlock_irq(&ctx->completion_lock);
5049
Pavel Begunkov0be0b0e2020-06-30 15:20:42 +03005050 if (!READ_ONCE(apoll->poll.canceled))
5051 __io_req_task_submit(req);
5052 else
Pavel Begunkov25935532021-03-19 17:22:40 +00005053 io_req_complete_failed(req, -ECANCELED);
Dan Carpenteraa340842020-07-08 21:47:11 +03005054
Jens Axboe807abcb2020-07-17 17:09:27 -06005055 kfree(apoll->double_poll);
Jens Axboe31067252020-05-17 17:43:31 -06005056 kfree(apoll);
Jens Axboed7718a92020-02-14 22:23:12 -07005057}
5058
5059static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5060 void *key)
5061{
5062 struct io_kiocb *req = wait->private;
5063 struct io_poll_iocb *poll = &req->apoll->poll;
5064
5065 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5066 key_to_poll(key));
5067
5068 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5069}
5070
5071static void io_poll_req_insert(struct io_kiocb *req)
5072{
5073 struct io_ring_ctx *ctx = req->ctx;
5074 struct hlist_head *list;
5075
5076 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5077 hlist_add_head(&req->hash_node, list);
5078}
5079
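/*
 * Arm a poll entry on the request's file: initialise the iocb, register on
 * the file's waitqueue(s) via vfs_poll(), and hash the request for later
 * cancellation if we end up waiting. Returns the ready mask (zero if armed)
 * with completion_lock held.
 */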
5080static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5081 struct io_poll_iocb *poll,
5082 struct io_poll_table *ipt, __poll_t mask,
5083 wait_queue_func_t wake_func)
5084 __acquires(&ctx->completion_lock)
5085{
5086 struct io_ring_ctx *ctx = req->ctx;
5087 bool cancel = false;
5088
Pavel Begunkov4d52f332020-10-18 10:17:43 +01005089 INIT_HLIST_NODE(&req->hash_node);
Jens Axboe18bceab2020-05-15 11:56:54 -06005090 io_init_poll_iocb(poll, mask, wake_func);
Pavel Begunkovb90cd192020-06-21 13:09:52 +03005091 poll->file = req->file;
Jens Axboe18bceab2020-05-15 11:56:54 -06005092 poll->wait.private = req;
Jens Axboed7718a92020-02-14 22:23:12 -07005093
5094 ipt->pt._key = mask;
5095 ipt->req = req;
5096 ipt->error = -EINVAL;
5097
Jens Axboed7718a92020-02-14 22:23:12 -07005098 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5099
5100 spin_lock_irq(&ctx->completion_lock);
5101 if (likely(poll->head)) {
5102 spin_lock(&poll->head->lock);
5103 if (unlikely(list_empty(&poll->wait.entry))) {
5104 if (ipt->error)
5105 cancel = true;
5106 ipt->error = 0;
5107 mask = 0;
5108 }
5109 if (mask || ipt->error)
5110 list_del_init(&poll->wait.entry);
5111 else if (cancel)
5112 WRITE_ONCE(poll->canceled, true);
5113 else if (!poll->done) /* actually waiting for an event */
5114 io_poll_req_insert(req);
5115 spin_unlock(&poll->head->lock);
5116 }
5117
5118 return mask;
5119}
5120
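/*
 * Instead of punting a would-block request to io-wq, try to arm poll on the
 * file so the request is retried from task_work once it becomes readable or
 * writable. Returns true if poll was armed.
 */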
5121static bool io_arm_poll_handler(struct io_kiocb *req)
5122{
5123 const struct io_op_def *def = &io_op_defs[req->opcode];
5124 struct io_ring_ctx *ctx = req->ctx;
5125 struct async_poll *apoll;
5126 struct io_poll_table ipt;
5127 __poll_t mask, ret;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005128 int rw;
Jens Axboed7718a92020-02-14 22:23:12 -07005129
5130 if (!req->file || !file_can_poll(req->file))
5131 return false;
Pavel Begunkov24c74672020-06-21 13:09:51 +03005132 if (req->flags & REQ_F_POLLED)
Jens Axboed7718a92020-02-14 22:23:12 -07005133 return false;
Jens Axboe9dab14b2020-08-25 12:27:50 -06005134 if (def->pollin)
5135 rw = READ;
5136 else if (def->pollout)
5137 rw = WRITE;
5138 else
5139 return false;
5140	/* if we can't do a nonblocking attempt, there's no point in arming a poll handler */
Jens Axboe7b29f922021-03-12 08:30:14 -07005141 if (!io_file_supports_async(req, rw))
Jens Axboed7718a92020-02-14 22:23:12 -07005142 return false;
5143
5144 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5145 if (unlikely(!apoll))
5146 return false;
Jens Axboe807abcb2020-07-17 17:09:27 -06005147 apoll->double_poll = NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005148
5149 req->flags |= REQ_F_POLLED;
Jens Axboed7718a92020-02-14 22:23:12 -07005150 req->apoll = apoll;
Jens Axboed7718a92020-02-14 22:23:12 -07005151
Nathan Chancellor8755d972020-03-02 16:01:19 -07005152 mask = 0;
Jens Axboed7718a92020-02-14 22:23:12 -07005153 if (def->pollin)
Nathan Chancellor8755d972020-03-02 16:01:19 -07005154 mask |= POLLIN | POLLRDNORM;
Jens Axboed7718a92020-02-14 22:23:12 -07005155 if (def->pollout)
5156 mask |= POLLOUT | POLLWRNORM;
Luke Hsiao901341b2020-08-21 21:41:05 -07005157
5158 /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5159 if ((req->opcode == IORING_OP_RECVMSG) &&
5160 (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5161 mask &= ~POLLIN;
5162
Jens Axboed7718a92020-02-14 22:23:12 -07005163 mask |= POLLERR | POLLPRI;
5164
5165 ipt.pt._qproc = io_async_queue_proc;
5166
5167 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5168 io_async_wake);
Jens Axboea36da652020-08-11 09:50:19 -06005169 if (ret || ipt.error) {
Jens Axboed4e7cd32020-08-15 11:44:50 -07005170 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07005171 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe807abcb2020-07-17 17:09:27 -06005172 kfree(apoll->double_poll);
Jens Axboed7718a92020-02-14 22:23:12 -07005173 kfree(apoll);
5174 return false;
5175 }
5176 spin_unlock_irq(&ctx->completion_lock);
5177 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5178 apoll->poll.events);
5179 return true;
5180}
5181
5182static bool __io_poll_remove_one(struct io_kiocb *req,
5183 struct io_poll_iocb *poll)
5184{
Jens Axboeb41e9852020-02-17 09:52:41 -07005185 bool do_complete = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005186
5187 spin_lock(&poll->head->lock);
5188 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07005189 if (!list_empty(&poll->wait.entry)) {
5190 list_del_init(&poll->wait.entry);
Jens Axboeb41e9852020-02-17 09:52:41 -07005191 do_complete = true;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005192 }
5193 spin_unlock(&poll->head->lock);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005194 hash_del(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07005195 return do_complete;
5196}
5197
5198static bool io_poll_remove_one(struct io_kiocb *req)
5199{
5200 bool do_complete;
5201
Jens Axboed4e7cd32020-08-15 11:44:50 -07005202 io_poll_remove_double(req);
5203
Jens Axboed7718a92020-02-14 22:23:12 -07005204 if (req->opcode == IORING_OP_POLL_ADD) {
5205 do_complete = __io_poll_remove_one(req, &req->poll);
5206 } else {
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005207 struct async_poll *apoll = req->apoll;
5208
Jens Axboed7718a92020-02-14 22:23:12 -07005209 /* non-poll requests have submit ref still */
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005210 do_complete = __io_poll_remove_one(req, &apoll->poll);
5211 if (do_complete) {
Jens Axboed7718a92020-02-14 22:23:12 -07005212 io_put_req(req);
Jens Axboe807abcb2020-07-17 17:09:27 -06005213 kfree(apoll->double_poll);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06005214 kfree(apoll);
5215 }
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08005216 }
5217
Jens Axboeb41e9852020-02-17 09:52:41 -07005218 if (do_complete) {
5219 io_cqring_fill_event(req, -ECANCELED);
5220 io_commit_cqring(req->ctx);
Jens Axboef254ac02020-08-12 17:33:30 -06005221 req_set_fail_links(req);
Pavel Begunkov216578e2020-10-13 09:44:00 +01005222 io_put_req_deferred(req, 1);
Jens Axboeb41e9852020-02-17 09:52:41 -07005223 }
5224
5225 return do_complete;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005226}
5227
Jens Axboe76e1b642020-09-26 15:05:03 -06005228/*
5229 * Returns true if we found and killed one or more poll requests
5230 */
Pavel Begunkov6b819282020-11-06 13:00:25 +00005231static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
5232 struct files_struct *files)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005233{
Jens Axboe78076bb2019-12-04 19:56:40 -07005234 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005235 struct io_kiocb *req;
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005236 int posted = 0, i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005237
5238 spin_lock_irq(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07005239 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5240 struct hlist_head *list;
5241
5242 list = &ctx->cancel_hash[i];
Jens Axboef3606e32020-09-22 08:18:24 -06005243 hlist_for_each_entry_safe(req, tmp, list, hash_node) {
Pavel Begunkov6b819282020-11-06 13:00:25 +00005244 if (io_match_task(req, tsk, files))
Jens Axboef3606e32020-09-22 08:18:24 -06005245 posted += io_poll_remove_one(req);
5246 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005247 }
5248 spin_unlock_irq(&ctx->completion_lock);
Jens Axboeb41e9852020-02-17 09:52:41 -07005249
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06005250 if (posted)
5251 io_cqring_ev_posted(ctx);
Jens Axboe76e1b642020-09-26 15:05:03 -06005252
5253 return posted != 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005254}
5255
Jens Axboe47f46762019-11-09 17:43:02 -07005256static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
5257{
Jens Axboe78076bb2019-12-04 19:56:40 -07005258 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07005259 struct io_kiocb *req;
5260
Jens Axboe78076bb2019-12-04 19:56:40 -07005261 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5262 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07005263 if (sqe_addr != req->user_data)
5264 continue;
5265 if (io_poll_remove_one(req))
Jens Axboeeac406c2019-11-14 12:09:58 -07005266 return 0;
Jens Axboeb41e9852020-02-17 09:52:41 -07005267 return -EALREADY;
Jens Axboe47f46762019-11-09 17:43:02 -07005268 }
5269
5270 return -ENOENT;
5271}
5272
Jens Axboe3529d8c2019-12-19 18:24:38 -07005273static int io_poll_remove_prep(struct io_kiocb *req,
5274 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005275{
Jens Axboe221c5eb2019-01-17 09:41:58 -07005276 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5277 return -EINVAL;
5278 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
5279 sqe->poll_events)
5280 return -EINVAL;
5281
Pavel Begunkov018043b2020-10-27 23:17:18 +00005282 req->poll_remove.addr = READ_ONCE(sqe->addr);
Jens Axboe0969e782019-12-17 18:40:57 -07005283 return 0;
5284}
5285
5286/*
5287 * Find a running poll command that matches one specified in sqe->addr,
5288 * and remove it if found.
5289 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005290static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005291{
5292 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe0969e782019-12-17 18:40:57 -07005293 int ret;
5294
Jens Axboe221c5eb2019-01-17 09:41:58 -07005295 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov018043b2020-10-27 23:17:18 +00005296 ret = io_poll_cancel(ctx, req->poll_remove.addr);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005297 spin_unlock_irq(&ctx->completion_lock);
5298
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005299 if (ret < 0)
5300 req_set_fail_links(req);
Jens Axboee1e16092020-06-22 09:17:17 -06005301 io_req_complete(req, ret);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005302 return 0;
5303}
5304
Jens Axboe221c5eb2019-01-17 09:41:58 -07005305static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5306 void *key)
5307{
Jens Axboec2f2eb72020-02-10 09:07:05 -07005308 struct io_kiocb *req = wait->private;
5309 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005310
Jens Axboed7718a92020-02-14 22:23:12 -07005311 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005312}
5313
Jens Axboe221c5eb2019-01-17 09:41:58 -07005314static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5315 struct poll_table_struct *p)
5316{
5317 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5318
Jens Axboee8c2bc12020-08-15 18:44:09 -07005319 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
Jens Axboeeac406c2019-11-14 12:09:58 -07005320}
5321
Jens Axboe3529d8c2019-12-19 18:24:38 -07005322static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07005323{
5324 struct io_poll_iocb *poll = &req->poll;
Jiufei Xue5769a352020-06-17 17:53:55 +08005325 u32 events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005326
5327 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5328 return -EINVAL;
5329 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
5330 return -EINVAL;
5331
Jiufei Xue5769a352020-06-17 17:53:55 +08005332 events = READ_ONCE(sqe->poll32_events);
5333#ifdef __BIG_ENDIAN
5334 events = swahw32(events);
5335#endif
Jens Axboe464dca62021-03-19 14:06:24 -06005336 poll->events = demangle_poll(events) | (events & EPOLLEXCLUSIVE);
Jens Axboe0969e782019-12-17 18:40:57 -07005337 return 0;
5338}
5339
Pavel Begunkov61e98202021-02-10 00:03:08 +00005340static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe0969e782019-12-17 18:40:57 -07005341{
5342 struct io_poll_iocb *poll = &req->poll;
5343 struct io_ring_ctx *ctx = req->ctx;
5344 struct io_poll_table ipt;
Jens Axboe0969e782019-12-17 18:40:57 -07005345 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07005346
Jens Axboed7718a92020-02-14 22:23:12 -07005347 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06005348
Jens Axboed7718a92020-02-14 22:23:12 -07005349 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5350 io_poll_wake);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005351
Jens Axboe8c838782019-03-12 15:48:16 -06005352 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06005353 ipt.error = 0;
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005354 io_poll_complete(req, mask, 0);
Jens Axboe8c838782019-03-12 15:48:16 -06005355 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07005356 spin_unlock_irq(&ctx->completion_lock);
5357
Jens Axboe8c838782019-03-12 15:48:16 -06005358 if (mask) {
5359 io_cqring_ev_posted(ctx);
Pavel Begunkov014db002020-03-03 21:33:12 +03005360 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005361 }
Jens Axboe8c838782019-03-12 15:48:16 -06005362 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005363}
5364
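/*
 * hrtimer callback for a timeout request: unlink it, bump the CQ timeout
 * counter and post an -ETIME completion.
 */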
Jens Axboe5262f562019-09-17 12:26:57 -06005365static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5366{
Jens Axboead8a48a2019-11-15 08:49:11 -07005367 struct io_timeout_data *data = container_of(timer,
5368 struct io_timeout_data, timer);
5369 struct io_kiocb *req = data->req;
5370 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06005371 unsigned long flags;
5372
Jens Axboe5262f562019-09-17 12:26:57 -06005373 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkova71976f2020-10-10 18:34:11 +01005374 list_del_init(&req->timeout.list);
Pavel Begunkov01cec8c2020-07-30 18:43:50 +03005375 atomic_set(&req->ctx->cq_timeouts,
5376 atomic_read(&req->ctx->cq_timeouts) + 1);
5377
Jens Axboe78e19bb2019-11-06 15:21:34 -07005378 io_cqring_fill_event(req, -ETIME);
Jens Axboe5262f562019-09-17 12:26:57 -06005379 io_commit_cqring(ctx);
5380 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5381
5382 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005383 req_set_fail_links(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005384 io_put_req(req);
5385 return HRTIMER_NORESTART;
5386}
5387
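/*
 * Find a pending timeout by user_data and try to cancel its timer. Returns
 * the unlinked request on success, or an ERR_PTR if no match was found or
 * the timer is already firing.
 */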
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005388static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
5389 __u64 user_data)
Jens Axboe47f46762019-11-09 17:43:02 -07005390{
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005391 struct io_timeout_data *io;
Jens Axboef254ac02020-08-12 17:33:30 -06005392 struct io_kiocb *req;
5393 int ret = -ENOENT;
5394
5395 list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
5396 if (user_data == req->user_data) {
5397 ret = 0;
5398 break;
5399 }
5400 }
5401
5402 if (ret == -ENOENT)
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005403 return ERR_PTR(ret);
Jens Axboef254ac02020-08-12 17:33:30 -06005404
Pavel Begunkovfbd15842020-11-30 19:11:15 +00005405 io = req->async_data;
5406 ret = hrtimer_try_to_cancel(&io->timer);
5407 if (ret == -1)
5408 return ERR_PTR(-EALREADY);
5409 list_del_init(&req->timeout.list);
5410 return req;
5411}
5412
5413static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5414{
5415 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5416
5417 if (IS_ERR(req))
5418 return PTR_ERR(req);
5419
5420 req_set_fail_links(req);
5421 io_cqring_fill_event(req, -ECANCELED);
5422 io_put_req_deferred(req, 1);
5423 return 0;
Jens Axboef254ac02020-08-12 17:33:30 -06005424}
5425
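/*
 * IORING_TIMEOUT_UPDATE: take the existing timeout off the list and re-arm
 * its timer with the new timespec, treating it as a pure (noseq) timeout.
 */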
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005426static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
5427 struct timespec64 *ts, enum hrtimer_mode mode)
5428{
5429 struct io_kiocb *req = io_timeout_extract(ctx, user_data);
5430 struct io_timeout_data *data;
5431
5432 if (IS_ERR(req))
5433 return PTR_ERR(req);
5434
5435 req->timeout.off = 0; /* noseq */
5436 data = req->async_data;
5437 list_add_tail(&req->timeout.list, &ctx->timeout_list);
5438 hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
5439 data->timer.function = io_timeout_fn;
5440 hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
5441 return 0;
Jens Axboe47f46762019-11-09 17:43:02 -07005442}
5443
Jens Axboe3529d8c2019-12-19 18:24:38 -07005444static int io_timeout_remove_prep(struct io_kiocb *req,
5445 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07005446{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005447 struct io_timeout_rem *tr = &req->timeout_rem;
5448
Jens Axboeb29472e2019-12-17 18:50:29 -07005449 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5450 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005451 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5452 return -EINVAL;
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005453 if (sqe->ioprio || sqe->buf_index || sqe->len)
Jens Axboeb29472e2019-12-17 18:50:29 -07005454 return -EINVAL;
5455
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005456 tr->addr = READ_ONCE(sqe->addr);
5457 tr->flags = READ_ONCE(sqe->timeout_flags);
5458 if (tr->flags & IORING_TIMEOUT_UPDATE) {
5459 if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
5460 return -EINVAL;
5461 if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
5462 return -EFAULT;
5463 } else if (tr->flags) {
5464 /* timeout removal doesn't support flags */
5465 return -EINVAL;
5466 }
5467
Jens Axboeb29472e2019-12-17 18:50:29 -07005468 return 0;
5469}
5470
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005471static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
5472{
5473 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
5474 : HRTIMER_MODE_REL;
5475}
5476
Jens Axboe11365042019-10-16 09:08:32 -06005477/*
5478 * Remove or update an existing timeout command
5479 */
Pavel Begunkov61e98202021-02-10 00:03:08 +00005480static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe11365042019-10-16 09:08:32 -06005481{
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005482 struct io_timeout_rem *tr = &req->timeout_rem;
Jens Axboe11365042019-10-16 09:08:32 -06005483 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07005484 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06005485
Jens Axboe11365042019-10-16 09:08:32 -06005486 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005487 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
Pavel Begunkov9c8e11b2020-11-30 19:11:16 +00005488 ret = io_timeout_cancel(ctx, tr->addr);
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005489 else
5490 ret = io_timeout_update(ctx, tr->addr, &tr->ts,
5491 io_translate_timeout_mode(tr->flags));
Jens Axboe11365042019-10-16 09:08:32 -06005492
Jens Axboe47f46762019-11-09 17:43:02 -07005493 io_cqring_fill_event(req, ret);
Jens Axboe11365042019-10-16 09:08:32 -06005494 io_commit_cqring(ctx);
5495 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005496 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005497 if (ret < 0)
5498 req_set_fail_links(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08005499 io_put_req(req);
Jens Axboe11365042019-10-16 09:08:32 -06005500 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06005501}
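
/*
 * For illustration only (not part of this file's API): a minimal userspace
 * sketch of how an application could update a previously submitted timeout,
 * mirroring the sqe fields parsed by io_timeout_remove_prep() above. The
 * ring setup, "prev_udata" tag and error handling are assumed; liburing also
 * ships prep helpers, but the raw form keeps the field mapping explicit.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 5 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_TIMEOUT_REMOVE;
 *	sqe->addr = prev_udata;			// user_data of the armed timeout
 *	sqe->addr2 = (unsigned long) &ts;	// new (relative) timeout value
 *	sqe->timeout_flags = IORING_TIMEOUT_UPDATE;
 *	sqe->user_data = update_udata;
 *	io_uring_submit(&ring);
 */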
5502
Jens Axboe3529d8c2019-12-19 18:24:38 -07005503static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07005504 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06005505{
Jens Axboead8a48a2019-11-15 08:49:11 -07005506 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06005507 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005508 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06005509
Jens Axboead8a48a2019-11-15 08:49:11 -07005510 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06005511 return -EINVAL;
Jens Axboead8a48a2019-11-15 08:49:11 -07005512 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
Jens Axboea41525a2019-10-15 16:48:15 -06005513 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03005514 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07005515 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06005516 flags = READ_ONCE(sqe->timeout_flags);
5517 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06005518 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06005519
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005520 req->timeout.off = off;
Jens Axboe26a61672019-12-20 09:02:01 -07005521
Jens Axboee8c2bc12020-08-15 18:44:09 -07005522 if (!req->async_data && io_alloc_async_data(req))
Jens Axboe26a61672019-12-20 09:02:01 -07005523 return -ENOMEM;
5524
Jens Axboee8c2bc12020-08-15 18:44:09 -07005525 data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005526 data->req = req;
Jens Axboead8a48a2019-11-15 08:49:11 -07005527
5528 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06005529 return -EFAULT;
5530
Pavel Begunkov8662dae2021-01-19 13:32:44 +00005531 data->mode = io_translate_timeout_mode(flags);
Jens Axboead8a48a2019-11-15 08:49:11 -07005532 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
Pavel Begunkov2482b582021-03-25 18:32:44 +00005533 if (is_timeout_link)
5534 io_req_track_inflight(req);
Jens Axboead8a48a2019-11-15 08:49:11 -07005535 return 0;
5536}
5537
Pavel Begunkov61e98202021-02-10 00:03:08 +00005538static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboead8a48a2019-11-15 08:49:11 -07005539{
Jens Axboead8a48a2019-11-15 08:49:11 -07005540 struct io_ring_ctx *ctx = req->ctx;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005541 struct io_timeout_data *data = req->async_data;
Jens Axboead8a48a2019-11-15 08:49:11 -07005542 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005543 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07005544
Pavel Begunkov733f5c92020-05-26 20:34:03 +03005545 spin_lock_irq(&ctx->completion_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07005546
Jens Axboe5262f562019-09-17 12:26:57 -06005547 /*
5548	 * sqe->off holds how many completion events need to occur before this
Jens Axboe93bd25b2019-11-11 23:34:31 -07005549	 * timeout event is satisfied. If it isn't set, then this is
5550	 * a pure timeout request and the sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06005551 */
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005552 if (io_is_timeout_noseq(req)) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07005553 entry = ctx->timeout_list.prev;
5554 goto add;
5555 }
Jens Axboe5262f562019-09-17 12:26:57 -06005556
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005557 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5558 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06005559
Marcelo Diop-Gonzalezf0105052021-01-15 11:54:40 -05005560 /* Update the last seq here in case io_flush_timeouts() hasn't.
5561 * This is safe because ->completion_lock is held, and submissions
5562 * and completions are never mixed in the same ->completion_lock section.
5563 */
5564 ctx->cq_last_tm_flush = tail;
5565
Jens Axboe5262f562019-09-17 12:26:57 -06005566 /*
5567 * Insertion sort, ensuring the first entry in the list is always
5568 * the one we need first.
5569 */
Jens Axboe5262f562019-09-17 12:26:57 -06005570 list_for_each_prev(entry, &ctx->timeout_list) {
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005571 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5572 timeout.list);
Jens Axboe5262f562019-09-17 12:26:57 -06005573
Pavel Begunkov8eb7e2d2020-06-29 13:13:02 +03005574 if (io_is_timeout_noseq(nxt))
Jens Axboe93bd25b2019-11-11 23:34:31 -07005575 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03005576 /* nxt.seq is behind @tail, otherwise would've been completed */
5577 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06005578 break;
5579 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07005580add:
Pavel Begunkov135fcde2020-07-13 23:37:12 +03005581 list_add(&req->timeout.list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07005582 data->timer.function = io_timeout_fn;
5583 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe842f9612019-10-29 12:34:10 -06005584 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06005585 return 0;
5586}
5587
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005588struct io_cancel_data {
5589 struct io_ring_ctx *ctx;
5590 u64 user_data;
5591};
5592
Jens Axboe62755e32019-10-28 21:49:21 -06005593static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06005594{
Jens Axboe62755e32019-10-28 21:49:21 -06005595 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005596 struct io_cancel_data *cd = data;
Jens Axboede0617e2019-04-06 21:51:27 -06005597
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005598 return req->ctx == cd->ctx && req->user_data == cd->user_data;
Jens Axboe62755e32019-10-28 21:49:21 -06005599}
5600
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005601static int io_async_cancel_one(struct io_uring_task *tctx, u64 user_data,
5602 struct io_ring_ctx *ctx)
Jens Axboe62755e32019-10-28 21:49:21 -06005603{
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005604 struct io_cancel_data data = { .ctx = ctx, .user_data = user_data, };
Jens Axboe62755e32019-10-28 21:49:21 -06005605 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06005606 int ret = 0;
5607
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005608 if (!tctx || !tctx->io_wq)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07005609 return -ENOENT;
5610
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005611 cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, &data, false);
Jens Axboe62755e32019-10-28 21:49:21 -06005612 switch (cancel_ret) {
5613 case IO_WQ_CANCEL_OK:
5614 ret = 0;
5615 break;
5616 case IO_WQ_CANCEL_RUNNING:
5617 ret = -EALREADY;
5618 break;
5619 case IO_WQ_CANCEL_NOTFOUND:
5620 ret = -ENOENT;
5621 break;
5622 }
5623
Jens Axboee977d6d2019-11-05 12:39:45 -07005624 return ret;
5625}
5626
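/*
 * Try to cancel a request identified by sqe_addr: first in the task's io-wq,
 * then among pending timeouts, then poll requests. The outcome (or
 * success_ret on success) is posted as this request's completion.
 */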
Jens Axboe47f46762019-11-09 17:43:02 -07005627static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5628 struct io_kiocb *req, __u64 sqe_addr,
Pavel Begunkov014db002020-03-03 21:33:12 +03005629 int success_ret)
Jens Axboe47f46762019-11-09 17:43:02 -07005630{
5631 unsigned long flags;
5632 int ret;
5633
Pavel Begunkovf458dd842021-03-08 12:14:14 +00005634 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
Jens Axboe47f46762019-11-09 17:43:02 -07005635 if (ret != -ENOENT) {
5636 spin_lock_irqsave(&ctx->completion_lock, flags);
5637 goto done;
5638 }
5639
5640 spin_lock_irqsave(&ctx->completion_lock, flags);
5641 ret = io_timeout_cancel(ctx, sqe_addr);
5642 if (ret != -ENOENT)
5643 goto done;
5644 ret = io_poll_cancel(ctx, sqe_addr);
5645done:
Jens Axboeb0dd8a42019-11-18 12:14:54 -07005646 if (!ret)
5647 ret = success_ret;
Jens Axboe47f46762019-11-09 17:43:02 -07005648 io_cqring_fill_event(req, ret);
5649 io_commit_cqring(ctx);
5650 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5651 io_cqring_ev_posted(ctx);
5652
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005653 if (ret < 0)
5654 req_set_fail_links(req);
Pavel Begunkov014db002020-03-03 21:33:12 +03005655 io_put_req(req);
Jens Axboe47f46762019-11-09 17:43:02 -07005656}
5657
Jens Axboe3529d8c2019-12-19 18:24:38 -07005658static int io_async_cancel_prep(struct io_kiocb *req,
5659 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07005660{
Jens Axboefbf23842019-12-17 18:45:56 -07005661 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07005662 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005663 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5664 return -EINVAL;
5665 if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
Jens Axboee977d6d2019-11-05 12:39:45 -07005666 return -EINVAL;
5667
Jens Axboefbf23842019-12-17 18:45:56 -07005668 req->cancel.addr = READ_ONCE(sqe->addr);
5669 return 0;
5670}
5671
Pavel Begunkov61e98202021-02-10 00:03:08 +00005672static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboefbf23842019-12-17 18:45:56 -07005673{
5674 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov58f99372021-03-12 16:25:55 +00005675 u64 sqe_addr = req->cancel.addr;
5676 struct io_tctx_node *node;
5677 int ret;
Jens Axboefbf23842019-12-17 18:45:56 -07005678
Pavel Begunkov58f99372021-03-12 16:25:55 +00005679 /* tasks should wait for their io-wq threads, so safe w/o sync */
5680 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
5681 spin_lock_irq(&ctx->completion_lock);
5682 if (ret != -ENOENT)
5683 goto done;
5684 ret = io_timeout_cancel(ctx, sqe_addr);
5685 if (ret != -ENOENT)
5686 goto done;
5687 ret = io_poll_cancel(ctx, sqe_addr);
5688 if (ret != -ENOENT)
5689 goto done;
5690 spin_unlock_irq(&ctx->completion_lock);
5691
5692 /* slow path, try all io-wq's */
5693 io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5694 ret = -ENOENT;
5695 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
5696 struct io_uring_task *tctx = node->task->io_uring;
5697
5698 if (!tctx || !tctx->io_wq)
5699 continue;
5700 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx);
5701 if (ret != -ENOENT)
5702 break;
5703 }
5704 io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
5705
5706 spin_lock_irq(&ctx->completion_lock);
5707done:
5708 io_cqring_fill_event(req, ret);
5709 io_commit_cqring(ctx);
5710 spin_unlock_irq(&ctx->completion_lock);
5711 io_cqring_ev_posted(ctx);
5712
5713 if (ret < 0)
5714 req_set_fail_links(req);
5715 io_put_req(req);
Jens Axboe62755e32019-10-28 21:49:21 -06005716 return 0;
5717}
5718
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005719static int io_rsrc_update_prep(struct io_kiocb *req,
Jens Axboe05f3fb32019-12-09 11:22:50 -07005720 const struct io_uring_sqe *sqe)
5721{
Jens Axboe6ca56f82020-09-18 16:51:19 -06005722 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
5723 return -EINVAL;
Daniele Albano61710e42020-07-18 14:15:16 -06005724 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5725 return -EINVAL;
5726 if (sqe->ioprio || sqe->rw_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005727 return -EINVAL;
5728
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005729 req->rsrc_update.offset = READ_ONCE(sqe->off);
5730 req->rsrc_update.nr_args = READ_ONCE(sqe->len);
5731 if (!req->rsrc_update.nr_args)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005732 return -EINVAL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005733 req->rsrc_update.arg = READ_ONCE(sqe->addr);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005734 return 0;
5735}
5736
Pavel Begunkov889fca72021-02-10 00:03:09 +00005737static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005738{
5739 struct io_ring_ctx *ctx = req->ctx;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005740 struct io_uring_rsrc_update up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005741 int ret;
5742
Pavel Begunkov45d189c2021-02-10 00:03:07 +00005743 if (issue_flags & IO_URING_F_NONBLOCK)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005744 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005745
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005746 up.offset = req->rsrc_update.offset;
5747 up.data = req->rsrc_update.arg;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005748
5749 mutex_lock(&ctx->uring_lock);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005750 ret = __io_sqe_files_update(ctx, &up, req->rsrc_update.nr_args);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005751 mutex_unlock(&ctx->uring_lock);
5752
5753 if (ret < 0)
5754 req_set_fail_links(req);
Pavel Begunkov889fca72021-02-10 00:03:09 +00005755 __io_req_complete(req, issue_flags, ret, 0);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005756 return 0;
5757}
5758
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005759static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07005760{
Jens Axboed625c6e2019-12-17 19:53:05 -07005761 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07005762 case IORING_OP_NOP:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005763 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07005764 case IORING_OP_READV:
5765 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005766 case IORING_OP_READ:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005767 return io_read_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005768 case IORING_OP_WRITEV:
5769 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005770 case IORING_OP_WRITE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005771 return io_write_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005772 case IORING_OP_POLL_ADD:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005773 return io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07005774 case IORING_OP_POLL_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005775 return io_poll_remove_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005776 case IORING_OP_FSYNC:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005777 return io_fsync_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005778 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov1155c762021-02-18 18:29:38 +00005779 return io_sfr_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005780 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005781 case IORING_OP_SEND:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005782 return io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07005783 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005784 case IORING_OP_RECV:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005785 return io_recvmsg_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07005786 case IORING_OP_CONNECT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005787 return io_connect_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005788 case IORING_OP_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005789 return io_timeout_prep(req, sqe, false);
Jens Axboeb29472e2019-12-17 18:50:29 -07005790 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005791 return io_timeout_remove_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07005792 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005793 return io_async_cancel_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005794 case IORING_OP_LINK_TIMEOUT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005795 return io_timeout_prep(req, sqe, true);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07005796 case IORING_OP_ACCEPT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005797 return io_accept_prep(req, sqe);
Jens Axboed63d1b52019-12-10 10:38:56 -07005798 case IORING_OP_FALLOCATE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005799 return io_fallocate_prep(req, sqe);
Jens Axboe15b71ab2019-12-11 11:20:36 -07005800 case IORING_OP_OPENAT:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005801 return io_openat_prep(req, sqe);
Jens Axboeb5dba592019-12-11 14:02:38 -07005802 case IORING_OP_CLOSE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005803 return io_close_prep(req, sqe);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005804 case IORING_OP_FILES_UPDATE:
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00005805 return io_rsrc_update_prep(req, sqe);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005806 case IORING_OP_STATX:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005807 return io_statx_prep(req, sqe);
Jens Axboe4840e412019-12-25 22:03:45 -07005808 case IORING_OP_FADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005809 return io_fadvise_prep(req, sqe);
Jens Axboec1ca7572019-12-25 22:18:28 -07005810 case IORING_OP_MADVISE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005811 return io_madvise_prep(req, sqe);
Jens Axboecebdb982020-01-08 17:59:24 -07005812 case IORING_OP_OPENAT2:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005813 return io_openat2_prep(req, sqe);
Jens Axboe3e4827b2020-01-08 15:18:09 -07005814 case IORING_OP_EPOLL_CTL:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005815 return io_epoll_ctl_prep(req, sqe);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005816 case IORING_OP_SPLICE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005817 return io_splice_prep(req, sqe);
Jens Axboeddf0322d2020-02-23 16:41:33 -07005818 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005819 return io_provide_buffers_prep(req, sqe);
Jens Axboe067524e2020-03-02 16:32:28 -07005820 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005821 return io_remove_buffers_prep(req, sqe);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005822 case IORING_OP_TEE:
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005823 return io_tee_prep(req, sqe);
Jens Axboe36f4fa62020-09-05 11:14:22 -06005824 case IORING_OP_SHUTDOWN:
5825 return io_shutdown_prep(req, sqe);
Jens Axboe80a261f2020-09-28 14:23:58 -06005826 case IORING_OP_RENAMEAT:
5827 return io_renameat_prep(req, sqe);
Jens Axboe14a11432020-09-28 14:27:37 -06005828 case IORING_OP_UNLINKAT:
5829 return io_unlinkat_prep(req, sqe);
Jens Axboef67676d2019-12-02 11:03:47 -07005830 }
5831
Pavel Begunkovbfe76552020-09-30 22:57:55 +03005832 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5833 req->opcode);
5834	return -EINVAL;
5835}
5836
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005837static int io_req_prep_async(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07005838{
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00005839 if (!io_op_defs[req->opcode].needs_async_setup)
5840 return 0;
5841 if (WARN_ON_ONCE(req->async_data))
5842 return -EFAULT;
5843 if (io_alloc_async_data(req))
5844 return -EAGAIN;
5845
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005846 switch (req->opcode) {
5847 case IORING_OP_READV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005848 return io_rw_prep_async(req, READ);
5849 case IORING_OP_WRITEV:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005850 return io_rw_prep_async(req, WRITE);
5851 case IORING_OP_SENDMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005852 return io_sendmsg_prep_async(req);
5853 case IORING_OP_RECVMSG:
Pavel Begunkov93642ef2021-02-18 18:29:44 +00005854 return io_recvmsg_prep_async(req);
5855 case IORING_OP_CONNECT:
5856 return io_connect_prep_async(req);
5857 }
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00005858 printk_once(KERN_WARNING "io_uring: prep_async() bad opcode %d\n",
5859 req->opcode);
5860 return -EFAULT;
Jens Axboedef596e2019-01-09 08:59:42 -07005861}
5862
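/*
 * Compute the drain sequence for this request: the number of SQEs consumed
 * before it, not counting the request's own link chain.
 */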
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005863static u32 io_get_sequence(struct io_kiocb *req)
5864{
5865 struct io_kiocb *pos;
5866 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00005867 u32 total_submitted, nr_reqs = 0;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005868
Pavel Begunkovf2f87372020-10-27 23:25:37 +00005869 io_for_each_link(pos, req)
5870 nr_reqs++;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005871
5872 total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
5873 return total_submitted - nr_reqs;
5874}
5875
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005876static int io_req_defer(struct io_kiocb *req)
Jens Axboedef596e2019-01-09 08:59:42 -07005877{
5878 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005879 struct io_defer_entry *de;
Jens Axboedef596e2019-01-09 08:59:42 -07005880 int ret;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005881 u32 seq;
Jens Axboedef596e2019-01-09 08:59:42 -07005882
5883 /* Still need defer if there is pending req in defer list. */
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005884 if (likely(list_empty_careful(&ctx->defer_list) &&
5885 !(req->flags & REQ_F_IO_DRAIN)))
5886 return 0;
5887
5888 seq = io_get_sequence(req);
5889 /* Still a chance to pass the sequence check */
5890 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
Jens Axboedef596e2019-01-09 08:59:42 -07005891 return 0;
5892
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00005893 ret = io_req_prep_async(req);
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00005894 if (ret)
5895 return ret;
Pavel Begunkovcbdcb432020-06-29 19:18:43 +03005896 io_prep_async_link(req);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005897 de = kmalloc(sizeof(*de), GFP_KERNEL);
5898 if (!de)
5899 return -ENOMEM;
Jens Axboe31b51512019-01-18 22:56:34 -07005900
5901 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005902 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
Jens Axboe31b51512019-01-18 22:56:34 -07005903 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005904 kfree(de);
Pavel Begunkovae348172020-07-23 20:25:20 +03005905 io_queue_async_work(req);
5906 return -EIOCBQUEUED;
Jens Axboe31b51512019-01-18 22:56:34 -07005907 }
5908
5909 trace_io_uring_defer(ctx, req, req->user_data);
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005910 de->req = req;
Pavel Begunkov9cf7c102020-07-13 23:37:15 +03005911 de->seq = seq;
Pavel Begunkov27dc8332020-07-13 23:37:14 +03005912 list_add_tail(&de->list, &ctx->defer_list);
Jens Axboe31b51512019-01-18 22:56:34 -07005913 spin_unlock_irq(&ctx->completion_lock);
5914 return -EIOCBQUEUED;
5915}
5916
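/*
 * Drop per-opcode resources a request may still hold: selected provided
 * buffers, copied iovecs and msghdrs, splice input files, and path names
 * from open/rename/unlink.
 */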
Pavel Begunkov68fb8972021-03-19 17:22:41 +00005917static void io_clean_op(struct io_kiocb *req)
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005918{
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005919 if (req->flags & REQ_F_BUFFER_SELECTED) {
5920 switch (req->opcode) {
5921 case IORING_OP_READV:
5922 case IORING_OP_READ_FIXED:
5923 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07005924 kfree((void *)(unsigned long)req->rw.addr);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005925 break;
5926 case IORING_OP_RECVMSG:
5927 case IORING_OP_RECV:
Jens Axboe52de1fe2020-02-27 10:15:42 -07005928 kfree(req->sr_msg.kbuf);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005929 break;
5930 }
5931 req->flags &= ~REQ_F_BUFFER_SELECTED;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005932 }
5933
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005934 if (req->flags & REQ_F_NEED_CLEANUP) {
5935 switch (req->opcode) {
5936 case IORING_OP_READV:
5937 case IORING_OP_READ_FIXED:
5938 case IORING_OP_READ:
5939 case IORING_OP_WRITEV:
5940 case IORING_OP_WRITE_FIXED:
Jens Axboee8c2bc12020-08-15 18:44:09 -07005941 case IORING_OP_WRITE: {
5942 struct io_async_rw *io = req->async_data;
5943			kfree(io->free_iovec);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005945 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005946 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005947 case IORING_OP_RECVMSG:
Jens Axboee8c2bc12020-08-15 18:44:09 -07005948 case IORING_OP_SENDMSG: {
5949 struct io_async_msghdr *io = req->async_data;
Pavel Begunkov257e84a2021-02-05 00:58:00 +00005950
5951 kfree(io->free_iov);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005952 break;
Jens Axboee8c2bc12020-08-15 18:44:09 -07005953 }
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005954 case IORING_OP_SPLICE:
5955 case IORING_OP_TEE:
Pavel Begunkove1d767f2021-03-19 17:22:43 +00005956 if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
5957 io_put_file(req->splice.file_in);
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005958 break;
Jens Axboef3cd48502020-09-24 14:55:54 -06005959 case IORING_OP_OPENAT:
5960 case IORING_OP_OPENAT2:
5961 if (req->open.filename)
5962 putname(req->open.filename);
5963 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06005964 case IORING_OP_RENAMEAT:
5965 putname(req->rename.oldpath);
5966 putname(req->rename.newpath);
5967 break;
Jens Axboe14a11432020-09-28 14:27:37 -06005968 case IORING_OP_UNLINKAT:
5969 putname(req->unlink.filename);
5970 break;
Pavel Begunkov0e1b6fe32020-07-16 23:28:02 +03005971 }
5972 req->flags &= ~REQ_F_NEED_CLEANUP;
5973 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005974}
5975
Pavel Begunkov889fca72021-02-10 00:03:09 +00005976static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
Jens Axboeedafcce2019-01-09 09:16:05 -07005977{
Jens Axboeedafcce2019-01-09 09:16:05 -07005978 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5730b272021-02-27 15:57:30 -07005979 const struct cred *creds = NULL;
Jens Axboed625c6e2019-12-17 19:53:05 -07005980 int ret;
Jens Axboeedafcce2019-01-09 09:16:05 -07005981
Jens Axboe003e8dc2021-03-06 09:22:27 -07005982 if (req->work.creds && req->work.creds != current_cred())
5983 creds = override_creds(req->work.creds);
Jens Axboe5730b272021-02-27 15:57:30 -07005984
Jens Axboed625c6e2019-12-17 19:53:05 -07005985 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07005986 case IORING_OP_NOP:
Pavel Begunkov889fca72021-02-10 00:03:09 +00005987 ret = io_nop(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005988 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005989 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005990 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005991 case IORING_OP_READ:
Pavel Begunkov889fca72021-02-10 00:03:09 +00005992 ret = io_read(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005993 break;
5994 case IORING_OP_WRITEV:
Jens Axboe2b188cc2019-01-07 10:46:33 -07005995 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005996 case IORING_OP_WRITE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00005997 ret = io_write(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005998 break;
5999 case IORING_OP_FSYNC:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006000 ret = io_fsync(req, issue_flags);
Jackie Liuba5290c2019-10-09 09:19:59 +08006001 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006002 case IORING_OP_POLL_ADD:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006003 ret = io_poll_add(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006004 break;
6005 case IORING_OP_POLL_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006006 ret = io_poll_remove(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006007 break;
Jens Axboeb76da702019-11-20 13:05:32 -07006008 case IORING_OP_SYNC_FILE_RANGE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006009 ret = io_sync_file_range(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006010 break;
6011 case IORING_OP_SENDMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006012 ret = io_sendmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006013 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006014 case IORING_OP_SEND:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006015 ret = io_send(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006016 break;
6017 case IORING_OP_RECVMSG:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006018 ret = io_recvmsg(req, issue_flags);
Pavel Begunkov062d04d2020-10-10 18:34:12 +01006019 break;
Jens Axboefddafac2020-01-04 20:19:44 -07006020 case IORING_OP_RECV:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006021 ret = io_recv(req, issue_flags);
Jens Axboeb76da702019-11-20 13:05:32 -07006022 break;
Jens Axboe561fb042019-10-24 07:25:42 -06006023 case IORING_OP_TIMEOUT:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006024 ret = io_timeout(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006025 break;
6026 case IORING_OP_TIMEOUT_REMOVE:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006027 ret = io_timeout_remove(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006028 break;
6029 case IORING_OP_ACCEPT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006030 ret = io_accept(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006031 break;
6032 case IORING_OP_CONNECT:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006033 ret = io_connect(req, issue_flags);
Jens Axboe31b51512019-01-18 22:56:34 -07006034 break;
6035 case IORING_OP_ASYNC_CANCEL:
Pavel Begunkov61e98202021-02-10 00:03:08 +00006036 ret = io_async_cancel(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006037 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07006038 case IORING_OP_FALLOCATE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006039 ret = io_fallocate(req, issue_flags);
Jens Axboed63d1b52019-12-10 10:38:56 -07006040 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07006041 case IORING_OP_OPENAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006042 ret = io_openat(req, issue_flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07006043 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07006044 case IORING_OP_CLOSE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006045 ret = io_close(req, issue_flags);
Jens Axboeb5dba592019-12-11 14:02:38 -07006046 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006047 case IORING_OP_FILES_UPDATE:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006048 ret = io_files_update(req, issue_flags);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006049 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006050 case IORING_OP_STATX:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006051 ret = io_statx(req, issue_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07006052 break;
Jens Axboe4840e412019-12-25 22:03:45 -07006053 case IORING_OP_FADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006054 ret = io_fadvise(req, issue_flags);
Jens Axboe4840e412019-12-25 22:03:45 -07006055 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07006056 case IORING_OP_MADVISE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006057 ret = io_madvise(req, issue_flags);
Jens Axboec1ca7572019-12-25 22:18:28 -07006058 break;
Jens Axboecebdb982020-01-08 17:59:24 -07006059 case IORING_OP_OPENAT2:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006060 ret = io_openat2(req, issue_flags);
Jens Axboecebdb982020-01-08 17:59:24 -07006061 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07006062 case IORING_OP_EPOLL_CTL:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006063 ret = io_epoll_ctl(req, issue_flags);
Jens Axboe3e4827b2020-01-08 15:18:09 -07006064 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006065 case IORING_OP_SPLICE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006066 ret = io_splice(req, issue_flags);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03006067 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07006068 case IORING_OP_PROVIDE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006069 ret = io_provide_buffers(req, issue_flags);
Jens Axboeddf0322d2020-02-23 16:41:33 -07006070 break;
Jens Axboe067524e2020-03-02 16:32:28 -07006071 case IORING_OP_REMOVE_BUFFERS:
Pavel Begunkov889fca72021-02-10 00:03:09 +00006072 ret = io_remove_buffers(req, issue_flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006073 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006074 case IORING_OP_TEE:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006075 ret = io_tee(req, issue_flags);
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03006076 break;
Jens Axboe36f4fa62020-09-05 11:14:22 -06006077 case IORING_OP_SHUTDOWN:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006078 ret = io_shutdown(req, issue_flags);
Jens Axboe36f4fa62020-09-05 11:14:22 -06006079 break;
Jens Axboe80a261f2020-09-28 14:23:58 -06006080 case IORING_OP_RENAMEAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006081 ret = io_renameat(req, issue_flags);
Jens Axboe80a261f2020-09-28 14:23:58 -06006082 break;
Jens Axboe14a11432020-09-28 14:27:37 -06006083 case IORING_OP_UNLINKAT:
Pavel Begunkov45d189c2021-02-10 00:03:07 +00006084 ret = io_unlinkat(req, issue_flags);
Jens Axboe14a11432020-09-28 14:27:37 -06006085 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006086 default:
6087 ret = -EINVAL;
6088 break;
6089 }
Jens Axboe31b51512019-01-18 22:56:34 -07006090
Jens Axboe5730b272021-02-27 15:57:30 -07006091 if (creds)
6092 revert_creds(creds);
6093
Jens Axboe2b188cc2019-01-07 10:46:33 -07006094 if (ret)
6095 return ret;
6096
Jens Axboeb5325762020-05-19 21:20:27 -06006097 /* If the op doesn't have a file, we're not polling for it */
6098 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
Jens Axboe11ba8202020-01-15 21:51:17 -07006099 const bool in_async = io_wq_current_is_worker();
6100
Jens Axboe11ba8202020-01-15 21:51:17 -07006101 /* workqueue context doesn't hold uring_lock, grab it now */
6102 if (in_async)
6103 mutex_lock(&ctx->uring_lock);
6104
Xiaoguang Wang2e9dbe92020-11-13 00:44:08 +08006105 io_iopoll_req_issued(req, in_async);
Jens Axboe11ba8202020-01-15 21:51:17 -07006106
6107 if (in_async)
6108 mutex_unlock(&ctx->uring_lock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006109 }
6110
6111 return 0;
6112}
6113
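/*
 * io-wq worker entry point: issue the request from blocking context,
 * retrying on -EAGAIN since polled IO can't wait for request slots on the
 * block side from here. Failures are completed via task_work from the
 * task's own context to avoid locking problems.
 */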
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00006114static void io_wq_submit_work(struct io_wq_work *work)
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006115{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006116 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006117 struct io_kiocb *timeout;
Jens Axboe561fb042019-10-24 07:25:42 -06006118 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006119
Pavel Begunkov6df1db62020-07-03 22:15:06 +03006120 timeout = io_prep_linked_timeout(req);
6121 if (timeout)
6122 io_queue_linked_timeout(timeout);
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03006123
Jens Axboe4014d942021-01-19 15:53:54 -07006124 if (work->flags & IO_WQ_WORK_CANCEL)
Jens Axboe561fb042019-10-24 07:25:42 -06006125 ret = -ECANCELED;
Jens Axboe31b51512019-01-18 22:56:34 -07006126
Jens Axboe561fb042019-10-24 07:25:42 -06006127 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06006128 do {
Pavel Begunkov889fca72021-02-10 00:03:09 +00006129 ret = io_issue_sqe(req, 0);
Jens Axboe561fb042019-10-24 07:25:42 -06006130 /*
6131 * We can get EAGAIN for polled IO even though we're
6132 * forcing a sync submission from here, since we can't
6133 * wait for request slots on the block side.
6134 */
6135 if (ret != -EAGAIN)
6136 break;
6137 cond_resched();
6138 } while (1);
6139 }
Jens Axboe31b51512019-01-18 22:56:34 -07006140
Pavel Begunkova3df76982021-02-18 22:32:52 +00006141 /* avoid locking problems by failing it from a clean context */
Jens Axboe561fb042019-10-24 07:25:42 -06006142 if (ret) {
Pavel Begunkova3df76982021-02-18 22:32:52 +00006143 /* io-wq is going to take one down */
Jens Axboede9b4cc2021-02-24 13:28:27 -07006144 req_ref_get(req);
Pavel Begunkova3df76982021-02-18 22:32:52 +00006145 io_req_task_queue_fail(req, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07006146 }
Jens Axboe31b51512019-01-18 22:56:34 -07006147}
Jens Axboe2b188cc2019-01-07 10:46:33 -07006148
Jens Axboe7b29f922021-03-12 08:30:14 -07006149#define FFS_ASYNC_READ 0x1UL
6150#define FFS_ASYNC_WRITE 0x2UL
6151#ifdef CONFIG_64BIT
6152#define FFS_ISREG 0x4UL
6153#else
6154#define FFS_ISREG 0x0UL
6155#endif
6156#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG)
6157
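/*
 * Fixed file table slots pack per-file capability bits into the low bits
 * of the struct file pointer, which are free due to pointer alignment.
 * As an illustration (64-bit, values assumed): a slot holding file
 * 0xffff888012345600 with FFS_ASYNC_READ|FFS_ISREG set would store
 * 0xffff888012345605; io_file_get() masks with FFS_MASK to recover the
 * pointer and shifts the flag bits into the matching REQ_F_* bits.
 */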
Pavel Begunkovdafecf12021-02-28 22:35:11 +00006158static inline struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
6159 unsigned i)
Jens Axboe09bb8392019-03-13 12:39:28 -06006160{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006161 struct fixed_rsrc_table *table;
Jens Axboe65e19f52019-10-26 07:20:21 -06006162
Pavel Begunkovdafecf12021-02-28 22:35:11 +00006163 table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
6164 return &table->files[i & IORING_FILE_TABLE_MASK];
6165}
6166
6167static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6168 int index)
6169{
Jens Axboe7b29f922021-03-12 08:30:14 -07006170 struct file **file_slot = io_fixed_file_slot(ctx->file_data, index);
6171
6172 return (struct file *) ((unsigned long) *file_slot & FFS_MASK);
Jens Axboe65e19f52019-10-26 07:20:21 -06006173}
6174
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006175static struct file *io_file_get(struct io_submit_state *state,
6176 struct io_kiocb *req, int fd, bool fixed)
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006177{
6178 struct io_ring_ctx *ctx = req->ctx;
6179 struct file *file;
6180
6181 if (fixed) {
Jens Axboe7b29f922021-03-12 08:30:14 -07006182 unsigned long file_ptr;
6183
Pavel Begunkov479f5172020-10-10 18:34:07 +01006184 if (unlikely((unsigned int)fd >= ctx->nr_user_files))
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006185 return NULL;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006186 fd = array_index_nospec(fd, ctx->nr_user_files);
Jens Axboe7b29f922021-03-12 08:30:14 -07006187 file_ptr = (unsigned long) *io_fixed_file_slot(ctx->file_data, fd);
6188 file = (struct file *) (file_ptr & FFS_MASK);
6189 file_ptr &= ~FFS_MASK;
6190 /* mask in overlapping REQ_F and FFS bits */
6191 req->flags |= (file_ptr << REQ_F_ASYNC_READ_BIT);
Pavel Begunkov36f72fe2020-11-18 19:57:26 +00006192 io_set_resource_node(req);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006193 } else {
6194 trace_io_uring_file_get(ctx, fd);
6195 file = __io_file_get(state, fd);
Jens Axboed44f5542021-03-12 08:27:05 -07006196
6197 /* we don't allow fixed io_uring files */
6198 if (file && unlikely(file->f_op == &io_uring_fops))
6199 io_req_track_inflight(req);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006200 }
6201
Pavel Begunkov8371adf2020-10-10 18:34:08 +01006202 return file;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006203}
6204
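/*
 * hrtimer callback for a linked timeout. If the request it guards is
 * still pending, take a reference and cancel it with -ETIME; if the
 * target already completed (timeout.head was cleared), just complete the
 * timeout request itself with -ETIME.
 */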
Jens Axboe2665abf2019-11-05 12:40:47 -07006205static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6206{
Jens Axboead8a48a2019-11-15 08:49:11 -07006207 struct io_timeout_data *data = container_of(timer,
6208 struct io_timeout_data, timer);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006209 struct io_kiocb *prev, *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07006210 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07006211 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07006212
6213 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006214 prev = req->timeout.head;
6215 req->timeout.head = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006216
6217 /*
 6218 	 * We don't expect the list to be empty; that will only happen if we
6219 * race with the completion of the linked work.
6220 */
Jens Axboede9b4cc2021-02-24 13:28:27 -07006221 if (prev && req_ref_inc_not_zero(prev))
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006222 io_remove_next_linked(prev);
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006223 else
6224 prev = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006225 spin_unlock_irqrestore(&ctx->completion_lock, flags);
6226
6227 if (prev) {
Pavel Begunkov014db002020-03-03 21:33:12 +03006228 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006229 io_put_req_deferred(prev, 1);
Jens Axboe47f46762019-11-09 17:43:02 -07006230 } else {
Pavel Begunkov9ae1f8d2021-02-01 18:59:51 +00006231 io_req_complete_post(req, -ETIME, 0);
6232 io_put_req_deferred(req, 1);
Jens Axboe2665abf2019-11-05 12:40:47 -07006233 }
Jens Axboe2665abf2019-11-05 12:40:47 -07006234 return HRTIMER_NORESTART;
6235}
6236
Pavel Begunkovde968c12021-03-19 17:22:33 +00006237static void io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006238{
Pavel Begunkovde968c12021-03-19 17:22:33 +00006239 struct io_ring_ctx *ctx = req->ctx;
6240
6241 spin_lock_irq(&ctx->completion_lock);
Jens Axboe76a46e02019-11-10 23:34:16 -07006242 /*
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006243 * If the back reference is NULL, then our linked request finished
 6244 	 * before we got a chance to set up the timer
Jens Axboe76a46e02019-11-10 23:34:16 -07006245 */
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006246 if (req->timeout.head) {
Jens Axboee8c2bc12020-08-15 18:44:09 -07006247 struct io_timeout_data *data = req->async_data;
Jens Axboe94ae5e72019-11-14 19:39:52 -07006248
Jens Axboead8a48a2019-11-15 08:49:11 -07006249 data->timer.function = io_link_timeout_fn;
6250 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6251 data->mode);
Jens Axboe2665abf2019-11-05 12:40:47 -07006252 }
Jens Axboe76a46e02019-11-10 23:34:16 -07006253 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07006254 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07006255 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07006256}
6257
Jens Axboead8a48a2019-11-15 08:49:11 -07006258static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07006259{
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006260 struct io_kiocb *nxt = req->link;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006261
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006262 if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
6263 nxt->opcode != IORING_OP_LINK_TIMEOUT)
Jens Axboed7718a92020-02-14 22:23:12 -07006264 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07006265
Pavel Begunkov90cd7e42020-10-27 23:25:36 +00006266 nxt->timeout.head = req;
Pavel Begunkov900fad42020-10-19 16:39:16 +01006267 nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
Jens Axboe76a46e02019-11-10 23:34:16 -07006268 req->flags |= REQ_F_LINK_TIMEOUT;
Jens Axboe76a46e02019-11-10 23:34:16 -07006269 return nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07006270}
6271
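/*
 * Issue a request inline from the submission path. On success the
 * completion is either batched in the submit state (REQ_F_COMPLETE_INLINE)
 * or the submission reference is dropped; on -EAGAIN for a request that
 * may block we try to arm a poll handler and otherwise punt to io-wq; any
 * other error fails the request. A prepared linked timeout is queued last.
 */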
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006272static void __io_queue_sqe(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006273{
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006274 struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006275 int ret;
6276
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006277 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
Jens Axboe491381ce2019-10-17 09:20:46 -06006278
6279 /*
6280 * We async punt it if the file wasn't marked NOWAIT, or if the file
6281 * doesn't support non-blocking read/write attempts
6282 */
Pavel Begunkov18400382021-03-19 17:22:34 +00006283 if (likely(!ret)) {
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006284 /* drop submission reference */
Pavel Begunkove342c802021-01-19 13:32:47 +00006285 if (req->flags & REQ_F_COMPLETE_INLINE) {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006286 struct io_ring_ctx *ctx = req->ctx;
6287 struct io_comp_state *cs = &ctx->submit_state.comp;
Jens Axboee65ef562019-03-12 10:16:44 -06006288
Pavel Begunkov6dd0be12021-02-10 00:03:13 +00006289 cs->reqs[cs->nr++] = req;
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006290 if (cs->nr == ARRAY_SIZE(cs->reqs))
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006291 io_submit_flush_completions(cs, ctx);
Pavel Begunkov9affd662021-01-19 13:32:46 +00006292 } else {
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006293 io_put_req(req);
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006294 }
Pavel Begunkov18400382021-03-19 17:22:34 +00006295 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
6296 if (!io_arm_poll_handler(req)) {
6297 /*
 6298 			 * Queued up for async execution; the worker will release the
 6299 			 * submit reference when the iocb is actually submitted.
6300 */
6301 io_queue_async_work(req);
6302 }
Pavel Begunkov0d63c142020-10-22 16:47:18 +01006303 } else {
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006304 io_req_complete_failed(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06006305 }
Pavel Begunkovd3d72982021-02-12 03:23:51 +00006306 if (linked_timeout)
6307 io_queue_linked_timeout(linked_timeout);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006308}
6309
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006310static void io_queue_sqe(struct io_kiocb *req)
Jackie Liu4fe2c962019-09-09 20:50:40 +08006311{
6312 int ret;
6313
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006314 ret = io_req_defer(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006315 if (ret) {
6316 if (ret != -EIOCBQUEUED) {
Pavel Begunkov11185912020-01-22 23:09:35 +03006317fail_req:
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006318 io_req_complete_failed(req, ret);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006319 }
Pavel Begunkov25508782019-12-30 21:24:47 +03006320 } else if (req->flags & REQ_F_FORCE_ASYNC) {
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006321 ret = io_req_prep_async(req);
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006322 if (unlikely(ret))
6323 goto fail_req;
Jens Axboece35a472019-12-17 08:04:44 -07006324 io_queue_async_work(req);
6325 } else {
Pavel Begunkovc5eef2b2021-02-10 00:03:22 +00006326 __io_queue_sqe(req);
Jens Axboece35a472019-12-17 08:04:44 -07006327 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006328}
6329
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006330/*
6331 * Check SQE restrictions (opcode and flags).
6332 *
6333 * Returns 'true' if SQE is allowed, 'false' otherwise.
6334 */
6335static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6336 struct io_kiocb *req,
6337 unsigned int sqe_flags)
6338{
6339 if (!ctx->restricted)
6340 return true;
6341
6342 if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6343 return false;
6344
6345 if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6346 ctx->restrictions.sqe_flags_required)
6347 return false;
6348
6349 if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6350 ctx->restrictions.sqe_flags_required))
6351 return false;
6352
6353 return true;
6354}
6355
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006356static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006357 const struct io_uring_sqe *sqe)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006358{
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006359 struct io_submit_state *state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006360 unsigned int sqe_flags;
Jens Axboe003e8dc2021-03-06 09:22:27 -07006361 int personality, ret = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006362
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006363 req->opcode = READ_ONCE(sqe->opcode);
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006364	/* same numerical values as the corresponding REQ_F_* flags, safe to copy */
6365 req->flags = sqe_flags = READ_ONCE(sqe->flags);
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006366 req->user_data = READ_ONCE(sqe->user_data);
Jens Axboee8c2bc12020-08-15 18:44:09 -07006367 req->async_data = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006368 req->file = NULL;
6369 req->ctx = ctx;
Pavel Begunkovf2f87372020-10-27 23:25:37 +00006370 req->link = NULL;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006371 req->fixed_rsrc_refs = NULL;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006372 /* one is dropped after submission, the other at completion */
Jens Axboeabc54d62021-02-24 13:32:30 -07006373 atomic_set(&req->refs, 2);
Pavel Begunkov4dd28242020-06-15 10:33:13 +03006374 req->task = current;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006375 req->result = 0;
Jens Axboe93e68e02021-03-09 07:02:21 -07006376 req->work.creds = NULL;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006377
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006378 /* enforce forwards compatibility on users */
Pavel Begunkovebf4a5d2021-02-20 01:39:53 +00006379 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
6380 req->flags = 0;
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006381 return -EINVAL;
Pavel Begunkovebf4a5d2021-02-20 01:39:53 +00006382 }
Pavel Begunkov5be9ad12021-02-12 18:41:17 +00006383
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006384 if (unlikely(req->opcode >= IORING_OP_LAST))
6385 return -EINVAL;
6386
Stefano Garzarella21b55db2020-08-27 16:58:30 +02006387 if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6388 return -EACCES;
6389
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006390 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6391 !io_op_defs[req->opcode].buffer_select)
6392 return -EOPNOTSUPP;
6393
Jens Axboe003e8dc2021-03-06 09:22:27 -07006394 personality = READ_ONCE(sqe->personality);
6395 if (personality) {
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00006396 req->work.creds = xa_load(&ctx->personalities, personality);
Jens Axboe003e8dc2021-03-06 09:22:27 -07006397 if (!req->work.creds)
6398 return -EINVAL;
6399 get_cred(req->work.creds);
Jens Axboe003e8dc2021-03-06 09:22:27 -07006400 }
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006401 state = &ctx->submit_state;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03006402
Jens Axboe27926b62020-10-28 09:33:23 -06006403 /*
6404 * Plug now if we have more than 1 IO left after this, and the target
 6405 	 * is potentially a read/write to block-based storage.
6406 */
6407 if (!state->plug_started && state->ios_left > 1 &&
6408 io_op_defs[req->opcode].plug) {
6409 blk_start_plug(&state->plug);
6410 state->plug_started = true;
6411 }
Jens Axboe63ff8222020-05-07 14:56:15 -06006412
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006413 if (io_op_defs[req->opcode].needs_file) {
6414 bool fixed = req->flags & REQ_F_FIXED_FILE;
Jens Axboe63ff8222020-05-07 14:56:15 -06006415
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006416 req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
Pavel Begunkovba13e232021-02-01 18:59:52 +00006417 if (unlikely(!req->file))
Pavel Begunkovbd5bbda2020-11-20 15:50:51 +00006418 ret = -EBADF;
6419 }
6420
Pavel Begunkov71b547c2020-10-10 18:34:09 +01006421 state->ios_left--;
6422 return ret;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03006423}
6424
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006425static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006426 const struct io_uring_sqe *sqe)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006427{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006428 struct io_submit_link *link = &ctx->submit_state.link;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006429 int ret;
6430
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006431 ret = io_init_req(ctx, req, sqe);
6432 if (unlikely(ret)) {
6433fail_req:
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006434 if (link->head) {
6435 /* fail even hard links since we don't submit */
Pavel Begunkovcf109602021-02-18 18:29:43 +00006436 link->head->flags |= REQ_F_FAIL_LINK;
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006437 io_req_complete_failed(link->head, -ECANCELED);
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006438 link->head = NULL;
6439 }
Pavel Begunkovf41db2732021-02-28 22:35:12 +00006440 io_req_complete_failed(req, ret);
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006441 return ret;
6442 }
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006443 ret = io_req_prep(req, sqe);
6444 if (unlikely(ret))
6445 goto fail_req;
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006446
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006447 /* don't need @sqe from now on */
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006448 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
6449 true, ctx->flags & IORING_SETUP_SQPOLL);
6450
Jens Axboe6c271ce2019-01-10 11:22:30 -07006451 /*
6452 * If we already have a head request, queue this one for async
6453 * submittal once the head completes. If we don't have a head but
6454 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6455 * submitted sync once the chain is complete. If none of those
6456 * conditions are true (normal request), then just queue it.
6457 */
6458 if (link->head) {
6459 struct io_kiocb *head = link->head;
6460
6461 /*
6462 * Taking sequential execution of a link, draining both sides
 6463 		 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
 6464 		 * requests in the link. So, it drains the head and the next
 6465 		 * request after the link. The last one is done via the
 6466 		 * drain_next flag to persist the effect across calls.
6467 */
6468 if (req->flags & REQ_F_IO_DRAIN) {
6469 head->flags |= REQ_F_IO_DRAIN;
6470 ctx->drain_next = 1;
6471 }
Pavel Begunkovb7e298d2021-02-28 22:35:19 +00006472 ret = io_req_prep_async(req);
Pavel Begunkovcf109602021-02-18 18:29:43 +00006473 if (unlikely(ret))
Pavel Begunkova6b8cadc2021-02-18 18:29:41 +00006474 goto fail_req;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006475 trace_io_uring_link(ctx, req, head);
6476 link->last->link = req;
6477 link->last = req;
6478
6479 /* last request of a link, enqueue the link */
6480 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006481 io_queue_sqe(head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006482 link->head = NULL;
6483 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08006484 } else {
6485 if (unlikely(ctx->drain_next)) {
6486 req->flags |= REQ_F_IO_DRAIN;
6487 ctx->drain_next = 0;
6488 }
6489 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Jackie Liu4fe2c962019-09-09 20:50:40 +08006490 link->head = req;
6491 link->last = req;
6492 } else {
Pavel Begunkovbe7053b2021-02-18 18:29:45 +00006493 io_queue_sqe(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08006494 }
6495 }
6496
6497 return 0;
6498}
6499
6500/*
6501 * Batched submission is done, ensure local IO is flushed out.
6502 */
6503static void io_submit_state_end(struct io_submit_state *state,
6504 struct io_ring_ctx *ctx)
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03006505{
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006506 if (state->link.head)
Pavel Begunkovde59bc12021-02-18 18:29:47 +00006507 io_queue_sqe(state->link.head);
Jens Axboe3529d8c2019-12-19 18:24:38 -07006508 if (state->comp.nr)
Jens Axboe9e645e112019-05-10 16:07:28 -06006509 io_submit_flush_completions(&state->comp, ctx);
Jackie Liua197f662019-11-08 08:09:12 -07006510 if (state->plug_started)
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006511 blk_finish_plug(&state->plug);
Jens Axboe75c6a032020-01-28 10:15:23 -07006512 io_state_file_put(state);
Jens Axboe9e645e112019-05-10 16:07:28 -06006513}
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006514
Jens Axboe9e645e112019-05-10 16:07:28 -06006515/*
6516 * Start submission side cache.
Pavel Begunkov32fe5252019-12-17 22:26:58 +03006517 */
Jens Axboe9e645e112019-05-10 16:07:28 -06006518static void io_submit_state_start(struct io_submit_state *state,
Pavel Begunkov196be952019-11-07 01:41:06 +03006519 unsigned int max_ios)
Jens Axboe9e645e112019-05-10 16:07:28 -06006520{
6521 state->plug_started = false;
Jens Axboebcda7ba2020-02-23 16:42:51 -07006522 state->ios_left = max_ios;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006523	/* set only the head, no need to init link.last in advance */
6524 state->link.head = NULL;
Jens Axboe75c6a032020-01-28 10:15:23 -07006525}
6526
Jens Axboe193155c2020-02-22 23:22:19 -07006527static void io_commit_sqring(struct io_ring_ctx *ctx)
6528{
Jens Axboe75c6a032020-01-28 10:15:23 -07006529 struct io_rings *rings = ctx->rings;
6530
6531 /*
Jens Axboe193155c2020-02-22 23:22:19 -07006532 * Ensure any loads from the SQEs are done at this point,
Jens Axboe75c6a032020-01-28 10:15:23 -07006533 * since once we write the new head, the application could
6534 * write new data to them.
Pavel Begunkov6b47ee62020-01-18 20:22:41 +03006535 */
Pavel Begunkov8da11c12020-02-24 11:32:44 +03006536 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboebcda7ba2020-02-23 16:42:51 -07006537}
6538
Jens Axboe9e645e112019-05-10 16:07:28 -06006539/*
Jens Axboe3529d8c2019-12-19 18:24:38 -07006540 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
Jens Axboe9e645e112019-05-10 16:07:28 -06006541 * that is mapped by userspace. This means that care needs to be taken to
6542 * ensure that reads are stable, as we cannot rely on userspace always
Jens Axboe78e19bb2019-11-06 15:21:34 -07006543 * being a good citizen. If members of the sqe are validated and then later
6544 * used, it's important that those reads are done through READ_ONCE() to
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03006545 * prevent a re-load down the line.
Jens Axboe9e645e112019-05-10 16:07:28 -06006546 */
6547static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe9e645e112019-05-10 16:07:28 -06006548{
6549 u32 *sq_array = ctx->sq_array;
6550 unsigned head;
6551
6552 /*
6553 * The cached sq head (or cq tail) serves two purposes:
6554 *
6555 * 1) allows us to batch the cost of updating the user visible
Pavel Begunkov9d763772019-12-17 02:22:07 +03006556	 * head.
Jens Axboe9e645e112019-05-10 16:07:28 -06006557 * 2) allows the kernel side to track the head on its own, even
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03006558 * though the application is the one updating it.
6559 */
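	/*
	 * e.g. with sq_entries == 8 (sq_mask == 7), cached_sq_head == 13
	 * reads the SQE index from sq_array[13 & 7] == sq_array[5].
	 */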
6560 head = READ_ONCE(sq_array[ctx->cached_sq_head++ & ctx->sq_mask]);
6561 if (likely(head < ctx->sq_entries))
6562 return &ctx->sq_sqes[head];
6563
6564 /* drop invalid entries */
Pavel Begunkov711be032020-01-17 03:57:59 +03006565 ctx->cached_sq_dropped++;
6566 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
6567 return NULL;
6568}
Jens Axboeb7bb4f72019-12-15 22:13:43 -07006569
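/*
 * Pull up to @nr SQEs off the SQ ring and submit them. ctx refs, the
 * task's inflight counter and task usage are charged for the whole batch
 * up front, and the unused part is returned if we stop early; the SQ head
 * is committed back to userspace only once the batch is done.
 */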
Jens Axboe0f212202020-09-13 13:09:39 -06006570static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006571{
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006572 int submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006573
Jens Axboec4a2ed72019-11-21 21:01:26 -07006574 /* if we have a backlog and couldn't flush it all, return BUSY */
Jens Axboead3eb2c2019-12-18 17:12:20 -07006575 if (test_bit(0, &ctx->sq_check_overflow)) {
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00006576 if (!__io_cqring_overflow_flush(ctx, false))
Jens Axboead3eb2c2019-12-18 17:12:20 -07006577 return -EBUSY;
6578 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006579
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03006580 /* make sure SQ entry isn't read before tail */
6581 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov9ef4f122019-12-30 21:24:44 +03006582
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03006583 if (!percpu_ref_tryget_many(&ctx->refs, nr))
6584 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006585
Jens Axboed8a6df12020-10-15 16:24:45 -06006586 percpu_counter_add(&current->io_uring->inflight, nr);
Jens Axboefaf7b512020-10-07 12:48:53 -06006587 refcount_add(nr, &current->usage);
Pavel Begunkovba88ff12021-02-10 00:03:11 +00006588 io_submit_state_start(&ctx->submit_state, nr);
Pavel Begunkovb14cca02020-01-17 04:45:59 +03006589
Pavel Begunkov46c4e162021-02-18 18:29:37 +00006590 while (submitted < nr) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07006591 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03006592 struct io_kiocb *req;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006593
Pavel Begunkov258b29a2021-02-10 00:03:10 +00006594 req = io_alloc_req(ctx);
Pavel Begunkov196be952019-11-07 01:41:06 +03006595 if (unlikely(!req)) {
6596 if (!submitted)
6597 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006598 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06006599 }
Pavel Begunkov4fccfcb2021-02-12 11:55:17 +00006600 sqe = io_get_sqe(ctx);
6601 if (unlikely(!sqe)) {
6602 kmem_cache_free(req_cachep, req);
6603 break;
6604 }
Jens Axboed3656342019-12-18 09:50:26 -07006605 /* will complete beyond this point, count as submitted */
6606 submitted++;
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006607 if (io_submit_sqe(ctx, req, sqe))
Jens Axboed3656342019-12-18 09:50:26 -07006608 break;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006609 }
6610
Pavel Begunkov9466f432020-01-25 22:34:01 +03006611 if (unlikely(submitted != nr)) {
6612 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
Jens Axboed8a6df12020-10-15 16:24:45 -06006613 struct io_uring_task *tctx = current->io_uring;
6614 int unused = nr - ref_used;
Pavel Begunkov9466f432020-01-25 22:34:01 +03006615
Jens Axboed8a6df12020-10-15 16:24:45 -06006616 percpu_ref_put_many(&ctx->refs, unused);
6617 percpu_counter_sub(&tctx->inflight, unused);
6618 put_task_struct_many(current, unused);
Pavel Begunkov9466f432020-01-25 22:34:01 +03006619 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006620
Pavel Begunkova1ab7b32021-02-18 18:29:42 +00006621 io_submit_state_end(&ctx->submit_state, ctx);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006622 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6623 io_commit_sqring(ctx);
6624
Jens Axboe6c271ce2019-01-10 11:22:30 -07006625 return submitted;
6626}
6627
Xiaoguang Wang23b36282020-07-23 20:57:24 +08006628static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6629{
6630 /* Tell userspace we may need a wakeup call */
6631 spin_lock_irq(&ctx->completion_lock);
6632 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6633 spin_unlock_irq(&ctx->completion_lock);
6634}
6635
6636static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6637{
6638 spin_lock_irq(&ctx->completion_lock);
6639 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6640 spin_unlock_irq(&ctx->completion_lock);
6641}
6642
Xiaoguang Wang08369242020-11-03 14:15:59 +08006643static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
Jens Axboe6c271ce2019-01-10 11:22:30 -07006644{
Jens Axboec8d1ba52020-09-14 11:07:26 -06006645 unsigned int to_submit;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006646 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006647
Jens Axboec8d1ba52020-09-14 11:07:26 -06006648 to_submit = io_sqring_entries(ctx);
Jens Axboee95eee22020-09-08 09:11:32 -06006649 /* if we're handling multiple rings, cap submit size for fairness */
6650 if (cap_entries && to_submit > 8)
6651 to_submit = 8;
6652
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006653 if (!list_empty(&ctx->iopoll_list) || to_submit) {
6654 unsigned nr_events = 0;
6655
Xiaoguang Wang08369242020-11-03 14:15:59 +08006656 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang906a3c62020-11-12 14:56:00 +08006657 if (!list_empty(&ctx->iopoll_list))
6658 io_do_iopoll(ctx, &nr_events, 0);
6659
Pavel Begunkov0298ef92021-03-08 13:20:57 +00006660 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
6661 !(ctx->flags & IORING_SETUP_R_DISABLED))
Xiaoguang Wang08369242020-11-03 14:15:59 +08006662 ret = io_submit_sqes(ctx, to_submit);
6663 mutex_unlock(&ctx->uring_lock);
6664 }
Jens Axboe90554202020-09-03 12:12:41 -06006665
6666 if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
6667 wake_up(&ctx->sqo_sq_wait);
6668
Xiaoguang Wang08369242020-11-03 14:15:59 +08006669 return ret;
6670}
6671
6672static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
6673{
6674 struct io_ring_ctx *ctx;
6675 unsigned sq_thread_idle = 0;
6676
Pavel Begunkovc9dca272021-03-10 13:13:55 +00006677 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6678 sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006679 sqd->sq_thread_idle = sq_thread_idle;
Jens Axboec8d1ba52020-09-14 11:07:26 -06006680}
6681
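/*
 * Main loop of the IORING_SETUP_SQPOLL submission thread. For every ctx
 * attached to the io_sq_data it submits pending SQEs and reaps iopoll
 * completions, spinning until sq_thread_idle expires, then sets
 * IORING_SQ_NEED_WAKEUP and sleeps until userspace, a signal, or a
 * park/stop request wakes it again.
 */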
Jens Axboe6c271ce2019-01-10 11:22:30 -07006682static int io_sq_thread(void *data)
6683{
Jens Axboe69fb2132020-09-14 11:16:23 -06006684 struct io_sq_data *sqd = data;
6685 struct io_ring_ctx *ctx;
Xiaoguang Wanga0d92052020-11-12 14:55:59 +08006686 unsigned long timeout = 0;
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006687 char buf[TASK_COMM_LEN];
Xiaoguang Wang08369242020-11-03 14:15:59 +08006688 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006689
Pavel Begunkov696ee882021-04-01 09:55:04 +01006690 snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006691 set_task_comm(current, buf);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006692 current->pf_io_worker = NULL;
Jens Axboe28cea78a2020-09-14 10:51:17 -06006693
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006694 if (sqd->sq_cpu != -1)
6695 set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
6696 else
6697 set_cpus_allowed_ptr(current, cpu_online_mask);
6698 current->flags |= PF_NO_SETAFFINITY;
6699
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006700 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07006701 while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) {
Xiaoguang Wang08369242020-11-03 14:15:59 +08006702 int ret;
6703 bool cap_entries, sqt_spin, needs_sched;
Jens Axboec1edbf52019-11-10 16:56:04 -07006704
Jens Axboe82734c52021-03-29 06:52:44 -06006705 if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
6706 signal_pending(current)) {
6707 bool did_sig = false;
6708
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006709 mutex_unlock(&sqd->lock);
Jens Axboe82734c52021-03-29 06:52:44 -06006710 if (signal_pending(current)) {
6711 struct ksignal ksig;
6712
6713 did_sig = get_signal(&ksig);
6714 }
Jens Axboe05962f92021-03-06 13:58:48 -07006715 cond_resched();
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006716 mutex_lock(&sqd->lock);
Jens Axboe82734c52021-03-29 06:52:44 -06006717 if (did_sig)
6718 break;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006719 io_run_task_work();
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00006720 io_run_task_work_head(&sqd->park_task_work);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006721 timeout = jiffies + sqd->sq_thread_idle;
Pavel Begunkov7d41e852021-03-10 13:13:54 +00006722 continue;
Xiaoguang Wang08369242020-11-03 14:15:59 +08006723 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006724 sqt_spin = false;
Jens Axboee95eee22020-09-08 09:11:32 -06006725 cap_entries = !list_is_singular(&sqd->ctx_list);
Jens Axboe69fb2132020-09-14 11:16:23 -06006726 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006727 const struct cred *creds = NULL;
6728
6729 if (ctx->sq_creds != current_cred())
6730 creds = override_creds(ctx->sq_creds);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006731 ret = __io_sq_thread(ctx, cap_entries);
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01006732 if (creds)
6733 revert_creds(creds);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006734 if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
6735 sqt_spin = true;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006736 }
6737
Xiaoguang Wang08369242020-11-03 14:15:59 +08006738 if (sqt_spin || !time_after(jiffies, timeout)) {
Jens Axboec8d1ba52020-09-14 11:07:26 -06006739 io_run_task_work();
6740 cond_resched();
Xiaoguang Wang08369242020-11-03 14:15:59 +08006741 if (sqt_spin)
6742 timeout = jiffies + sqd->sq_thread_idle;
6743 continue;
6744 }
6745
Xiaoguang Wang08369242020-11-03 14:15:59 +08006746 needs_sched = true;
6747 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
6748 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6749 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6750 !list_empty_careful(&ctx->iopoll_list)) {
6751 needs_sched = false;
6752 break;
6753 }
6754 if (io_sqring_entries(ctx)) {
6755 needs_sched = false;
6756 break;
6757 }
6758 }
6759
Jens Axboe05962f92021-03-06 13:58:48 -07006760 if (needs_sched && !test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
Jens Axboe69fb2132020-09-14 11:16:23 -06006761 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6762 io_ring_set_wakeup_flag(ctx);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006763
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006764 mutex_unlock(&sqd->lock);
Jens Axboe69fb2132020-09-14 11:16:23 -06006765 schedule();
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006766 mutex_lock(&sqd->lock);
Jens Axboe69fb2132020-09-14 11:16:23 -06006767 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6768 io_ring_clear_wakeup_flag(ctx);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006769 }
Xiaoguang Wang08369242020-11-03 14:15:59 +08006770
6771 finish_wait(&sqd->wait, &wait);
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00006772 io_run_task_work_head(&sqd->park_task_work);
Xiaoguang Wang08369242020-11-03 14:15:59 +08006773 timeout = jiffies + sqd->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006774 }
6775
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006776 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6777 io_uring_cancel_sqpoll(ctx);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006778 sqd->thread = NULL;
Jens Axboe05962f92021-03-06 13:58:48 -07006779 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
Jens Axboe5f3f26f2021-02-25 10:17:46 -07006780 io_ring_set_wakeup_flag(ctx);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00006781 mutex_unlock(&sqd->lock);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00006782
6783 io_run_task_work();
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00006784 io_run_task_work_head(&sqd->park_task_work);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07006785 complete(&sqd->exited);
6786 do_exit(0);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006787}
6788
Jens Axboebda52162019-09-24 13:47:15 -06006789struct io_wait_queue {
6790 struct wait_queue_entry wq;
6791 struct io_ring_ctx *ctx;
6792 unsigned to_wait;
6793 unsigned nr_timeouts;
6794};
6795
Pavel Begunkov6c503152021-01-04 20:36:36 +00006796static inline bool io_should_wake(struct io_wait_queue *iowq)
Jens Axboebda52162019-09-24 13:47:15 -06006797{
6798 struct io_ring_ctx *ctx = iowq->ctx;
6799
6800 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08006801 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06006802 * started waiting. For timeouts, we always want to return to userspace,
6803 * regardless of event count.
6804 */
Pavel Begunkov6c503152021-01-04 20:36:36 +00006805 return io_cqring_events(ctx) >= iowq->to_wait ||
Jens Axboebda52162019-09-24 13:47:15 -06006806 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6807}
6808
6809static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6810 int wake_flags, void *key)
6811{
6812 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6813 wq);
6814
Pavel Begunkov6c503152021-01-04 20:36:36 +00006815 /*
 6816 	 * Cannot safely flush overflowed CQEs from here, so ensure we wake up
 6817 	 * the task; the next invocation will do it.
6818 */
6819 if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
6820 return autoremove_wake_function(curr, mode, wake_flags, key);
6821 return -1;
Jens Axboebda52162019-09-24 13:47:15 -06006822}
6823
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006824static int io_run_task_work_sig(void)
6825{
6826 if (io_run_task_work())
6827 return 1;
6828 if (!signal_pending(current))
6829 return 0;
Jens Axboe0b8cfa92021-03-21 14:16:08 -06006830 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
Jens Axboe792ee0f62020-10-22 20:17:18 -06006831 return -ERESTARTSYS;
Jens Axboeaf9c1a42020-09-24 13:32:18 -06006832 return -EINTR;
6833}
6834
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006835/* when it returns >0, the caller should retry */
6836static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
6837 struct io_wait_queue *iowq,
6838 signed long *timeout)
6839{
6840 int ret;
6841
6842 /* make sure we run task_work before checking for signals */
6843 ret = io_run_task_work_sig();
6844 if (ret || io_should_wake(iowq))
6845 return ret;
6846 /* let the caller flush overflows, retry */
6847 if (test_bit(0, &ctx->cq_check_overflow))
6848 return 1;
6849
6850 *timeout = schedule_timeout(*timeout);
6851 return !*timeout ? -ETIME : 1;
6852}
6853
Jens Axboe2b188cc2019-01-07 10:46:33 -07006854/*
6855 * Wait until events become available, if we don't already have some. The
6856 * application must reap them itself, as they reside on the shared cq ring.
6857 */
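/*
 * Userspace reaches this via io_uring_enter(2) with IORING_ENTER_GETEVENTS
 * set and min_complete > 0 (e.g. liburing's io_uring_submit_and_wait() or
 * io_uring_wait_cqe() helpers, roughly
 * io_uring_enter(fd, to_submit, 1, IORING_ENTER_GETEVENTS, NULL)) when no
 * completion is already available.
 */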
6858static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
Hao Xuc73ebb62020-11-03 10:54:37 +08006859 const sigset_t __user *sig, size_t sigsz,
6860 struct __kernel_timespec __user *uts)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006861{
Jens Axboebda52162019-09-24 13:47:15 -06006862 struct io_wait_queue iowq = {
6863 .wq = {
6864 .private = current,
6865 .func = io_wake_function,
6866 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6867 },
6868 .ctx = ctx,
6869 .to_wait = min_events,
6870 };
Hristo Venev75b28af2019-08-26 17:23:46 +00006871 struct io_rings *rings = ctx->rings;
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00006872 signed long timeout = MAX_SCHEDULE_TIMEOUT;
6873 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006874
Jens Axboeb41e9852020-02-17 09:52:41 -07006875 do {
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00006876 io_cqring_overflow_flush(ctx, false);
Pavel Begunkov6c503152021-01-04 20:36:36 +00006877 if (io_cqring_events(ctx) >= min_events)
Jens Axboeb41e9852020-02-17 09:52:41 -07006878 return 0;
Jens Axboe4c6e2772020-07-01 11:29:10 -06006879 if (!io_run_task_work())
Jens Axboeb41e9852020-02-17 09:52:41 -07006880 break;
Jens Axboeb41e9852020-02-17 09:52:41 -07006881 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006882
6883 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006884#ifdef CONFIG_COMPAT
6885 if (in_compat_syscall())
6886 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07006887 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006888 else
6889#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07006890 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006891
Jens Axboe2b188cc2019-01-07 10:46:33 -07006892 if (ret)
6893 return ret;
6894 }
6895
Hao Xuc73ebb62020-11-03 10:54:37 +08006896 if (uts) {
Pavel Begunkovc1d5a222021-02-04 13:51:57 +00006897 struct timespec64 ts;
6898
Hao Xuc73ebb62020-11-03 10:54:37 +08006899 if (get_timespec64(&ts, uts))
6900 return -EFAULT;
6901 timeout = timespec64_to_jiffies(&ts);
6902 }
6903
Jens Axboebda52162019-09-24 13:47:15 -06006904 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02006905 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06006906 do {
Jens Axboeca0a2652021-03-04 17:15:48 -07006907 /* if we can't even flush overflow, don't wait for more */
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00006908 if (!io_cqring_overflow_flush(ctx, false)) {
Jens Axboeca0a2652021-03-04 17:15:48 -07006909 ret = -EBUSY;
6910 break;
6911 }
Jens Axboebda52162019-09-24 13:47:15 -06006912 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6913 TASK_INTERRUPTIBLE);
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006914 ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
6915 finish_wait(&ctx->wait, &iowq.wq);
Jens Axboeca0a2652021-03-04 17:15:48 -07006916 cond_resched();
Pavel Begunkoveeb60b92021-02-04 13:51:58 +00006917 } while (ret > 0);
Jens Axboebda52162019-09-24 13:47:15 -06006918
Jens Axboeb7db41c2020-07-04 08:55:50 -06006919 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006920
Hristo Venev75b28af2019-08-26 17:23:46 +00006921 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006922}
6923
Jens Axboe6b063142019-01-10 22:13:58 -07006924static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6925{
6926#if defined(CONFIG_UNIX)
6927 if (ctx->ring_sock) {
6928 struct sock *sock = ctx->ring_sock->sk;
6929 struct sk_buff *skb;
6930
6931 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6932 kfree_skb(skb);
6933 }
6934#else
6935 int i;
6936
Jens Axboe65e19f52019-10-26 07:20:21 -06006937 for (i = 0; i < ctx->nr_user_files; i++) {
6938 struct file *file;
6939
6940 file = io_file_from_index(ctx, i);
6941 if (file)
6942 fput(file);
6943 }
Jens Axboe6b063142019-01-10 22:13:58 -07006944#endif
6945}
6946
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00006947static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006948{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006949 struct fixed_rsrc_data *data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006950
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006951 data = container_of(ref, struct fixed_rsrc_data, refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006952 complete(&data->done);
6953}
6954
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006955static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
Pavel Begunkov1642b442020-12-30 21:34:14 +00006956{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006957 spin_lock_bh(&ctx->rsrc_ref_lock);
Pavel Begunkov1642b442020-12-30 21:34:14 +00006958}
6959
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006960static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
Jens Axboe6b063142019-01-10 22:13:58 -07006961{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006962 spin_unlock_bh(&ctx->rsrc_ref_lock);
6963}
6964
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00006965static void io_sqe_rsrc_set_node(struct io_ring_ctx *ctx,
6966 struct fixed_rsrc_data *rsrc_data,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006967 struct fixed_rsrc_ref_node *ref_node)
Jens Axboe6b063142019-01-10 22:13:58 -07006968{
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006969 io_rsrc_ref_lock(ctx);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006970 rsrc_data->node = ref_node;
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00006971 list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006972 io_rsrc_ref_unlock(ctx);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00006973 percpu_ref_get(&rsrc_data->refs);
Jens Axboe6b063142019-01-10 22:13:58 -07006974}
6975
Hao Xu8bad28d2021-02-19 17:19:36 +08006976static void io_sqe_rsrc_kill_node(struct io_ring_ctx *ctx, struct fixed_rsrc_data *data)
Jens Axboe6b063142019-01-10 22:13:58 -07006977{
Hao Xu8bad28d2021-02-19 17:19:36 +08006978 struct fixed_rsrc_ref_node *ref_node = NULL;
Jens Axboe65e19f52019-10-26 07:20:21 -06006979
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006980 io_rsrc_ref_lock(ctx);
Pavel Begunkov1e5d7702020-11-18 14:56:25 +00006981 ref_node = data->node;
Pavel Begunkove6cb0072021-02-20 18:03:47 +00006982 data->node = NULL;
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00006983 io_rsrc_ref_unlock(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08006984 if (ref_node)
6985 percpu_ref_kill(&ref_node->refs);
Hao Xu8bad28d2021-02-19 17:19:36 +08006986}
Xiaoguang Wang05589552020-03-31 14:05:18 +08006987
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00006988static int io_rsrc_refnode_prealloc(struct io_ring_ctx *ctx)
6989{
6990 if (ctx->rsrc_backup_node)
6991 return 0;
6992 ctx->rsrc_backup_node = alloc_fixed_rsrc_ref_node(ctx);
6993 return ctx->rsrc_backup_node ? 0 : -ENOMEM;
6994}
6995
6996static struct fixed_rsrc_ref_node *
6997io_rsrc_refnode_get(struct io_ring_ctx *ctx,
6998 struct fixed_rsrc_data *rsrc_data,
6999 void (*rsrc_put)(struct io_ring_ctx *ctx,
7000 struct io_rsrc_put *prsrc))
7001{
7002 struct fixed_rsrc_ref_node *node = ctx->rsrc_backup_node;
7003
7004 WARN_ON_ONCE(!node);
7005
7006 ctx->rsrc_backup_node = NULL;
7007 node->rsrc_data = rsrc_data;
7008 node->rsrc_put = rsrc_put;
7009 return node;
7010}
7011
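/*
 * Quiesce a fixed resource table (files or buffers) so it can be torn
 * down or replaced: kill the current ref node and the data refs, wait for
 * all in-flight users to drop their references, and if the wait is
 * interrupted (task_work or a signal), resurrect the refs and retry with
 * a freshly allocated ref node.
 */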
Hao Xu8bad28d2021-02-19 17:19:36 +08007012static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
7013 struct io_ring_ctx *ctx,
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007014 void (*rsrc_put)(struct io_ring_ctx *ctx,
7015 struct io_rsrc_put *prsrc))
Hao Xu8bad28d2021-02-19 17:19:36 +08007016{
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007017 struct fixed_rsrc_ref_node *node;
Hao Xu8bad28d2021-02-19 17:19:36 +08007018 int ret;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007019
Hao Xu8bad28d2021-02-19 17:19:36 +08007020 if (data->quiesce)
7021 return -ENXIO;
7022
7023 data->quiesce = true;
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007024 do {
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007025 ret = io_rsrc_refnode_prealloc(ctx);
7026 if (ret)
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007027 break;
Hao Xu8bad28d2021-02-19 17:19:36 +08007028 io_sqe_rsrc_kill_node(ctx, data);
7029 percpu_ref_kill(&data->refs);
7030 flush_delayed_work(&ctx->rsrc_put_work);
7031
Pavel Begunkov1ffc5422020-12-30 21:34:15 +00007032 ret = wait_for_completion_interruptible(&data->done);
7033 if (!ret)
7034 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007035
Jens Axboecb5e1b82021-02-25 07:37:35 -07007036 percpu_ref_resurrect(&data->refs);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007037 node = io_rsrc_refnode_get(ctx, data, rsrc_put);
7038 io_sqe_rsrc_set_node(ctx, data, node);
Jens Axboecb5e1b82021-02-25 07:37:35 -07007039 reinit_completion(&data->done);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007040
Hao Xu8bad28d2021-02-19 17:19:36 +08007041 mutex_unlock(&ctx->uring_lock);
7042 ret = io_run_task_work_sig();
7043 mutex_lock(&ctx->uring_lock);
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007044 } while (ret >= 0);
Hao Xu8bad28d2021-02-19 17:19:36 +08007045 data->quiesce = false;
7046
Hao Xu8bad28d2021-02-19 17:19:36 +08007047 return ret;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007048}
7049
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007050static struct fixed_rsrc_data *alloc_fixed_rsrc_data(struct io_ring_ctx *ctx)
7051{
7052 struct fixed_rsrc_data *data;
7053
7054 data = kzalloc(sizeof(*data), GFP_KERNEL);
7055 if (!data)
7056 return NULL;
7057
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007058 if (percpu_ref_init(&data->refs, io_rsrc_data_ref_zero,
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007059 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
7060 kfree(data);
7061 return NULL;
7062 }
7063 data->ctx = ctx;
7064 init_completion(&data->done);
7065 return data;
7066}
7067
7068static void free_fixed_rsrc_data(struct fixed_rsrc_data *data)
7069{
7070 percpu_ref_exit(&data->refs);
7071 kfree(data->table);
7072 kfree(data);
7073}
7074
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007075static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7076{
7077 struct fixed_rsrc_data *data = ctx->file_data;
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007078 unsigned nr_tables, i;
7079 int ret;
7080
Hao Xu8bad28d2021-02-19 17:19:36 +08007081 /*
7082	 * percpu_ref_is_dying() is used to stop parallel files unregister,
7083	 * since we may drop the uring lock later in this function to
7084	 * run task work.
7085 */
7086 if (!data || percpu_ref_is_dying(&data->refs))
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007087 return -ENXIO;
Pavel Begunkovf2303b12021-02-20 18:03:49 +00007088 ret = io_rsrc_ref_quiesce(data, ctx, io_ring_file_put);
Bijan Mottahedehd7954b22021-01-15 17:37:50 +00007089 if (ret)
7090 return ret;
7091
Jens Axboe6b063142019-01-10 22:13:58 -07007092 __io_sqe_files_unregister(ctx);
Jens Axboe65e19f52019-10-26 07:20:21 -06007093 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
7094 for (i = 0; i < nr_tables; i++)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007095 kfree(data->table[i].files);
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007096 free_fixed_rsrc_data(data);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007097 ctx->file_data = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07007098 ctx->nr_user_files = 0;
7099 return 0;
7100}
7101
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007102static void io_sq_thread_unpark(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007103 __releases(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007104{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007105 WARN_ON_ONCE(sqd->thread == current);
7106
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007107 /*
7108	 * Do the dance, but not a conditional clear_bit(), because that would race
7109	 * with other threads incrementing park_pending and setting the bit.
7110 */
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007111 clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007112 if (atomic_dec_return(&sqd->park_pending))
7113 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007114 mutex_unlock(&sqd->lock);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007115}
7116
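/*
 * Park the SQPOLL thread: record the park request, take sqd->lock and wake
 * the thread so it can notice IO_SQ_THREAD_SHOULD_PARK and park itself.
 * Paired with io_sq_thread_unpark().
 */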
Jens Axboe86e0d672021-03-05 08:44:39 -07007117static void io_sq_thread_park(struct io_sq_data *sqd)
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007118 __acquires(&sqd->lock)
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007119{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007120 WARN_ON_ONCE(sqd->thread == current);
7121
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007122 atomic_inc(&sqd->park_pending);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007123 set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007124 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007125 if (sqd->thread)
Jens Axboe86e0d672021-03-05 08:44:39 -07007126 wake_up_process(sqd->thread);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007127}
7128
7129static void io_sq_thread_stop(struct io_sq_data *sqd)
7130{
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007131 WARN_ON_ONCE(sqd->thread == current);
7132
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007133 mutex_lock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007134 set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
Jens Axboee8f98f242021-03-09 16:32:13 -07007135 if (sqd->thread)
7136 wake_up_process(sqd->thread);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007137 mutex_unlock(&sqd->lock);
Jens Axboe05962f92021-03-06 13:58:48 -07007138 wait_for_completion(&sqd->exited);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007139}
7140
Jens Axboe534ca6d2020-09-02 13:52:19 -06007141static void io_put_sq_data(struct io_sq_data *sqd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07007142{
Jens Axboe534ca6d2020-09-02 13:52:19 -06007143 if (refcount_dec_and_test(&sqd->refs)) {
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007144 WARN_ON_ONCE(atomic_read(&sqd->park_pending));
7145
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007146 io_sq_thread_stop(sqd);
7147 kfree(sqd);
7148 }
7149}
7150
7151static void io_sq_thread_finish(struct io_ring_ctx *ctx)
7152{
7153 struct io_sq_data *sqd = ctx->sq_data;
7154
7155 if (sqd) {
Jens Axboe05962f92021-03-06 13:58:48 -07007156 io_sq_thread_park(sqd);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00007157 list_del_init(&ctx->sqd_list);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007158 io_sqd_update_thread_idle(sqd);
Jens Axboe05962f92021-03-06 13:58:48 -07007159 io_sq_thread_unpark(sqd);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007160
7161 io_put_sq_data(sqd);
7162 ctx->sq_data = NULL;
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01007163 if (ctx->sq_creds)
7164 put_cred(ctx->sq_creds);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007165 }
7166}
7167
Jens Axboeaa061652020-09-02 14:50:27 -06007168static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7169{
7170 struct io_ring_ctx *ctx_attach;
7171 struct io_sq_data *sqd;
7172 struct fd f;
7173
7174 f = fdget(p->wq_fd);
7175 if (!f.file)
7176 return ERR_PTR(-ENXIO);
7177 if (f.file->f_op != &io_uring_fops) {
7178 fdput(f);
7179 return ERR_PTR(-EINVAL);
7180 }
7181
7182 ctx_attach = f.file->private_data;
7183 sqd = ctx_attach->sq_data;
7184 if (!sqd) {
7185 fdput(f);
7186 return ERR_PTR(-EINVAL);
7187 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007188 if (sqd->task_tgid != current->tgid) {
7189 fdput(f);
7190 return ERR_PTR(-EPERM);
7191 }
Jens Axboeaa061652020-09-02 14:50:27 -06007192
7193 refcount_inc(&sqd->refs);
7194 fdput(f);
7195 return sqd;
7196}
7197
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007198static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
7199 bool *attached)
Jens Axboe534ca6d2020-09-02 13:52:19 -06007200{
7201 struct io_sq_data *sqd;
7202
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007203 *attached = false;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007204 if (p->flags & IORING_SETUP_ATTACH_WQ) {
7205 sqd = io_attach_sq_data(p);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007206 if (!IS_ERR(sqd)) {
7207 *attached = true;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007208 return sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007209 }
Jens Axboe5c2469e2021-03-11 10:17:56 -07007210 /* fall through for EPERM case, setup new sqd/task */
7211 if (PTR_ERR(sqd) != -EPERM)
7212 return sqd;
7213 }
Jens Axboeaa061652020-09-02 14:50:27 -06007214
Jens Axboe534ca6d2020-09-02 13:52:19 -06007215 sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7216 if (!sqd)
7217 return ERR_PTR(-ENOMEM);
7218
Pavel Begunkov9e138a42021-03-14 20:57:12 +00007219 atomic_set(&sqd->park_pending, 0);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007220 refcount_set(&sqd->refs, 1);
Jens Axboe69fb2132020-09-14 11:16:23 -06007221 INIT_LIST_HEAD(&sqd->ctx_list);
Pavel Begunkov09a6f4e2021-03-14 20:57:10 +00007222 mutex_init(&sqd->lock);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007223 init_waitqueue_head(&sqd->wait);
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007224 init_completion(&sqd->exited);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007225 return sqd;
7226}
7227
Jens Axboe6b063142019-01-10 22:13:58 -07007228#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07007229/*
7230 * Ensure the UNIX gc is aware of our file set, so we are certain that
7231 * the io_uring can be safely unregistered on process exit, even if we have
7232 * loops in the file referencing.
7233 */
7234static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7235{
7236 struct sock *sk = ctx->ring_sock->sk;
7237 struct scm_fp_list *fpl;
7238 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06007239 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07007240
Jens Axboe6b063142019-01-10 22:13:58 -07007241 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7242 if (!fpl)
7243 return -ENOMEM;
7244
7245 skb = alloc_skb(0, GFP_KERNEL);
7246 if (!skb) {
7247 kfree(fpl);
7248 return -ENOMEM;
7249 }
7250
7251 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07007252
Jens Axboe08a45172019-10-03 08:11:03 -06007253 nr_files = 0;
Jens Axboe62e398b2021-02-21 16:19:37 -07007254 fpl->user = get_uid(current_user());
Jens Axboe6b063142019-01-10 22:13:58 -07007255 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007256 struct file *file = io_file_from_index(ctx, i + offset);
7257
7258 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06007259 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06007260 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06007261 unix_inflight(fpl->user, fpl->fp[nr_files]);
7262 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07007263 }
7264
Jens Axboe08a45172019-10-03 08:11:03 -06007265 if (nr_files) {
7266 fpl->max = SCM_MAX_FD;
7267 fpl->count = nr_files;
7268 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007269 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06007270 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7271 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07007272
Jens Axboe08a45172019-10-03 08:11:03 -06007273 for (i = 0; i < nr_files; i++)
7274 fput(fpl->fp[i]);
7275 } else {
7276 kfree_skb(skb);
7277 kfree(fpl);
7278 }
Jens Axboe6b063142019-01-10 22:13:58 -07007279
7280 return 0;
7281}
7282
7283/*
7284 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7285 * causes regular reference counting to break down. We rely on the UNIX
7286 * garbage collection to take care of this problem for us.
7287 */
7288static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7289{
7290 unsigned left, total;
7291 int ret = 0;
7292
7293 total = 0;
7294 left = ctx->nr_user_files;
7295 while (left) {
7296 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07007297
7298 ret = __io_sqe_files_scm(ctx, this_files, total);
7299 if (ret)
7300 break;
7301 left -= this_files;
7302 total += this_files;
7303 }
7304
7305 if (!ret)
7306 return 0;
7307
7308 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06007309 struct file *file = io_file_from_index(ctx, total);
7310
7311 if (file)
7312 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07007313 total++;
7314 }
7315
7316 return ret;
7317}
7318#else
7319static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7320{
7321 return 0;
7322}
7323#endif
7324
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007325static int io_sqe_alloc_file_tables(struct fixed_rsrc_data *file_data,
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007326 unsigned nr_tables, unsigned nr_files)
Jens Axboe65e19f52019-10-26 07:20:21 -06007327{
7328 int i;
7329
7330 for (i = 0; i < nr_tables; i++) {
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007331 struct fixed_rsrc_table *table = &file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06007332 unsigned this_files;
7333
7334 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
7335 table->files = kcalloc(this_files, sizeof(struct file *),
7336 GFP_KERNEL);
7337 if (!table->files)
7338 break;
7339 nr_files -= this_files;
7340 }
7341
7342 if (i == nr_tables)
7343 return 0;
7344
7345 for (i = 0; i < nr_tables; i++) {
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007346 struct fixed_rsrc_table *table = &file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06007347 kfree(table->files);
7348 }
7349 return 1;
7350}
7351
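/*
 * Drop a fixed file reference. With CONFIG_UNIX the file may be held in an
 * SCM_RIGHTS skb on the ring socket's receive queue, so find that skb and
 * remove the file from it (or free the skb if it was the last entry)
 * before the reference can really go away.
 */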
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007352static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
Jens Axboec3a31e62019-10-03 13:59:56 -06007353{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007354 struct file *file = prsrc->file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007355#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06007356 struct sock *sock = ctx->ring_sock->sk;
7357 struct sk_buff_head list, *head = &sock->sk_receive_queue;
7358 struct sk_buff *skb;
7359 int i;
7360
7361 __skb_queue_head_init(&list);
7362
7363 /*
7364 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7365 * remove this entry and rearrange the file array.
7366 */
7367 skb = skb_dequeue(head);
7368 while (skb) {
7369 struct scm_fp_list *fp;
7370
7371 fp = UNIXCB(skb).fp;
7372 for (i = 0; i < fp->count; i++) {
7373 int left;
7374
7375 if (fp->fp[i] != file)
7376 continue;
7377
7378 unix_notinflight(fp->user, fp->fp[i]);
7379 left = fp->count - 1 - i;
7380 if (left) {
7381 memmove(&fp->fp[i], &fp->fp[i + 1],
7382 left * sizeof(struct file *));
7383 }
7384 fp->count--;
7385 if (!fp->count) {
7386 kfree_skb(skb);
7387 skb = NULL;
7388 } else {
7389 __skb_queue_tail(&list, skb);
7390 }
7391 fput(file);
7392 file = NULL;
7393 break;
7394 }
7395
7396 if (!file)
7397 break;
7398
7399 __skb_queue_tail(&list, skb);
7400
7401 skb = skb_dequeue(head);
7402 }
7403
7404 if (skb_peek(&list)) {
7405 spin_lock_irq(&head->lock);
7406 while ((skb = __skb_dequeue(&list)) != NULL)
7407 __skb_queue_tail(head, skb);
7408 spin_unlock_irq(&head->lock);
7409 }
7410#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07007411 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007412#endif
7413}
7414
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007415static void __io_rsrc_put_work(struct fixed_rsrc_ref_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007416{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007417 struct fixed_rsrc_data *rsrc_data = ref_node->rsrc_data;
7418 struct io_ring_ctx *ctx = rsrc_data->ctx;
7419 struct io_rsrc_put *prsrc, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007420
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007421 list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
7422 list_del(&prsrc->list);
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007423 ref_node->rsrc_put(ctx, prsrc);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007424 kfree(prsrc);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007425 }
7426
Xiaoguang Wang05589552020-03-31 14:05:18 +08007427 percpu_ref_exit(&ref_node->refs);
7428 kfree(ref_node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007429 percpu_ref_put(&rsrc_data->refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007430}
7431
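/*
 * Delayed work that drains ctx->rsrc_put_llist and frees the queued
 * resources of each completed ref node.
 */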
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007432static void io_rsrc_put_work(struct work_struct *work)
Jens Axboe4a38aed22020-05-14 17:21:15 -06007433{
7434 struct io_ring_ctx *ctx;
7435 struct llist_node *node;
7436
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007437 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
7438 node = llist_del_all(&ctx->rsrc_put_llist);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007439
7440 while (node) {
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007441 struct fixed_rsrc_ref_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007442 struct llist_node *next = node->next;
7443
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007444 ref_node = llist_entry(node, struct fixed_rsrc_ref_node, llist);
7445 __io_rsrc_put_work(ref_node);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007446 node = next;
7447 }
7448}
7449
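/*
 * percpu_ref release callback for a fixed rsrc ref node. Mark the node
 * done, move any completed nodes (in list order) to the put llist, and
 * schedule rsrc_put_work, immediately if the whole rsrc data set is dying.
 */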
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007450static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007451{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007452 struct fixed_rsrc_ref_node *ref_node;
7453 struct fixed_rsrc_data *data;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007454 struct io_ring_ctx *ctx;
Pavel Begunkove2978222020-11-18 14:56:26 +00007455 bool first_add = false;
Jens Axboe4a38aed22020-05-14 17:21:15 -06007456 int delay = HZ;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007457
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007458 ref_node = container_of(ref, struct fixed_rsrc_ref_node, refs);
7459 data = ref_node->rsrc_data;
Pavel Begunkove2978222020-11-18 14:56:26 +00007460 ctx = data->ctx;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007461
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007462 io_rsrc_ref_lock(ctx);
Pavel Begunkove2978222020-11-18 14:56:26 +00007463 ref_node->done = true;
7464
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007465 while (!list_empty(&ctx->rsrc_ref_list)) {
7466 ref_node = list_first_entry(&ctx->rsrc_ref_list,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007467 struct fixed_rsrc_ref_node, node);
Pavel Begunkove2978222020-11-18 14:56:26 +00007468 /* recycle ref nodes in order */
7469 if (!ref_node->done)
7470 break;
7471 list_del(&ref_node->node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007472 first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
Pavel Begunkove2978222020-11-18 14:56:26 +00007473 }
Bijan Mottahedeh2a63b2d2021-01-15 17:37:47 +00007474 io_rsrc_ref_unlock(ctx);
Pavel Begunkove2978222020-11-18 14:56:26 +00007475
7476 if (percpu_ref_is_dying(&data->refs))
Jens Axboe4a38aed22020-05-14 17:21:15 -06007477 delay = 0;
7478
Jens Axboe4a38aed22020-05-14 17:21:15 -06007479 if (!delay)
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007480 mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
Jens Axboe4a38aed22020-05-14 17:21:15 -06007481 else if (first_add)
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007482 queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007483}
7484
Bijan Mottahedeh68025352021-01-15 17:37:48 +00007485static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
Xiaoguang Wang05589552020-03-31 14:05:18 +08007486 struct io_ring_ctx *ctx)
7487{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007488 struct fixed_rsrc_ref_node *ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007489
7490 ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7491 if (!ref_node)
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007492 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007493
Bijan Mottahedeh00835dc2021-01-15 17:37:52 +00007494 if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
Xiaoguang Wang05589552020-03-31 14:05:18 +08007495 0, GFP_KERNEL)) {
7496 kfree(ref_node);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007497 return NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007498 }
7499 INIT_LIST_HEAD(&ref_node->node);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007500 INIT_LIST_HEAD(&ref_node->rsrc_list);
Pavel Begunkove2978222020-11-18 14:56:26 +00007501 ref_node->done = false;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007502 return ref_node;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007503}
7504
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007505static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
7506 struct fixed_rsrc_ref_node *ref_node)
Bijan Mottahedeh68025352021-01-15 17:37:48 +00007507{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007508 ref_node->rsrc_data = ctx->file_data;
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007509 ref_node->rsrc_put = io_ring_file_put;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007510}
7511
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007512static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node)
Xiaoguang Wang05589552020-03-31 14:05:18 +08007513{
7514 percpu_ref_exit(&ref_node->refs);
7515 kfree(ref_node);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007516}
7517
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007518
Jens Axboe05f3fb32019-12-09 11:22:50 -07007519static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7520 unsigned nr_args)
7521{
7522 __s32 __user *fds = (__s32 __user *) arg;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007523 unsigned nr_tables, i;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007524 struct file *file;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007525 int fd, ret = -ENOMEM;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007526 struct fixed_rsrc_ref_node *ref_node;
7527 struct fixed_rsrc_data *file_data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007528
7529 if (ctx->file_data)
7530 return -EBUSY;
7531 if (!nr_args)
7532 return -EINVAL;
7533 if (nr_args > IORING_MAX_FIXED_FILES)
7534 return -EMFILE;
7535
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007536 file_data = alloc_fixed_rsrc_data(ctx);
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007537 if (!file_data)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007538 return -ENOMEM;
Dan Carpenter13770a72021-02-01 15:23:42 +03007539 ctx->file_data = file_data;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007540
7541 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
Colin Ian King035fbaf2020-10-12 15:03:41 +01007542 file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
Pavel Begunkov5398ae62020-10-10 18:34:14 +01007543 GFP_KERNEL);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007544 if (!file_data->table)
7545 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007546
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007547 if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
Jens Axboe05f3fb32019-12-09 11:22:50 -07007548 goto out_free;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007549
Jens Axboe05f3fb32019-12-09 11:22:50 -07007550 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Jens Axboe7b29f922021-03-12 08:30:14 -07007551 unsigned long file_ptr;
7552
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007553 if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
7554 ret = -EFAULT;
7555 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007556 }
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007557 /* allow sparse sets */
7558 if (fd == -1)
7559 continue;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007560
Jens Axboe05f3fb32019-12-09 11:22:50 -07007561 file = fget(fd);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007562 ret = -EBADF;
7563 if (!file)
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007564 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007565
7566 /*
7567 * Don't allow io_uring instances to be registered. If UNIX
7568 * isn't enabled, then this causes a reference cycle and this
7569 * instance can never get freed. If UNIX is enabled we'll
7570 * handle it just fine, but there's still no point in allowing
7571 * a ring fd as it doesn't support regular read/write anyway.
7572 */
7573 if (file->f_op == &io_uring_fops) {
7574 fput(file);
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007575 goto out_fput;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007576 }
Jens Axboe7b29f922021-03-12 08:30:14 -07007577 file_ptr = (unsigned long) file;
7578 if (__io_file_supports_async(file, READ))
7579 file_ptr |= FFS_ASYNC_READ;
7580 if (__io_file_supports_async(file, WRITE))
7581 file_ptr |= FFS_ASYNC_WRITE;
7582 if (S_ISREG(file_inode(file)->i_mode))
7583 file_ptr |= FFS_ISREG;
7584 *io_fixed_file_slot(file_data, i) = (struct file *) file_ptr;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007585 }
7586
Jens Axboe05f3fb32019-12-09 11:22:50 -07007587 ret = io_sqe_files_scm(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007588 if (ret) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07007589 io_sqe_files_unregister(ctx);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007590 return ret;
7591 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007592
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007593 ref_node = alloc_fixed_rsrc_ref_node(ctx);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007594 if (!ref_node) {
Xiaoguang Wang05589552020-03-31 14:05:18 +08007595 io_sqe_files_unregister(ctx);
Matthew Wilcox (Oracle)3e2224c2021-01-06 16:09:26 +00007596 return -ENOMEM;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007597 }
Pavel Begunkovbc9744c2021-01-15 17:37:49 +00007598 init_fixed_file_ref_node(ctx, ref_node);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007599
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007600 io_sqe_rsrc_set_node(ctx, file_data, ref_node);
Jens Axboe05f3fb32019-12-09 11:22:50 -07007601 return ret;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007602out_fput:
7603 for (i = 0; i < ctx->nr_user_files; i++) {
7604 file = io_file_from_index(ctx, i);
7605 if (file)
7606 fput(file);
7607 }
7608 for (i = 0; i < nr_tables; i++)
7609 kfree(file_data->table[i].files);
7610 ctx->nr_user_files = 0;
Pavel Begunkov600cf3f2020-10-10 18:34:15 +01007611out_free:
Bijan Mottahedeh1ad555c2021-01-15 17:37:51 +00007612 free_fixed_rsrc_data(ctx->file_data);
Jens Axboe55cbc252020-10-14 07:35:57 -06007613 ctx->file_data = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007614 return ret;
7615}
7616
Jens Axboec3a31e62019-10-03 13:59:56 -06007617static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7618 int index)
7619{
7620#if defined(CONFIG_UNIX)
7621 struct sock *sock = ctx->ring_sock->sk;
7622 struct sk_buff_head *head = &sock->sk_receive_queue;
7623 struct sk_buff *skb;
7624
7625 /*
7626 * See if we can merge this file into an existing skb SCM_RIGHTS
7627 * file set. If there's no room, fall back to allocating a new skb
7628 * and filling it in.
7629 */
7630 spin_lock_irq(&head->lock);
7631 skb = skb_peek(head);
7632 if (skb) {
7633 struct scm_fp_list *fpl = UNIXCB(skb).fp;
7634
7635 if (fpl->count < SCM_MAX_FD) {
7636 __skb_unlink(skb, head);
7637 spin_unlock_irq(&head->lock);
7638 fpl->fp[fpl->count] = get_file(file);
7639 unix_inflight(fpl->user, fpl->fp[fpl->count]);
7640 fpl->count++;
7641 spin_lock_irq(&head->lock);
7642 __skb_queue_head(head, skb);
7643 } else {
7644 skb = NULL;
7645 }
7646 }
7647 spin_unlock_irq(&head->lock);
7648
7649 if (skb) {
7650 fput(file);
7651 return 0;
7652 }
7653
7654 return __io_sqe_files_scm(ctx, 1, index);
7655#else
7656 return 0;
7657#endif
7658}
7659
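/*
 * Queue a resource (e.g. a fixed file) for deferred put: it is added to
 * the current ref node's rsrc_list and released once that node's refs
 * drop to zero and io_rsrc_put_work runs.
 */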
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007660static int io_queue_rsrc_removal(struct fixed_rsrc_data *data, void *rsrc)
Jens Axboe05f3fb32019-12-09 11:22:50 -07007661{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007662 struct io_rsrc_put *prsrc;
7663 struct fixed_rsrc_ref_node *ref_node = data->node;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007664
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007665 prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
7666 if (!prsrc)
Hillf Dantona5318d32020-03-23 17:47:15 +08007667 return -ENOMEM;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007668
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007669 prsrc->rsrc = rsrc;
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007670 list_add(&prsrc->list, &ref_node->rsrc_list);
Xiaoguang Wang05589552020-03-31 14:05:18 +08007671
Hillf Dantona5318d32020-03-23 17:47:15 +08007672 return 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007673}
7674
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007675static inline int io_queue_file_removal(struct fixed_rsrc_data *data,
7676 struct file *file)
7677{
Bijan Mottahedeh50238532021-01-15 17:37:45 +00007678 return io_queue_rsrc_removal(data, (void *)file);
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007679}
7680
Jens Axboe05f3fb32019-12-09 11:22:50 -07007681static int __io_sqe_files_update(struct io_ring_ctx *ctx,
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007682 struct io_uring_rsrc_update *up,
Jens Axboe05f3fb32019-12-09 11:22:50 -07007683 unsigned nr_args)
7684{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007685 struct fixed_rsrc_data *data = ctx->file_data;
7686 struct fixed_rsrc_ref_node *ref_node;
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007687 struct file *file, **file_slot;
Jens Axboec3a31e62019-10-03 13:59:56 -06007688 __s32 __user *fds;
7689 int fd, i, err;
7690 __u32 done;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007691 bool needs_switch = false;
Jens Axboec3a31e62019-10-03 13:59:56 -06007692
Jens Axboe05f3fb32019-12-09 11:22:50 -07007693 if (check_add_overflow(up->offset, nr_args, &done))
Jens Axboec3a31e62019-10-03 13:59:56 -06007694 return -EOVERFLOW;
7695 if (done > ctx->nr_user_files)
7696 return -EINVAL;
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007697 err = io_rsrc_refnode_prealloc(ctx);
7698 if (err)
7699 return err;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007700
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007701 fds = u64_to_user_ptr(up->data);
Pavel Begunkov67973b92021-01-26 13:51:09 +00007702 for (done = 0; done < nr_args; done++) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007703 err = 0;
7704 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
7705 err = -EFAULT;
7706 break;
7707 }
noah4e0377a2021-01-26 15:23:28 -05007708 if (fd == IORING_REGISTER_FILES_SKIP)
7709 continue;
7710
Pavel Begunkov67973b92021-01-26 13:51:09 +00007711 i = array_index_nospec(up->offset + done, ctx->nr_user_files);
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007712 file_slot = io_fixed_file_slot(ctx->file_data, i);
7713
7714 if (*file_slot) {
Jens Axboe7b29f922021-03-12 08:30:14 -07007715 file = (struct file *) ((unsigned long) *file_slot & FFS_MASK);
7716 err = io_queue_file_removal(data, file);
Hillf Dantona5318d32020-03-23 17:47:15 +08007717 if (err)
7718 break;
Pavel Begunkovea64ec022021-02-04 13:52:07 +00007719 *file_slot = NULL;
Xiaoguang Wang05589552020-03-31 14:05:18 +08007720 needs_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06007721 }
7722 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06007723 file = fget(fd);
7724 if (!file) {
7725 err = -EBADF;
7726 break;
7727 }
7728 /*
7729 * Don't allow io_uring instances to be registered. If
7730 * UNIX isn't enabled, then this causes a reference
7731 * cycle and this instance can never get freed. If UNIX
7732 * is enabled we'll handle it just fine, but there's
7733 * still no point in allowing a ring fd as it doesn't
7734 * support regular read/write anyway.
7735 */
7736 if (file->f_op == &io_uring_fops) {
7737 fput(file);
7738 err = -EBADF;
7739 break;
7740 }
Jens Axboee68a3ff2021-02-11 07:45:08 -07007741 *file_slot = file;
Jens Axboec3a31e62019-10-03 13:59:56 -06007742 err = io_sqe_file_register(ctx, file, i);
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007743 if (err) {
Jens Axboee68a3ff2021-02-11 07:45:08 -07007744 *file_slot = NULL;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007745 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06007746 break;
Yang Yingliangf3bd9da2020-07-09 10:11:41 +00007747 }
Jens Axboec3a31e62019-10-03 13:59:56 -06007748 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07007749 }
7750
Xiaoguang Wang05589552020-03-31 14:05:18 +08007751 if (needs_switch) {
Pavel Begunkovb2e96852020-10-10 18:34:16 +01007752 percpu_ref_kill(&data->node->refs);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007753 ref_node = io_rsrc_refnode_get(ctx, data, io_ring_file_put);
Bijan Mottahedehd67d2262021-01-15 17:37:46 +00007754 io_sqe_rsrc_set_node(ctx, data, ref_node);
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00007755 }
Jens Axboec3a31e62019-10-03 13:59:56 -06007756 return done ? done : err;
7757}
Xiaoguang Wang05589552020-03-31 14:05:18 +08007758
Jens Axboe05f3fb32019-12-09 11:22:50 -07007759static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
7760 unsigned nr_args)
7761{
Bijan Mottahedeh269bbe52021-01-15 17:37:44 +00007762 struct io_uring_rsrc_update up;
Jens Axboe05f3fb32019-12-09 11:22:50 -07007763
7764 if (!ctx->file_data)
7765 return -ENXIO;
7766 if (!nr_args)
7767 return -EINVAL;
7768 if (copy_from_user(&up, arg, sizeof(up)))
7769 return -EFAULT;
7770 if (up.resv)
7771 return -EINVAL;
7772
7773 return __io_sqe_files_update(ctx, &up, nr_args);
7774}
Jens Axboec3a31e62019-10-03 13:59:56 -06007775
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00007776static struct io_wq_work *io_free_work(struct io_wq_work *work)
Jens Axboe7d723062019-11-12 22:31:31 -07007777{
7778 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7779
Pavel Begunkov5280f7e2021-02-04 13:52:08 +00007780 req = io_put_req_find_next(req);
7781 return req ? &req->work : NULL;
Jens Axboe7d723062019-11-12 22:31:31 -07007782}
7783
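/*
 * Create an io-wq instance for offloading async work on this ring. The
 * work hash is allocated once per ring (ctx->hash_map) and shared by all
 * io-wq instances created for it; concurrency is capped at
 * min(SQ entries, 4 * online CPUs).
 */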
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007784static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx)
Pavel Begunkov24369c22020-01-28 03:15:48 +03007785{
Jens Axboee9418942021-02-19 12:33:30 -07007786 struct io_wq_hash *hash;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007787 struct io_wq_data data;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007788 unsigned int concurrency;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007789
Jens Axboee9418942021-02-19 12:33:30 -07007790 hash = ctx->hash_map;
7791 if (!hash) {
7792 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
7793 if (!hash)
7794 return ERR_PTR(-ENOMEM);
7795 refcount_set(&hash->refs, 1);
7796 init_waitqueue_head(&hash->wait);
7797 ctx->hash_map = hash;
7798 }
7799
7800 data.hash = hash;
Pavel Begunkove9fd9392020-03-04 16:14:12 +03007801 data.free_work = io_free_work;
Pavel Begunkovf5fa38c2020-06-08 21:08:20 +03007802 data.do_work = io_wq_submit_work;
Pavel Begunkov24369c22020-01-28 03:15:48 +03007803
Jens Axboed25e3a32021-02-16 11:41:41 -07007804	/* Do QD, or 4 * CPUS, whichever is smaller */
7805 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
Pavel Begunkov24369c22020-01-28 03:15:48 +03007806
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007807 return io_wq_create(concurrency, &data);
Pavel Begunkov24369c22020-01-28 03:15:48 +03007808}
7809
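/*
 * Allocate and initialise the per-task io_uring context: the inflight
 * request counter, the task's io-wq, the xarray of known rings, and the
 * task_work list used to run deferred request work.
 */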
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007810static int io_uring_alloc_task_context(struct task_struct *task,
7811 struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06007812{
7813 struct io_uring_task *tctx;
Jens Axboed8a6df12020-10-15 16:24:45 -06007814 int ret;
Jens Axboe0f212202020-09-13 13:09:39 -06007815
7816 tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
7817 if (unlikely(!tctx))
7818 return -ENOMEM;
7819
Jens Axboed8a6df12020-10-15 16:24:45 -06007820 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7821 if (unlikely(ret)) {
7822 kfree(tctx);
7823 return ret;
7824 }
7825
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007826 tctx->io_wq = io_init_wq_offload(ctx);
7827 if (IS_ERR(tctx->io_wq)) {
7828 ret = PTR_ERR(tctx->io_wq);
7829 percpu_counter_destroy(&tctx->inflight);
7830 kfree(tctx);
7831 return ret;
7832 }
7833
Jens Axboe0f212202020-09-13 13:09:39 -06007834 xa_init(&tctx->xa);
7835 init_waitqueue_head(&tctx->wait);
7836 tctx->last = NULL;
Jens Axboefdaf0832020-10-30 09:37:30 -06007837 atomic_set(&tctx->in_idle, 0);
Jens Axboe0f212202020-09-13 13:09:39 -06007838 task->io_uring = tctx;
Jens Axboe7cbf1722021-02-10 00:03:20 +00007839 spin_lock_init(&tctx->task_lock);
7840 INIT_WQ_LIST(&tctx->task_list);
7841 tctx->task_state = 0;
7842 init_task_work(&tctx->task_work, tctx_task_work);
Jens Axboe0f212202020-09-13 13:09:39 -06007843 return 0;
7844}
7845
7846void __io_uring_free(struct task_struct *tsk)
7847{
7848 struct io_uring_task *tctx = tsk->io_uring;
7849
7850 WARN_ON_ONCE(!xa_empty(&tctx->xa));
Pavel Begunkovef8eaa42021-02-27 11:16:45 +00007851 WARN_ON_ONCE(tctx->io_wq);
7852
Jens Axboed8a6df12020-10-15 16:24:45 -06007853 percpu_counter_destroy(&tctx->inflight);
Jens Axboe0f212202020-09-13 13:09:39 -06007854 kfree(tctx);
7855 tsk->io_uring = NULL;
7856}
7857
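/*
 * Set up SQPOLL offload when requested: find or create the io_sq_data
 * (possibly shared via IORING_SETUP_ATTACH_WQ), attach this ctx to it,
 * and for a newly created sqd spawn the SQ poll thread with
 * create_io_thread().
 */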
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02007858static int io_sq_offload_create(struct io_ring_ctx *ctx,
7859 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007860{
7861 int ret;
7862
Jens Axboed25e3a32021-02-16 11:41:41 -07007863 /* Retain compatibility with failing for an invalid attach attempt */
7864 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
7865 IORING_SETUP_ATTACH_WQ) {
7866 struct fd f;
7867
7868 f = fdget(p->wq_fd);
7869 if (!f.file)
7870 return -ENXIO;
7871 if (f.file->f_op != &io_uring_fops) {
7872 fdput(f);
7873 return -EINVAL;
7874 }
7875 fdput(f);
7876 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07007877 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe46fe18b2021-03-04 12:39:36 -07007878 struct task_struct *tsk;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007879 struct io_sq_data *sqd;
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007880 bool attached;
Jens Axboe534ca6d2020-09-02 13:52:19 -06007881
Jens Axboe3ec482d2019-04-08 10:51:01 -06007882 ret = -EPERM;
Jens Axboece59fc62020-09-02 13:28:09 -06007883 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
Jens Axboe3ec482d2019-04-08 10:51:01 -06007884 goto err;
7885
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007886 sqd = io_get_sq_data(p, &attached);
Jens Axboe534ca6d2020-09-02 13:52:19 -06007887 if (IS_ERR(sqd)) {
7888 ret = PTR_ERR(sqd);
7889 goto err;
7890 }
Jens Axboe69fb2132020-09-14 11:16:23 -06007891
Stefan Metzmacher7c30f36a2021-03-07 11:54:28 +01007892 ctx->sq_creds = get_current_cred();
Jens Axboe534ca6d2020-09-02 13:52:19 -06007893 ctx->sq_data = sqd;
Jens Axboe917257d2019-04-13 09:28:55 -06007894 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
7895 if (!ctx->sq_thread_idle)
7896 ctx->sq_thread_idle = HZ;
7897
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007898 ret = 0;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00007899 io_sq_thread_park(sqd);
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00007900 list_add(&ctx->sqd_list, &sqd->ctx_list);
7901 io_sqd_update_thread_idle(sqd);
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007902 /* don't attach to a dying SQPOLL thread, would be racy */
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00007903 if (attached && !sqd->thread)
Pavel Begunkov26984fb2021-03-11 23:29:37 +00007904 ret = -ENXIO;
Pavel Begunkov78d7f6b2021-03-10 13:13:53 +00007905 io_sq_thread_unpark(sqd);
7906
Pavel Begunkovde75a3d2021-03-18 11:54:35 +00007907 if (ret < 0)
7908 goto err;
7909 if (attached)
Jens Axboe5aa75ed2021-02-16 12:56:50 -07007910 return 0;
Jens Axboeaa061652020-09-02 14:50:27 -06007911
Jens Axboe6c271ce2019-01-10 11:22:30 -07007912 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06007913 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007914
Jens Axboe917257d2019-04-13 09:28:55 -06007915 ret = -EINVAL;
Jens Axboe44a9bd12019-05-14 20:00:30 -06007916 if (cpu >= nr_cpu_ids)
Jens Axboee8f98f242021-03-09 16:32:13 -07007917 goto err_sqpoll;
Shenghui Wang7889f442019-05-07 16:03:19 +08007918 if (!cpu_online(cpu))
Jens Axboee8f98f242021-03-09 16:32:13 -07007919 goto err_sqpoll;
Jens Axboe917257d2019-04-13 09:28:55 -06007920
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007921 sqd->sq_cpu = cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007922 } else {
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007923 sqd->sq_cpu = -1;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007924 }
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007925
7926 sqd->task_pid = current->pid;
Jens Axboe5c2469e2021-03-11 10:17:56 -07007927 sqd->task_tgid = current->tgid;
Jens Axboe46fe18b2021-03-04 12:39:36 -07007928 tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
7929 if (IS_ERR(tsk)) {
7930 ret = PTR_ERR(tsk);
Jens Axboee8f98f242021-03-09 16:32:13 -07007931 goto err_sqpoll;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007932 }
Pavel Begunkov97a73a02021-03-08 17:30:54 +00007933
Jens Axboe46fe18b2021-03-04 12:39:36 -07007934 sqd->thread = tsk;
Pavel Begunkov97a73a02021-03-08 17:30:54 +00007935 ret = io_uring_alloc_task_context(tsk, ctx);
Jens Axboe46fe18b2021-03-04 12:39:36 -07007936 wake_up_new_task(tsk);
Jens Axboe0f212202020-09-13 13:09:39 -06007937 if (ret)
7938 goto err;
Jens Axboe6c271ce2019-01-10 11:22:30 -07007939 } else if (p->flags & IORING_SETUP_SQ_AFF) {
7940 /* Can't have SQ_AFF without SQPOLL */
7941 ret = -EINVAL;
7942 goto err;
7943 }
7944
Jens Axboe2b188cc2019-01-07 10:46:33 -07007945 return 0;
7946err:
Jens Axboe37d1e2e2021-02-17 21:03:43 -07007947 io_sq_thread_finish(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07007948 return ret;
Jens Axboee8f98f242021-03-09 16:32:13 -07007949err_sqpoll:
7950 complete(&ctx->sq_data->exited);
7951 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07007952}
7953
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007954static inline void __io_unaccount_mem(struct user_struct *user,
7955 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007956{
7957 atomic_long_sub(nr_pages, &user->locked_vm);
7958}
7959
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007960static inline int __io_account_mem(struct user_struct *user,
7961 unsigned long nr_pages)
Jens Axboe2b188cc2019-01-07 10:46:33 -07007962{
7963 unsigned long page_limit, cur_pages, new_pages;
7964
7965 /* Don't allow more pages than we can safely lock */
7966 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
7967
7968 do {
7969 cur_pages = atomic_long_read(&user->locked_vm);
7970 new_pages = cur_pages + nr_pages;
7971 if (new_pages > page_limit)
7972 return -ENOMEM;
7973 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
7974 new_pages) != cur_pages);
7975
7976 return 0;
7977}
7978
Jens Axboe26bfa89e2021-02-09 20:14:12 -07007979static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007980{
Jens Axboe62e398b2021-02-21 16:19:37 -07007981 if (ctx->user)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007982 __io_unaccount_mem(ctx->user, nr_pages);
Bijan Mottahedeh30975822020-06-16 16:36:09 -07007983
Jens Axboe26bfa89e2021-02-09 20:14:12 -07007984 if (ctx->mm_account)
7985 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007986}
7987
Jens Axboe26bfa89e2021-02-09 20:14:12 -07007988static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07007989{
Bijan Mottahedeh30975822020-06-16 16:36:09 -07007990 int ret;
7991
Jens Axboe62e398b2021-02-21 16:19:37 -07007992 if (ctx->user) {
Bijan Mottahedeh30975822020-06-16 16:36:09 -07007993 ret = __io_account_mem(ctx->user, nr_pages);
7994 if (ret)
7995 return ret;
7996 }
7997
Jens Axboe26bfa89e2021-02-09 20:14:12 -07007998 if (ctx->mm_account)
7999 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
Bijan Mottahedeha087e2b2020-06-16 16:36:07 -07008000
8001 return 0;
8002}
8003
Jens Axboe2b188cc2019-01-07 10:46:33 -07008004static void io_mem_free(void *ptr)
8005{
Mark Rutland52e04ef2019-04-30 17:30:21 +01008006 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008007
Mark Rutland52e04ef2019-04-30 17:30:21 +01008008 if (!ptr)
8009 return;
8010
8011 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008012 if (put_page_testzero(page))
8013 free_compound_page(page);
8014}
8015
8016static void *io_mem_alloc(size_t size)
8017{
8018 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008019 __GFP_NORETRY | __GFP_ACCOUNT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07008020
8021 return (void *) __get_free_pages(gfp_flags, get_order(size));
8022}
8023
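/*
 * Compute the size of the rings allocation: the io_rings struct with
 * cq_entries CQEs, followed (cache-line aligned on SMP) by the SQ index
 * array of sq_entries u32s. The offset of the SQ array is returned via
 * *sq_offset; SIZE_MAX signals arithmetic overflow.
 */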
Hristo Venev75b28af2019-08-26 17:23:46 +00008024static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8025 size_t *sq_offset)
8026{
8027 struct io_rings *rings;
8028 size_t off, sq_array_size;
8029
8030 off = struct_size(rings, cqes, cq_entries);
8031 if (off == SIZE_MAX)
8032 return SIZE_MAX;
8033
8034#ifdef CONFIG_SMP
8035 off = ALIGN(off, SMP_CACHE_BYTES);
8036 if (off == 0)
8037 return SIZE_MAX;
8038#endif
8039
Dmitry Vyukovb36200f2020-07-11 11:31:11 +02008040 if (sq_offset)
8041 *sq_offset = off;
8042
Hristo Venev75b28af2019-08-26 17:23:46 +00008043 sq_array_size = array_size(sizeof(u32), sq_entries);
8044 if (sq_array_size == SIZE_MAX)
8045 return SIZE_MAX;
8046
8047 if (check_add_overflow(off, sq_array_size, &off))
8048 return SIZE_MAX;
8049
Hristo Venev75b28af2019-08-26 17:23:46 +00008050 return off;
8051}
8052
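/*
 * Unregister all fixed buffers: unpin every page, un-account the pinned
 * memory, and free the per-buffer bvec arrays and the user_bufs array.
 */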
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008053static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
Jens Axboeedafcce2019-01-09 09:16:05 -07008054{
8055 int i, j;
8056
8057 if (!ctx->user_bufs)
8058 return -ENXIO;
8059
8060 for (i = 0; i < ctx->nr_user_bufs; i++) {
8061 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8062
8063 for (j = 0; j < imu->nr_bvecs; j++)
John Hubbardf1f6a7d2020-01-30 22:13:35 -08008064 unpin_user_page(imu->bvec[j].bv_page);
Jens Axboeedafcce2019-01-09 09:16:05 -07008065
Jens Axboede293932020-09-17 16:19:16 -06008066 if (imu->acct_pages)
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008067 io_unaccount_mem(ctx, imu->acct_pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01008068 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07008069 imu->nr_bvecs = 0;
8070 }
8071
8072 kfree(ctx->user_bufs);
8073 ctx->user_bufs = NULL;
8074 ctx->nr_user_bufs = 0;
8075 return 0;
8076}
8077
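/*
 * Copy one iovec from userspace, converting from the 32-bit compat layout
 * when the ring was created by a compat task.
 */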
8078static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8079 void __user *arg, unsigned index)
8080{
8081 struct iovec __user *src;
8082
8083#ifdef CONFIG_COMPAT
8084 if (ctx->compat) {
8085 struct compat_iovec __user *ciovs;
8086 struct compat_iovec ciov;
8087
8088 ciovs = (struct compat_iovec __user *) arg;
8089 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8090 return -EFAULT;
8091
Jens Axboed55e5f52019-12-11 16:12:15 -07008092 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07008093 dst->iov_len = ciov.iov_len;
8094 return 0;
8095 }
8096#endif
8097 src = (struct iovec __user *) arg;
8098 if (copy_from_user(dst, &src[index], sizeof(*dst)))
8099 return -EFAULT;
8100 return 0;
8101}
8102
Jens Axboede293932020-09-17 16:19:16 -06008103/*
8104 * Not super efficient, but this is only done at registration time. And we do cache
8105 * the last compound head, so generally we'll only do a full search if we don't
8106 * match that one.
8107 *
8108 * We check if the given compound head page has already been accounted, to
8109 * avoid double accounting it. This allows us to account the full size of the
8110 * page, not just the constituent pages of a huge page.
8111 */
8112static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8113 int nr_pages, struct page *hpage)
8114{
8115 int i, j;
8116
8117 /* check current page array */
8118 for (i = 0; i < nr_pages; i++) {
8119 if (!PageCompound(pages[i]))
8120 continue;
8121 if (compound_head(pages[i]) == hpage)
8122 return true;
8123 }
8124
8125 /* check previously registered pages */
8126 for (i = 0; i < ctx->nr_user_bufs; i++) {
8127 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8128
8129 for (j = 0; j < imu->nr_bvecs; j++) {
8130 if (!PageCompound(imu->bvec[j].bv_page))
8131 continue;
8132 if (compound_head(imu->bvec[j].bv_page) == hpage)
8133 return true;
8134 }
8135 }
8136
8137 return false;
8138}
8139
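/*
 * Account the pinned pages of a registered buffer. Compound (huge) pages
 * are accounted once via their head page, at the full huge page size, and
 * pages already charged by this or a previous registration are skipped.
 */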
8140static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8141 int nr_pages, struct io_mapped_ubuf *imu,
8142 struct page **last_hpage)
8143{
8144 int i, ret;
8145
8146 for (i = 0; i < nr_pages; i++) {
8147 if (!PageCompound(pages[i])) {
8148 imu->acct_pages++;
8149 } else {
8150 struct page *hpage;
8151
8152 hpage = compound_head(pages[i]);
8153 if (hpage == *last_hpage)
8154 continue;
8155 *last_hpage = hpage;
8156 if (headpage_already_acct(ctx, pages, i, hpage))
8157 continue;
8158 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8159 }
8160 }
8161
8162 if (!imu->acct_pages)
8163 return 0;
8164
Jens Axboe26bfa89e2021-02-09 20:14:12 -07008165 ret = io_account_mem(ctx, imu->acct_pages);
Jens Axboede293932020-09-17 16:19:16 -06008166 if (ret)
8167 imu->acct_pages = 0;
8168 return ret;
8169}
8170
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008171static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
8172 struct io_mapped_ubuf *imu,
8173 struct page **last_hpage)
Jens Axboeedafcce2019-01-09 09:16:05 -07008174{
8175 struct vm_area_struct **vmas = NULL;
8176 struct page **pages = NULL;
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008177 unsigned long off, start, end, ubuf;
8178 size_t size;
8179 int ret, pret, nr_pages, i;
Jens Axboeedafcce2019-01-09 09:16:05 -07008180
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008181 ubuf = (unsigned long) iov->iov_base;
8182 end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8183 start = ubuf >> PAGE_SHIFT;
8184 nr_pages = end - start;
8185
8186 ret = -ENOMEM;
8187
8188 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
8189 if (!pages)
8190 goto done;
8191
8192 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
8193 GFP_KERNEL);
8194 if (!vmas)
8195 goto done;
8196
8197 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
8198 GFP_KERNEL);
8199 if (!imu->bvec)
8200 goto done;
8201
8202 ret = 0;
8203 mmap_read_lock(current->mm);
8204 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
8205 pages, vmas);
8206 if (pret == nr_pages) {
8207 /* don't support file backed memory */
8208 for (i = 0; i < nr_pages; i++) {
8209 struct vm_area_struct *vma = vmas[i];
8210
8211 if (vma->vm_file &&
8212 !is_file_hugepages(vma->vm_file)) {
8213 ret = -EOPNOTSUPP;
8214 break;
8215 }
8216 }
8217 } else {
8218 ret = pret < 0 ? pret : -EFAULT;
8219 }
8220 mmap_read_unlock(current->mm);
8221 if (ret) {
8222 /*
8223		 * If we did a partial map, or found file-backed vmas,
8224		 * release any pages we did get
8225 */
8226 if (pret > 0)
8227 unpin_user_pages(pages, pret);
8228 kvfree(imu->bvec);
8229 goto done;
8230 }
8231
8232 ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
8233 if (ret) {
8234 unpin_user_pages(pages, pret);
8235 kvfree(imu->bvec);
8236 goto done;
8237 }
8238
8239 off = ubuf & ~PAGE_MASK;
8240 size = iov->iov_len;
8241 for (i = 0; i < nr_pages; i++) {
8242 size_t vec_len;
8243
8244 vec_len = min_t(size_t, size, PAGE_SIZE - off);
8245 imu->bvec[i].bv_page = pages[i];
8246 imu->bvec[i].bv_len = vec_len;
8247 imu->bvec[i].bv_offset = off;
8248 off = 0;
8249 size -= vec_len;
8250 }
8251 /* store original address for later verification */
8252 imu->ubuf = ubuf;
8253 imu->len = iov->iov_len;
8254 imu->nr_bvecs = nr_pages;
8255 ret = 0;
8256done:
8257 kvfree(pages);
8258 kvfree(vmas);
8259 return ret;
8260}
8261
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008262static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008263{
Jens Axboeedafcce2019-01-09 09:16:05 -07008264 if (ctx->user_bufs)
8265 return -EBUSY;
8266 if (!nr_args || nr_args > UIO_MAXIOV)
8267 return -EINVAL;
8268
8269 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
8270 GFP_KERNEL);
8271 if (!ctx->user_bufs)
8272 return -ENOMEM;
8273
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008274 return 0;
8275}
8276
8277static int io_buffer_validate(struct iovec *iov)
8278{
8279 /*
8280	 * Don't impose further limits on the size and buffer
8281	 * constraints here; we'll return -EINVAL later when IO is
8282 * submitted if they are wrong.
8283 */
8284 if (!iov->iov_base || !iov->iov_len)
8285 return -EFAULT;
8286
8287 /* arbitrary limit, but we need something */
8288 if (iov->iov_len > SZ_1G)
8289 return -EFAULT;
8290
8291 return 0;
8292}
8293
8294static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
8295 unsigned int nr_args)
8296{
8297 int i, ret;
8298 struct iovec iov;
8299 struct page *last_hpage = NULL;
8300
8301 ret = io_buffers_map_alloc(ctx, nr_args);
8302 if (ret)
8303 return ret;
8304
Jens Axboeedafcce2019-01-09 09:16:05 -07008305 for (i = 0; i < nr_args; i++) {
8306 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
Jens Axboeedafcce2019-01-09 09:16:05 -07008307
8308 ret = io_copy_iov(ctx, &iov, arg, i);
8309 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008310 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008311
Bijan Mottahedeh2b358602021-01-06 12:39:11 -08008312 ret = io_buffer_validate(&iov);
8313 if (ret)
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008314 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008315
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008316 ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
8317 if (ret)
8318 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07008319
8320 ctx->nr_user_bufs++;
8321 }
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008322
8323 if (ret)
8324 io_sqe_buffers_unregister(ctx);
8325
Jens Axboeedafcce2019-01-09 09:16:05 -07008326 return ret;
8327}
8328
Jens Axboe9b402842019-04-11 11:45:41 -06008329static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8330{
8331 __s32 __user *fds = arg;
8332 int fd;
8333
8334 if (ctx->cq_ev_fd)
8335 return -EBUSY;
8336
8337 if (copy_from_user(&fd, fds, sizeof(*fds)))
8338 return -EFAULT;
8339
8340 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8341 if (IS_ERR(ctx->cq_ev_fd)) {
8342 int ret = PTR_ERR(ctx->cq_ev_fd);
8343 ctx->cq_ev_fd = NULL;
8344 return ret;
8345 }
8346
8347 return 0;
8348}
8349
8350static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8351{
8352 if (ctx->cq_ev_fd) {
8353 eventfd_ctx_put(ctx->cq_ev_fd);
8354 ctx->cq_ev_fd = NULL;
8355 return 0;
8356 }
8357
8358 return -ENXIO;
8359}
8360
Jens Axboe5a2e7452020-02-23 16:23:11 -07008361static void io_destroy_buffers(struct io_ring_ctx *ctx)
8362{
Jens Axboe9e15c3a2021-03-13 12:29:43 -07008363 struct io_buffer *buf;
8364 unsigned long index;
8365
8366 xa_for_each(&ctx->io_buffers, index, buf)
8367 __io_remove_buffers(ctx, buf, index, -1U);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008368}
8369
Jens Axboe68e68ee2021-02-13 09:00:02 -07008370static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
Jens Axboe1b4c3512021-02-10 00:03:19 +00008371{
Jens Axboe68e68ee2021-02-13 09:00:02 -07008372 struct io_kiocb *req, *nxt;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008373
Jens Axboe68e68ee2021-02-13 09:00:02 -07008374 list_for_each_entry_safe(req, nxt, list, compl.list) {
8375 if (tsk && req->task != tsk)
8376 continue;
Jens Axboe1b4c3512021-02-10 00:03:19 +00008377 list_del(&req->compl.list);
8378 kmem_cache_free(req_cachep, req);
8379 }
8380}
8381
Jens Axboe4010fec2021-02-27 15:04:18 -07008382static void io_req_caches_free(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07008383{
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008384 struct io_submit_state *submit_state = &ctx->submit_state;
Pavel Begunkove5547d22021-02-23 22:17:20 +00008385 struct io_comp_state *cs = &ctx->submit_state.comp;
Pavel Begunkovbf019da2021-02-10 00:03:17 +00008386
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008387 mutex_lock(&ctx->uring_lock);
8388
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008389 if (submit_state->free_reqs) {
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008390 kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
8391 submit_state->reqs);
Pavel Begunkov8e5c66c2021-02-22 11:45:55 +00008392 submit_state->free_reqs = 0;
8393 }
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008394
Pavel Begunkovdac7a092021-03-19 17:22:39 +00008395 io_flush_cached_locked_reqs(ctx, cs);
Pavel Begunkove5547d22021-02-23 22:17:20 +00008396 io_req_cache_free(&cs->free_list, NULL);
Jens Axboe9a4fdbd2021-02-13 09:09:44 -07008397 mutex_unlock(&ctx->uring_lock);
8398}
8399
Jens Axboe2b188cc2019-01-07 10:46:33 -07008400static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8401{
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00008402 /*
8403	 * Some tasks may use the context even when all refs and requests have been put,
Pavel Begunkov180f8292021-03-14 20:57:09 +00008404 * and they are free to do so while still holding uring_lock or
8405 * completion_lock, see __io_req_task_submit(). Wait for them to finish.
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00008406 */
8407 mutex_lock(&ctx->uring_lock);
8408 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov180f8292021-03-14 20:57:09 +00008409 spin_lock_irq(&ctx->completion_lock);
8410 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov04fc6c82021-02-12 03:23:54 +00008411
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008412 io_sq_thread_finish(ctx);
Bijan Mottahedeh0a96bbe2021-01-06 12:39:10 -08008413 io_sqe_buffers_unregister(ctx);
Jens Axboe2aede0e2020-09-14 10:45:53 -06008414
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008415 if (ctx->mm_account) {
Jens Axboe2aede0e2020-09-14 10:45:53 -06008416 mmdrop(ctx->mm_account);
8417 ctx->mm_account = NULL;
Bijan Mottahedeh30975822020-06-16 16:36:09 -07008418 }
Jens Axboedef596e2019-01-09 08:59:42 -07008419
Hao Xu8bad28d2021-02-19 17:19:36 +08008420 mutex_lock(&ctx->uring_lock);
Jens Axboe6b063142019-01-10 22:13:58 -07008421 io_sqe_files_unregister(ctx);
Hao Xu8bad28d2021-02-19 17:19:36 +08008422 mutex_unlock(&ctx->uring_lock);
Jens Axboe9b402842019-04-11 11:45:41 -06008423 io_eventfd_unregister(ctx);
Jens Axboe5a2e7452020-02-23 16:23:11 -07008424 io_destroy_buffers(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -07008425
Pavel Begunkov8dd03af2021-03-19 17:22:36 +00008426 if (ctx->rsrc_backup_node)
8427 destroy_fixed_rsrc_ref_node(ctx->rsrc_backup_node);
8428
Jens Axboe2b188cc2019-01-07 10:46:33 -07008429#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07008430 if (ctx->ring_sock) {
8431 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008432 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07008433 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07008434#endif
8435
Hristo Venev75b28af2019-08-26 17:23:46 +00008436 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008437 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008438
8439 percpu_ref_exit(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008440 free_uid(ctx->user);
Jens Axboe4010fec2021-02-27 15:04:18 -07008441 io_req_caches_free(ctx);
Jens Axboee9418942021-02-19 12:33:30 -07008442 if (ctx->hash_map)
8443 io_wq_put_hash(ctx->hash_map);
Jens Axboe78076bb2019-12-04 19:56:40 -07008444 kfree(ctx->cancel_hash);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008445 kfree(ctx);
8446}
8447
8448static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8449{
8450 struct io_ring_ctx *ctx = file->private_data;
8451 __poll_t mask = 0;
8452
8453 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02008454 /*
8455 * synchronizes with barrier from wq_has_sleeper call in
8456 * io_commit_cqring
8457 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07008458 smp_rmb();
Jens Axboe90554202020-09-03 12:12:41 -06008459 if (!io_sqring_full(ctx))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008460 mask |= EPOLLOUT | EPOLLWRNORM;
Hao Xued670c32021-02-05 16:34:21 +08008461
8462 /*
8463 * Don't flush cqring overflow list here, just do a simple check.
 8464	 * Otherwise there could possibly be an ABBA deadlock:
8465 * CPU0 CPU1
8466 * ---- ----
8467 * lock(&ctx->uring_lock);
8468 * lock(&ep->mtx);
8469 * lock(&ctx->uring_lock);
8470 * lock(&ep->mtx);
8471 *
 8472	 * Users may get EPOLLIN while seeing nothing in the cqring; this
 8473	 * pushes them to do the flush.
8474 */
8475 if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
Jens Axboe2b188cc2019-01-07 10:46:33 -07008476 mask |= EPOLLIN | EPOLLRDNORM;
8477
8478 return mask;
8479}
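/*
 * Userspace-side sketch (illustrative only, not part of this file) of what
 * the comment in io_uring_poll() implies for applications: a poll/epoll
 * wakeup on the ring fd does not guarantee visible CQEs, so on an apparently
 * empty CQ the application is expected to enter the kernel to flush the
 * overflow list. The names ring_fd, cq_head and cq_tail (pointers into the
 * CQ ring mapping) and the raw syscall use are assumptions of the example;
 * memory-ordering details are elided for brevity.
 *
 *	struct pollfd pfd = { .fd = ring_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		if (*cq_head == __atomic_load_n(cq_tail, __ATOMIC_ACQUIRE)) {
 *			// Nothing visible yet: ask the kernel to flush
 *			// overflowed completions into the CQ ring.
 *			syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *				IORING_ENTER_GETEVENTS, NULL, 0);
 *		}
 *		// reap CQEs between cq_head and cq_tail here
 *	}
 */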
8480
8481static int io_uring_fasync(int fd, struct file *file, int on)
8482{
8483 struct io_ring_ctx *ctx = file->private_data;
8484
8485 return fasync_helper(fd, file, on, &ctx->cq_fasync);
8486}
8487
Yejune Deng0bead8c2020-12-24 11:02:20 +08008488static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
Jens Axboe071698e2020-01-28 10:04:42 -07008489{
Jens Axboe4379bf82021-02-15 13:40:22 -07008490 const struct cred *creds;
Jens Axboe071698e2020-01-28 10:04:42 -07008491
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008492 creds = xa_erase(&ctx->personalities, id);
Jens Axboe4379bf82021-02-15 13:40:22 -07008493 if (creds) {
8494 put_cred(creds);
Yejune Deng0bead8c2020-12-24 11:02:20 +08008495 return 0;
Jens Axboe1e6fa522020-10-15 08:46:24 -06008496 }
Yejune Deng0bead8c2020-12-24 11:02:20 +08008497
8498 return -EINVAL;
8499}
8500
Pavel Begunkov9b465712021-03-15 14:23:07 +00008501static inline bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008502{
Pavel Begunkov9b465712021-03-15 14:23:07 +00008503 return io_run_task_work_head(&ctx->exit_task_work);
Jens Axboe7c25c0d2021-02-16 07:17:00 -07008504}
8505
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008506struct io_tctx_exit {
8507 struct callback_head task_work;
8508 struct completion completion;
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008509 struct io_ring_ctx *ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008510};
8511
8512static void io_tctx_exit_cb(struct callback_head *cb)
8513{
8514 struct io_uring_task *tctx = current->io_uring;
8515 struct io_tctx_exit *work;
8516
8517 work = container_of(cb, struct io_tctx_exit, task_work);
8518 /*
8519 * When @in_idle, we're in cancellation and it's racy to remove the
8520 * node. It'll be removed by the end of cancellation, just ignore it.
8521 */
8522 if (!atomic_read(&tctx->in_idle))
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008523 io_uring_del_task_file((unsigned long)work->ctx);
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008524 complete(&work->completion);
8525}
8526
Jens Axboe85faa7b2020-04-09 18:14:00 -06008527static void io_ring_exit_work(struct work_struct *work)
8528{
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008529 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008530 unsigned long timeout = jiffies + HZ * 60 * 5;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008531 struct io_tctx_exit exit;
8532 struct io_tctx_node *node;
8533 int ret;
Jens Axboe85faa7b2020-04-09 18:14:00 -06008534
Pavel Begunkova185f1d2021-03-23 10:52:38 +00008535 /* prevent SQPOLL from submitting new requests */
8536 if (ctx->sq_data) {
8537 io_sq_thread_park(ctx->sq_data);
8538 list_del_init(&ctx->sqd_list);
8539 io_sqd_update_thread_idle(ctx->sq_data);
8540 io_sq_thread_unpark(ctx->sq_data);
8541 }
8542
Jens Axboe56952e92020-06-17 15:00:04 -06008543 /*
8544 * If we're doing polled IO and end up having requests being
8545 * submitted async (out-of-line), then completions can come in while
8546 * we're waiting for refs to drop. We need to reap these manually,
8547 * as nobody else will be looking for them.
8548 */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008549 do {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008550 io_uring_try_cancel_requests(ctx, NULL, NULL);
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008551
8552 WARN_ON_ONCE(time_after(jiffies, timeout));
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008553 } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008554
8555 mutex_lock(&ctx->uring_lock);
8556 while (!list_empty(&ctx->tctx_list)) {
Pavel Begunkovb5bb3a22021-03-06 11:02:16 +00008557 WARN_ON_ONCE(time_after(jiffies, timeout));
8558
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008559 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
8560 ctx_node);
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008561 exit.ctx = ctx;
Pavel Begunkovd56d9382021-03-06 11:02:13 +00008562 init_completion(&exit.completion);
8563 init_task_work(&exit.task_work, io_tctx_exit_cb);
8564 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
8565 if (WARN_ON_ONCE(ret))
8566 continue;
8567 wake_up_process(node->task);
8568
8569 mutex_unlock(&ctx->uring_lock);
8570 wait_for_completion(&exit.completion);
8571 cond_resched();
8572 mutex_lock(&ctx->uring_lock);
8573 }
8574 mutex_unlock(&ctx->uring_lock);
8575
Jens Axboe85faa7b2020-04-09 18:14:00 -06008576 io_ring_ctx_free(ctx);
8577}
8578
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008579/* Returns true if we found and killed one or more timeouts */
8580static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
8581 struct files_struct *files)
8582{
8583 struct io_kiocb *req, *tmp;
8584 int canceled = 0;
8585
8586 spin_lock_irq(&ctx->completion_lock);
8587 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
8588 if (io_match_task(req, tsk, files)) {
8589 io_kill_timeout(req, -ECANCELED);
8590 canceled++;
8591 }
8592 }
Pavel Begunkov51520422021-03-29 11:39:29 +01008593 if (canceled != 0)
8594 io_commit_cqring(ctx);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008595 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkov80c4cbd2021-03-25 18:32:43 +00008596 if (canceled != 0)
8597 io_cqring_ev_posted(ctx);
8598 return canceled != 0;
8599}
8600
Jens Axboe2b188cc2019-01-07 10:46:33 -07008601static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8602{
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008603 unsigned long index;
 8604	const struct cred *creds;
8605
Jens Axboe2b188cc2019-01-07 10:46:33 -07008606 mutex_lock(&ctx->uring_lock);
8607 percpu_ref_kill(&ctx->refs);
Pavel Begunkovcda286f2020-12-17 00:24:35 +00008608 /* if force is set, the ring is going away. always drop after that */
8609 ctx->cq_overflow_flushed = 1;
Pavel Begunkov634578f2020-12-06 22:22:44 +00008610 if (ctx->rings)
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00008611 __io_cqring_overflow_flush(ctx, true);
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00008612 xa_for_each(&ctx->personalities, index, creds)
8613 io_unregister_personality(ctx, index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008614 mutex_unlock(&ctx->uring_lock);
8615
Pavel Begunkov6b819282020-11-06 13:00:25 +00008616 io_kill_timeouts(ctx, NULL, NULL);
8617 io_poll_remove_all(ctx, NULL, NULL);
Jens Axboe561fb042019-10-24 07:25:42 -06008618
Jens Axboe15dff282019-11-13 09:09:23 -07008619 /* if we failed setting up the ctx, we might not have any rings */
Pavel Begunkovb2edc0a2020-07-07 16:36:22 +03008620 io_iopoll_try_reap_events(ctx);
Jens Axboe309fc032020-07-10 09:13:34 -06008621
Jens Axboe85faa7b2020-04-09 18:14:00 -06008622 INIT_WORK(&ctx->exit_work, io_ring_exit_work);
Jens Axboefc666772020-08-19 11:10:51 -06008623 /*
8624 * Use system_unbound_wq to avoid spawning tons of event kworkers
8625 * if we're exiting a ton of rings at the same time. It just adds
 8626	 * noise and overhead; there's no discernible change in runtime
8627 * over using system_wq.
8628 */
8629 queue_work(system_unbound_wq, &ctx->exit_work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07008630}
8631
8632static int io_uring_release(struct inode *inode, struct file *file)
8633{
8634 struct io_ring_ctx *ctx = file->private_data;
8635
8636 file->private_data = NULL;
8637 io_ring_ctx_wait_and_kill(ctx);
8638 return 0;
8639}
8640
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008641struct io_task_cancel {
8642 struct task_struct *task;
8643 struct files_struct *files;
8644};
Pavel Begunkov67c4d9e2020-06-15 10:24:05 +03008645
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008646static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
Jens Axboeb711d4e2020-08-16 08:23:05 -07008647{
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008648 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008649 struct io_task_cancel *cancel = data;
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008650 bool ret;
8651
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008652 if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008653 unsigned long flags;
8654 struct io_ring_ctx *ctx = req->ctx;
8655
8656 /* protect against races with linked timeouts */
8657 spin_lock_irqsave(&ctx->completion_lock, flags);
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008658 ret = io_match_task(req, cancel->task, cancel->files);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008659 spin_unlock_irqrestore(&ctx->completion_lock, flags);
8660 } else {
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008661 ret = io_match_task(req, cancel->task, cancel->files);
Pavel Begunkov9a472ef2020-11-05 22:31:37 +00008662 }
8663 return ret;
Jens Axboeb711d4e2020-08-16 08:23:05 -07008664}
8665
Pavel Begunkove1915f72021-03-11 23:29:35 +00008666static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
Pavel Begunkovef9865a2020-11-05 14:06:19 +00008667 struct task_struct *task,
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008668 struct files_struct *files)
8669{
Pavel Begunkove1915f72021-03-11 23:29:35 +00008670 struct io_defer_entry *de;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008671 LIST_HEAD(list);
8672
8673 spin_lock_irq(&ctx->completion_lock);
8674 list_for_each_entry_reverse(de, &ctx->defer_list, list) {
Pavel Begunkov08d23632020-11-06 13:00:22 +00008675 if (io_match_task(de->req, task, files)) {
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008676 list_cut_position(&list, &ctx->defer_list, &de->list);
8677 break;
8678 }
8679 }
8680 spin_unlock_irq(&ctx->completion_lock);
Pavel Begunkove1915f72021-03-11 23:29:35 +00008681 if (list_empty(&list))
8682 return false;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008683
8684 while (!list_empty(&list)) {
8685 de = list_first_entry(&list, struct io_defer_entry, list);
8686 list_del_init(&de->list);
Pavel Begunkovf41db2732021-02-28 22:35:12 +00008687 io_req_complete_failed(de->req, -ECANCELED);
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008688 kfree(de);
8689 }
Pavel Begunkove1915f72021-03-11 23:29:35 +00008690 return true;
Pavel Begunkovb7ddce32020-09-06 00:45:14 +03008691}
8692
Pavel Begunkov1b007642021-03-06 11:02:17 +00008693static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8694{
8695 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8696
8697 return req->ctx == data;
8698}
8699
8700static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
8701{
8702 struct io_tctx_node *node;
8703 enum io_wq_cancel cret;
8704 bool ret = false;
8705
8706 mutex_lock(&ctx->uring_lock);
8707 list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
8708 struct io_uring_task *tctx = node->task->io_uring;
8709
8710 /*
8711 * io_wq will stay alive while we hold uring_lock, because it's
 8712	 * killed after ctx nodes, which requires taking the lock.
8713 */
8714 if (!tctx || !tctx->io_wq)
8715 continue;
8716 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
8717 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8718 }
8719 mutex_unlock(&ctx->uring_lock);
8720
8721 return ret;
8722}
8723
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008724static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
8725 struct task_struct *task,
8726 struct files_struct *files)
8727{
8728 struct io_task_cancel cancel = { .task = task, .files = files, };
Pavel Begunkov1b007642021-03-06 11:02:17 +00008729 struct io_uring_task *tctx = task ? task->io_uring : NULL;
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008730
8731 while (1) {
8732 enum io_wq_cancel cret;
8733 bool ret = false;
8734
Pavel Begunkov1b007642021-03-06 11:02:17 +00008735 if (!task) {
8736 ret |= io_uring_try_cancel_iowq(ctx);
8737 } else if (tctx && tctx->io_wq) {
8738 /*
8739 * Cancels requests of all rings, not only @ctx, but
8740 * it's fine as the task is in exit/exec.
8741 */
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008742 cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008743 &cancel, true);
8744 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
8745 }
8746
8747 /* SQPOLL thread does its own polling */
Jens Axboed052d1d2021-03-11 10:49:20 -07008748 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && !files) ||
8749 (ctx->sq_data && ctx->sq_data->thread == current)) {
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008750 while (!list_empty_careful(&ctx->iopoll_list)) {
8751 io_iopoll_try_reap_events(ctx);
8752 ret = true;
8753 }
8754 }
8755
Pavel Begunkove1915f72021-03-11 23:29:35 +00008756 ret |= io_cancel_defer_files(ctx, task, files);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008757 ret |= io_poll_remove_all(ctx, task, files);
8758 ret |= io_kill_timeouts(ctx, task, files);
8759 ret |= io_run_task_work();
Pavel Begunkovba50a032021-02-26 15:47:56 +00008760 ret |= io_run_ctx_fallback(ctx);
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008761 if (!ret)
8762 break;
8763 cond_resched();
8764 }
8765}
8766
Pavel Begunkovca70f002021-01-26 15:28:27 +00008767static int io_uring_count_inflight(struct io_ring_ctx *ctx,
8768 struct task_struct *task,
8769 struct files_struct *files)
8770{
8771 struct io_kiocb *req;
8772 int cnt = 0;
8773
8774 spin_lock_irq(&ctx->inflight_lock);
8775 list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
8776 cnt += io_match_task(req, task, files);
8777 spin_unlock_irq(&ctx->inflight_lock);
8778 return cnt;
8779}
8780
Pavel Begunkovb52fda02020-11-06 13:00:24 +00008781static void io_uring_cancel_files(struct io_ring_ctx *ctx,
Pavel Begunkovdf9923f2020-11-06 13:00:23 +00008782 struct task_struct *task,
Jens Axboefcb323c2019-10-24 12:39:47 -06008783 struct files_struct *files)
8784{
Jens Axboefcb323c2019-10-24 12:39:47 -06008785 while (!list_empty_careful(&ctx->inflight_list)) {
Xiaoguang Wangd8f1b972020-04-26 15:54:43 +08008786 DEFINE_WAIT(wait);
Pavel Begunkovca70f002021-01-26 15:28:27 +00008787 int inflight;
Jens Axboefcb323c2019-10-24 12:39:47 -06008788
Pavel Begunkovca70f002021-01-26 15:28:27 +00008789 inflight = io_uring_count_inflight(ctx, task, files);
8790 if (!inflight)
Jens Axboefcb323c2019-10-24 12:39:47 -06008791 break;
Pavel Begunkovf6edbab2020-11-06 13:00:26 +00008792
Pavel Begunkov9936c7c2021-02-04 13:51:56 +00008793 io_uring_try_cancel_requests(ctx, task, files);
Pavel Begunkovca70f002021-01-26 15:28:27 +00008794
8795 prepare_to_wait(&task->io_uring->wait, &wait,
8796 TASK_UNINTERRUPTIBLE);
8797 if (inflight == io_uring_count_inflight(ctx, task, files))
8798 schedule();
Pavel Begunkovc98de082020-11-15 12:56:32 +00008799 finish_wait(&task->io_uring->wait, &wait);
Jens Axboe0f212202020-09-13 13:09:39 -06008800 }
Jens Axboe0f212202020-09-13 13:09:39 -06008801}
8802
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00008803static int __io_uring_add_task_file(struct io_ring_ctx *ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008804{
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008805 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008806 struct io_tctx_node *node;
Pavel Begunkova528b042020-12-21 18:34:04 +00008807 int ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008808
8809 if (unlikely(!tctx)) {
Jens Axboe5aa75ed2021-02-16 12:56:50 -07008810 ret = io_uring_alloc_task_context(current, ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06008811 if (unlikely(ret))
8812 return ret;
Matthew Wilcox (Oracle)236434c2020-10-09 13:49:52 +01008813 tctx = current->io_uring;
Jens Axboe0f212202020-09-13 13:09:39 -06008814 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00008815 if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
8816 node = kmalloc(sizeof(*node), GFP_KERNEL);
8817 if (!node)
8818 return -ENOMEM;
8819 node->ctx = ctx;
8820 node->task = current;
Jens Axboe0f212202020-09-13 13:09:39 -06008821
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00008822 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
8823 node, GFP_KERNEL));
8824 if (ret) {
8825 kfree(node);
8826 return ret;
Jens Axboe0f212202020-09-13 13:09:39 -06008827 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00008828
8829 mutex_lock(&ctx->uring_lock);
8830 list_add(&node->ctx_node, &ctx->tctx_list);
8831 mutex_unlock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06008832 }
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00008833 tctx->last = ctx;
Jens Axboe0f212202020-09-13 13:09:39 -06008834 return 0;
8835}
8836
8837/*
Pavel Begunkovcf27f3b2021-03-19 17:22:31 +00008838 * Note that this task has used io_uring. We use it for cancelation purposes.
8839 */
8840static inline int io_uring_add_task_file(struct io_ring_ctx *ctx)
8841{
8842 struct io_uring_task *tctx = current->io_uring;
8843
8844 if (likely(tctx && tctx->last == ctx))
8845 return 0;
8846 return __io_uring_add_task_file(ctx);
8847}
8848
8849/*
Jens Axboe0f212202020-09-13 13:09:39 -06008850 * Remove this io_uring_file -> task mapping.
8851 */
Pavel Begunkov29412672021-03-06 11:02:11 +00008852static void io_uring_del_task_file(unsigned long index)
Jens Axboe0f212202020-09-13 13:09:39 -06008853{
8854 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008855 struct io_tctx_node *node;
Pavel Begunkov29412672021-03-06 11:02:11 +00008856
Pavel Begunkoveebd2e32021-03-06 11:02:14 +00008857 if (!tctx)
8858 return;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008859 node = xa_erase(&tctx->xa, index);
8860 if (!node)
Pavel Begunkov29412672021-03-06 11:02:11 +00008861 return;
Jens Axboe0f212202020-09-13 13:09:39 -06008862
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008863 WARN_ON_ONCE(current != node->task);
8864 WARN_ON_ONCE(list_empty(&node->ctx_node));
8865
8866 mutex_lock(&node->ctx->uring_lock);
8867 list_del(&node->ctx_node);
8868 mutex_unlock(&node->ctx->uring_lock);
8869
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00008870 if (tctx->last == node->ctx)
Jens Axboe0f212202020-09-13 13:09:39 -06008871 tctx->last = NULL;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008872 kfree(node);
Jens Axboe0f212202020-09-13 13:09:39 -06008873}
8874
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008875static void io_uring_clean_tctx(struct io_uring_task *tctx)
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008876{
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008877 struct io_tctx_node *node;
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008878 unsigned long index;
8879
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008880 xa_for_each(&tctx->xa, index, node)
Pavel Begunkov29412672021-03-06 11:02:11 +00008881 io_uring_del_task_file(index);
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008882 if (tctx->io_wq) {
8883 io_wq_put_and_exit(tctx->io_wq);
8884 tctx->io_wq = NULL;
8885 }
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008886}
8887
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008888static s64 tctx_inflight(struct io_uring_task *tctx)
8889{
8890 return percpu_counter_sum(&tctx->inflight);
8891}
8892
8893static void io_sqpoll_cancel_cb(struct callback_head *cb)
8894{
8895 struct io_tctx_exit *work = container_of(cb, struct io_tctx_exit, task_work);
8896 struct io_ring_ctx *ctx = work->ctx;
8897 struct io_sq_data *sqd = ctx->sq_data;
8898
8899 if (sqd->thread)
8900 io_uring_cancel_sqpoll(ctx);
8901 complete(&work->completion);
8902}
8903
8904static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
8905{
8906 struct io_sq_data *sqd = ctx->sq_data;
8907 struct io_tctx_exit work = { .ctx = ctx, };
8908 struct task_struct *task;
8909
8910 io_sq_thread_park(sqd);
8911 list_del_init(&ctx->sqd_list);
8912 io_sqd_update_thread_idle(sqd);
8913 task = sqd->thread;
8914 if (task) {
8915 init_completion(&work.completion);
8916 init_task_work(&work.task_work, io_sqpoll_cancel_cb);
Pavel Begunkovb7f5a0b2021-03-15 14:23:08 +00008917 io_task_work_add_head(&sqd->park_task_work, &work.task_work);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008918 wake_up_process(task);
8919 }
8920 io_sq_thread_unpark(sqd);
8921
8922 if (task)
8923 wait_for_completion(&work.completion);
8924}
8925
Jens Axboe0f212202020-09-13 13:09:39 -06008926void __io_uring_files_cancel(struct files_struct *files)
8927{
8928 struct io_uring_task *tctx = current->io_uring;
Pavel Begunkov13bf43f2021-03-06 11:02:12 +00008929 struct io_tctx_node *node;
Matthew Wilcox (Oracle)ce765372020-10-09 13:49:51 +01008930 unsigned long index;
Jens Axboe0f212202020-09-13 13:09:39 -06008931
8932 /* make sure overflow events are dropped */
Jens Axboefdaf0832020-10-30 09:37:30 -06008933 atomic_inc(&tctx->in_idle);
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008934 xa_for_each(&tctx->xa, index, node) {
8935 struct io_ring_ctx *ctx = node->ctx;
8936
8937 if (ctx->sq_data) {
8938 io_sqpoll_cancel_sync(ctx);
8939 continue;
8940 }
8941 io_uring_cancel_files(ctx, current, files);
8942 if (!files)
8943 io_uring_try_cancel_requests(ctx, current, NULL);
8944 }
Jens Axboefdaf0832020-10-30 09:37:30 -06008945 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00008946
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00008947 if (files)
8948 io_uring_clean_tctx(tctx);
Jens Axboefdaf0832020-10-30 09:37:30 -06008949}
8950
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008951/* should only be called by SQPOLL task */
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008952static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
8953{
Jens Axboe37d1e2e2021-02-17 21:03:43 -07008954 struct io_sq_data *sqd = ctx->sq_data;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008955 struct io_uring_task *tctx = current->io_uring;
Jens Axboefdaf0832020-10-30 09:37:30 -06008956 s64 inflight;
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008957 DEFINE_WAIT(wait);
Jens Axboefdaf0832020-10-30 09:37:30 -06008958
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008959 WARN_ON_ONCE(!sqd || ctx->sq_data->thread != current);
8960
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008961 atomic_inc(&tctx->in_idle);
8962 do {
8963 /* read completions before cancelations */
8964 inflight = tctx_inflight(tctx);
8965 if (!inflight)
8966 break;
Pavel Begunkov521d6a72021-03-11 23:29:38 +00008967 io_uring_try_cancel_requests(ctx, current, NULL);
Jens Axboefdaf0832020-10-30 09:37:30 -06008968
Pavel Begunkov0e9ddb32021-02-07 22:34:26 +00008969 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
8970 /*
8971 * If we've seen completions, retry without waiting. This
8972 * avoids a race where a completion comes in before we did
8973 * prepare_to_wait().
8974 */
8975 if (inflight == tctx_inflight(tctx))
8976 schedule();
8977 finish_wait(&tctx->wait, &wait);
8978 } while (1);
8979 atomic_dec(&tctx->in_idle);
Jens Axboe0f212202020-09-13 13:09:39 -06008980}
8981
Jens Axboe0f212202020-09-13 13:09:39 -06008982/*
8983 * Find any io_uring fd that this task has registered or done IO on, and cancel
8984 * requests.
8985 */
8986void __io_uring_task_cancel(void)
8987{
8988 struct io_uring_task *tctx = current->io_uring;
8989 DEFINE_WAIT(wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06008990 s64 inflight;
Jens Axboe0f212202020-09-13 13:09:39 -06008991
8992 /* make sure overflow events are dropped */
Jens Axboefdaf0832020-10-30 09:37:30 -06008993 atomic_inc(&tctx->in_idle);
Pavel Begunkov5a978dc2021-03-27 09:59:30 +00008994 __io_uring_files_cancel(NULL);
8995
Jens Axboed8a6df12020-10-15 16:24:45 -06008996 do {
Jens Axboe0f212202020-09-13 13:09:39 -06008997 /* read completions before cancelations */
Jens Axboefdaf0832020-10-30 09:37:30 -06008998 inflight = tctx_inflight(tctx);
Jens Axboed8a6df12020-10-15 16:24:45 -06008999 if (!inflight)
9000 break;
Jens Axboe0f212202020-09-13 13:09:39 -06009001 __io_uring_files_cancel(NULL);
9002
9003 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
9004
9005 /*
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009006 * If we've seen completions, retry without waiting. This
9007 * avoids a race where a completion comes in before we did
9008 * prepare_to_wait().
Jens Axboe0f212202020-09-13 13:09:39 -06009009 */
Pavel Begunkova1bb3cd2021-01-26 15:28:26 +00009010 if (inflight == tctx_inflight(tctx))
9011 schedule();
Pavel Begunkovf57555e2020-12-20 13:21:44 +00009012 finish_wait(&tctx->wait, &wait);
Jens Axboed8a6df12020-10-15 16:24:45 -06009013 } while (1);
Jens Axboe0f212202020-09-13 13:09:39 -06009014
Jens Axboefdaf0832020-10-30 09:37:30 -06009015 atomic_dec(&tctx->in_idle);
Pavel Begunkovde7f1d92021-01-04 20:43:29 +00009016
Pavel Begunkov8452d4a2021-02-27 11:16:46 +00009017 io_uring_clean_tctx(tctx);
9018 /* all current's requests should be gone, we can kill tctx */
9019 __io_uring_free(current);
Pavel Begunkov44e728b2020-06-15 10:24:04 +03009020}
9021
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009022static void *io_uring_validate_mmap_request(struct file *file,
9023 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009024{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009025 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009026 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009027 struct page *page;
9028 void *ptr;
9029
9030 switch (offset) {
9031 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00009032 case IORING_OFF_CQ_RING:
9033 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009034 break;
9035 case IORING_OFF_SQES:
9036 ptr = ctx->sq_sqes;
9037 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009038 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009039 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009040 }
9041
9042 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07009043 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009044 return ERR_PTR(-EINVAL);
9045
9046 return ptr;
9047}
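/*
 * Userspace-side sketch (illustrative only, not part of this file) of the
 * mmap offsets validated above, assuming ring_fd and a struct
 * io_uring_params p already filled in by io_uring_setup(); with
 * IORING_FEAT_SINGLE_MMAP the CQ ring shares the SQ ring mapping. Error
 * handling is omitted.
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *	size_t sqes_sz = p.sq_entries * sizeof(struct io_uring_sqe);
 *
 *	if (p.features & IORING_FEAT_SINGLE_MMAP)
 *		sq_sz = cq_sz > sq_sz ? cq_sz : sq_sz;
 *
 *	void *sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, ring_fd,
 *			     IORING_OFF_SQ_RING);
 *	void *cq_ring = (p.features & IORING_FEAT_SINGLE_MMAP) ? sq_ring :
 *			mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, ring_fd,
 *			     IORING_OFF_CQ_RING);
 *	struct io_uring_sqe *sqes = mmap(NULL, sqes_sz, PROT_READ | PROT_WRITE,
 *					 MAP_SHARED | MAP_POPULATE, ring_fd,
 *					 IORING_OFF_SQES);
 */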
9048
9049#ifdef CONFIG_MMU
9050
9051static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9052{
9053 size_t sz = vma->vm_end - vma->vm_start;
9054 unsigned long pfn;
9055 void *ptr;
9056
9057 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
9058 if (IS_ERR(ptr))
9059 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009060
9061 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
9062 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
9063}
9064
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009065#else /* !CONFIG_MMU */
9066
9067static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
9068{
9069 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
9070}
9071
9072static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
9073{
9074 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
9075}
9076
9077static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
9078 unsigned long addr, unsigned long len,
9079 unsigned long pgoff, unsigned long flags)
9080{
9081 void *ptr;
9082
9083 ptr = io_uring_validate_mmap_request(file, pgoff, len);
9084 if (IS_ERR(ptr))
9085 return PTR_ERR(ptr);
9086
9087 return (unsigned long) ptr;
9088}
9089
9090#endif /* !CONFIG_MMU */
9091
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009092static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
Jens Axboe90554202020-09-03 12:12:41 -06009093{
9094 DEFINE_WAIT(wait);
9095
9096 do {
9097 if (!io_sqring_full(ctx))
9098 break;
Jens Axboe90554202020-09-03 12:12:41 -06009099 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9100
9101 if (!io_sqring_full(ctx))
9102 break;
Jens Axboe90554202020-09-03 12:12:41 -06009103 schedule();
9104 } while (!signal_pending(current));
9105
9106 finish_wait(&ctx->sqo_sq_wait, &wait);
Yang Li51993282021-03-09 14:30:41 +08009107 return 0;
Jens Axboe90554202020-09-03 12:12:41 -06009108}
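/*
 * Userspace-side sketch (illustrative only, not part of this file): with
 * IORING_SETUP_SQPOLL, a submitter whose SQ ring is full can ask the kernel
 * to block until the poll thread frees up space instead of busy-waiting,
 * which is what the wait above implements. ring_fd and the raw syscall use
 * are assumptions of the example.
 *
 *	// SQ ring full: sleep until the SQPOLL thread consumes entries.
 *	syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *		IORING_ENTER_SQ_WAIT, NULL, 0);
 */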
9109
Hao Xuc73ebb62020-11-03 10:54:37 +08009110static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
9111 struct __kernel_timespec __user **ts,
9112 const sigset_t __user **sig)
9113{
9114 struct io_uring_getevents_arg arg;
9115
9116 /*
9117 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
9118 * is just a pointer to the sigset_t.
9119 */
9120 if (!(flags & IORING_ENTER_EXT_ARG)) {
9121 *sig = (const sigset_t __user *) argp;
9122 *ts = NULL;
9123 return 0;
9124 }
9125
9126 /*
9127 * EXT_ARG is set - ensure we agree on the size of it and copy in our
9128 * timespec and sigset_t pointers if good.
9129 */
9130 if (*argsz != sizeof(arg))
9131 return -EINVAL;
9132 if (copy_from_user(&arg, argp, sizeof(arg)))
9133 return -EFAULT;
9134 *sig = u64_to_user_ptr(arg.sigmask);
9135 *argsz = arg.sigmask_sz;
9136 *ts = u64_to_user_ptr(arg.ts);
9137 return 0;
9138}
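/*
 * Userspace-side sketch (illustrative only, not part of this file) of the
 * extended-argument form parsed above: with IORING_ENTER_EXT_ARG the last
 * two io_uring_enter() arguments are a struct io_uring_getevents_arg pointer
 * and its size rather than a bare sigset_t, which lets the application pass
 * a wait timeout. ring_fd and the raw syscall use are assumptions of the
 * example.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= 0,				// no sigset
 *		.ts		= (__u64)(uintptr_t)&ts,	// 1s timeout
 *	};
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, 0, 1,
 *			  IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *			  &arg, sizeof(arg));
 */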
9139
Jens Axboe2b188cc2019-01-07 10:46:33 -07009140SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
Hao Xuc73ebb62020-11-03 10:54:37 +08009141 u32, min_complete, u32, flags, const void __user *, argp,
9142 size_t, argsz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009143{
9144 struct io_ring_ctx *ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009145 int submitted = 0;
9146 struct fd f;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009147 long ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009148
Jens Axboe4c6e2772020-07-01 11:29:10 -06009149 io_run_task_work();
Jens Axboeb41e9852020-02-17 09:52:41 -07009150
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009151 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
9152 IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009153 return -EINVAL;
9154
9155 f = fdget(fd);
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009156 if (unlikely(!f.file))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009157 return -EBADF;
9158
9159 ret = -EOPNOTSUPP;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009160 if (unlikely(f.file->f_op != &io_uring_fops))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009161 goto out_fput;
9162
9163 ret = -ENXIO;
9164 ctx = f.file->private_data;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009165 if (unlikely(!percpu_ref_tryget(&ctx->refs)))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009166 goto out_fput;
9167
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009168 ret = -EBADFD;
Pavel Begunkov33f993d2021-03-19 17:22:30 +00009169 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009170 goto out;
9171
Jens Axboe6c271ce2019-01-10 11:22:30 -07009172 /*
9173 * For SQ polling, the thread will do all submissions and completions.
9174 * Just return the requested submit count, and wake the thread if
9175 * we were asked to.
9176 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009177 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07009178 if (ctx->flags & IORING_SETUP_SQPOLL) {
Pavel Begunkov6c2450a2021-02-23 12:40:22 +00009179 io_cqring_overflow_flush(ctx, false);
Pavel Begunkov89448c42020-12-17 00:24:39 +00009180
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009181 ret = -EOWNERDEAD;
Stefan Metzmacher04147482021-03-07 11:54:29 +01009182 if (unlikely(ctx->sq_data->thread == NULL)) {
9183 goto out;
9184 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009185 if (flags & IORING_ENTER_SQ_WAKEUP)
Jens Axboe534ca6d2020-09-02 13:52:19 -06009186 wake_up(&ctx->sq_data->wait);
Pavel Begunkovd9d05212021-01-08 20:57:25 +00009187 if (flags & IORING_ENTER_SQ_WAIT) {
9188 ret = io_sqpoll_wait_sq(ctx);
9189 if (ret)
9190 goto out;
9191 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07009192 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06009193 } else if (to_submit) {
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009194 ret = io_uring_add_task_file(ctx);
Jens Axboe0f212202020-09-13 13:09:39 -06009195 if (unlikely(ret))
9196 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009197 mutex_lock(&ctx->uring_lock);
Jens Axboe0f212202020-09-13 13:09:39 -06009198 submitted = io_submit_sqes(ctx, to_submit);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009199 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009200
9201 if (submitted != to_submit)
9202 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009203 }
9204 if (flags & IORING_ENTER_GETEVENTS) {
Hao Xuc73ebb62020-11-03 10:54:37 +08009205 const sigset_t __user *sig;
9206 struct __kernel_timespec __user *ts;
9207
9208 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
9209 if (unlikely(ret))
9210 goto out;
9211
Jens Axboe2b188cc2019-01-07 10:46:33 -07009212 min_complete = min(min_complete, ctx->cq_entries);
9213
Xiaoguang Wang32b22442020-03-11 09:26:09 +08009214 /*
9215 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
 9216	 * space applications don't need to poll for IO completion events
 9217	 * themselves; they can rely on io_sq_thread to do the polling
 9218	 * work, which can reduce cpu usage and uring_lock contention.
9219 */
9220 if (ctx->flags & IORING_SETUP_IOPOLL &&
9221 !(ctx->flags & IORING_SETUP_SQPOLL)) {
Pavel Begunkov7668b922020-07-07 16:36:21 +03009222 ret = io_iopoll_check(ctx, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07009223 } else {
Hao Xuc73ebb62020-11-03 10:54:37 +08009224 ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
Jens Axboedef596e2019-01-09 08:59:42 -07009225 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009226 }
9227
Pavel Begunkov7c504e652019-12-18 19:53:45 +03009228out:
Pavel Begunkov6805b322019-10-08 02:18:42 +03009229 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009230out_fput:
9231 fdput(f);
9232 return submitted ? submitted : ret;
9233}
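/*
 * Userspace-side sketch (illustrative only, not part of this file) of the
 * SQPOLL branch above: after queueing SQEs and publishing the SQ tail, the
 * application only needs to enter the kernel when the poll thread has gone
 * idle and set IORING_SQ_NEED_WAKEUP. ring_fd, sq_tail and sq_flags
 * (pointers at sq_off.tail and sq_off.flags inside the SQ ring mapping) and
 * new_tail/to_submit are assumptions of the example.
 *
 *	// ... write new SQEs, then publish the tail ...
 *	__atomic_store_n(sq_tail, new_tail, __ATOMIC_RELEASE);
 *	__atomic_thread_fence(__ATOMIC_SEQ_CST);
 *
 *	if (__atomic_load_n(sq_flags, __ATOMIC_RELAXED) & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 */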
9234
Tobias Klauserbebdb652020-02-26 18:38:32 +01009235#ifdef CONFIG_PROC_FS
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009236static int io_uring_show_cred(struct seq_file *m, unsigned int id,
9237 const struct cred *cred)
Jens Axboe87ce9552020-01-30 08:25:34 -07009238{
Jens Axboe87ce9552020-01-30 08:25:34 -07009239 struct user_namespace *uns = seq_user_ns(m);
9240 struct group_info *gi;
9241 kernel_cap_t cap;
9242 unsigned __capi;
9243 int g;
9244
9245 seq_printf(m, "%5d\n", id);
9246 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9247 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9248 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9249 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9250 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9251 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9252 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9253 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9254 seq_puts(m, "\n\tGroups:\t");
9255 gi = cred->group_info;
9256 for (g = 0; g < gi->ngroups; g++) {
9257 seq_put_decimal_ull(m, g ? " " : "",
9258 from_kgid_munged(uns, gi->gid[g]));
9259 }
9260 seq_puts(m, "\n\tCapEff:\t");
9261 cap = cred->cap_effective;
9262 CAP_FOR_EACH_U32(__capi)
9263 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9264 seq_putc(m, '\n');
9265 return 0;
9266}
9267
9268static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9269{
Joseph Qidbbe9c62020-09-29 09:01:22 -06009270 struct io_sq_data *sq = NULL;
Jens Axboefad8e0d2020-09-28 08:57:48 -06009271 bool has_lock;
Jens Axboe87ce9552020-01-30 08:25:34 -07009272 int i;
9273
Jens Axboefad8e0d2020-09-28 08:57:48 -06009274 /*
9275 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
 9276	 * since the fdinfo case grabs it in the opposite direction of normal use
9277 * cases. If we fail to get the lock, we just don't iterate any
9278 * structures that could be going away outside the io_uring mutex.
9279 */
9280 has_lock = mutex_trylock(&ctx->uring_lock);
9281
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009282 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
Joseph Qidbbe9c62020-09-29 09:01:22 -06009283 sq = ctx->sq_data;
Jens Axboe5f3f26f2021-02-25 10:17:46 -07009284 if (!sq->thread)
9285 sq = NULL;
9286 }
Joseph Qidbbe9c62020-09-29 09:01:22 -06009287
9288 seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9289 seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
Jens Axboe87ce9552020-01-30 08:25:34 -07009290 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009291 for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
Jens Axboe7b29f922021-03-12 08:30:14 -07009292 struct file *f = io_file_from_index(ctx, i);
Jens Axboe87ce9552020-01-30 08:25:34 -07009293
Jens Axboe87ce9552020-01-30 08:25:34 -07009294 if (f)
9295 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9296 else
9297 seq_printf(m, "%5u: <none>\n", i);
9298 }
9299 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009300 for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
Jens Axboe87ce9552020-01-30 08:25:34 -07009301 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
9302
9303 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
9304 (unsigned int) buf->len);
9305 }
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009306 if (has_lock && !xa_empty(&ctx->personalities)) {
9307 unsigned long index;
9308 const struct cred *cred;
9309
Jens Axboe87ce9552020-01-30 08:25:34 -07009310 seq_printf(m, "Personalities:\n");
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009311 xa_for_each(&ctx->personalities, index, cred)
9312 io_uring_show_cred(m, index, cred);
Jens Axboe87ce9552020-01-30 08:25:34 -07009313 }
Jens Axboed7718a92020-02-14 22:23:12 -07009314 seq_printf(m, "PollList:\n");
9315 spin_lock_irq(&ctx->completion_lock);
9316 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9317 struct hlist_head *list = &ctx->cancel_hash[i];
9318 struct io_kiocb *req;
9319
9320 hlist_for_each_entry(req, list, hash_node)
9321 seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
9322 req->task->task_works != NULL);
9323 }
9324 spin_unlock_irq(&ctx->completion_lock);
Jens Axboefad8e0d2020-09-28 08:57:48 -06009325 if (has_lock)
9326 mutex_unlock(&ctx->uring_lock);
Jens Axboe87ce9552020-01-30 08:25:34 -07009327}
9328
9329static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9330{
9331 struct io_ring_ctx *ctx = f->private_data;
9332
9333 if (percpu_ref_tryget(&ctx->refs)) {
9334 __io_uring_show_fdinfo(ctx, m);
9335 percpu_ref_put(&ctx->refs);
9336 }
9337}
Tobias Klauserbebdb652020-02-26 18:38:32 +01009338#endif
Jens Axboe87ce9552020-01-30 08:25:34 -07009339
Jens Axboe2b188cc2019-01-07 10:46:33 -07009340static const struct file_operations io_uring_fops = {
9341 .release = io_uring_release,
9342 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +01009343#ifndef CONFIG_MMU
9344 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
9345 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
9346#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009347 .poll = io_uring_poll,
9348 .fasync = io_uring_fasync,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009349#ifdef CONFIG_PROC_FS
Jens Axboe87ce9552020-01-30 08:25:34 -07009350 .show_fdinfo = io_uring_show_fdinfo,
Tobias Klauserbebdb652020-02-26 18:38:32 +01009351#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07009352};
9353
9354static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9355 struct io_uring_params *p)
9356{
Hristo Venev75b28af2019-08-26 17:23:46 +00009357 struct io_rings *rings;
9358 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009359
Jens Axboebd740482020-08-05 12:58:23 -06009360 /* make sure these are sane, as we already accounted them */
9361 ctx->sq_entries = p->sq_entries;
9362 ctx->cq_entries = p->cq_entries;
9363
Hristo Venev75b28af2019-08-26 17:23:46 +00009364 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9365 if (size == SIZE_MAX)
9366 return -EOVERFLOW;
9367
9368 rings = io_mem_alloc(size);
9369 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009370 return -ENOMEM;
9371
Hristo Venev75b28af2019-08-26 17:23:46 +00009372 ctx->rings = rings;
9373 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9374 rings->sq_ring_mask = p->sq_entries - 1;
9375 rings->cq_ring_mask = p->cq_entries - 1;
9376 rings->sq_ring_entries = p->sq_entries;
9377 rings->cq_ring_entries = p->cq_entries;
9378 ctx->sq_mask = rings->sq_ring_mask;
9379 ctx->cq_mask = rings->cq_ring_mask;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009380
9381 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -07009382 if (size == SIZE_MAX) {
9383 io_mem_free(ctx->rings);
9384 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009385 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -07009386 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009387
9388 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -07009389 if (!ctx->sq_sqes) {
9390 io_mem_free(ctx->rings);
9391 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009392 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -07009393 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009394
Jens Axboe2b188cc2019-01-07 10:46:33 -07009395 return 0;
9396}
9397
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009398static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9399{
9400 int ret, fd;
9401
9402 fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9403 if (fd < 0)
9404 return fd;
9405
Pavel Begunkovbaf186c2021-03-06 11:02:15 +00009406 ret = io_uring_add_task_file(ctx);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009407 if (ret) {
9408 put_unused_fd(fd);
9409 return ret;
9410 }
9411 fd_install(fd, file);
9412 return fd;
9413}
9414
Jens Axboe2b188cc2019-01-07 10:46:33 -07009415/*
 9416	 * Allocate an anonymous fd; this is what constitutes the application
9417 * visible backing of an io_uring instance. The application mmaps this
9418 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9419 * we have to tie this fd to a socket for file garbage collection purposes.
9420 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009421static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009422{
9423 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009424#if defined(CONFIG_UNIX)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009425 int ret;
9426
Jens Axboe2b188cc2019-01-07 10:46:33 -07009427 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9428 &ctx->ring_sock);
9429 if (ret)
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009430 return ERR_PTR(ret);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009431#endif
9432
Jens Axboe2b188cc2019-01-07 10:46:33 -07009433 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9434 O_RDWR | O_CLOEXEC);
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009435#if defined(CONFIG_UNIX)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009436 if (IS_ERR(file)) {
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009437 sock_release(ctx->ring_sock);
9438 ctx->ring_sock = NULL;
9439 } else {
9440 ctx->ring_sock->file = file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009441 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009442#endif
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009443 return file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009444}
9445
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009446static int io_uring_create(unsigned entries, struct io_uring_params *p,
9447 struct io_uring_params __user *params)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009448{
Jens Axboe2b188cc2019-01-07 10:46:33 -07009449 struct io_ring_ctx *ctx;
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009450 struct file *file;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009451 int ret;
9452
Jens Axboe8110c1a2019-12-28 15:39:54 -07009453 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009454 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009455 if (entries > IORING_MAX_ENTRIES) {
9456 if (!(p->flags & IORING_SETUP_CLAMP))
9457 return -EINVAL;
9458 entries = IORING_MAX_ENTRIES;
9459 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009460
9461 /*
9462 * Use twice as many entries for the CQ ring. It's possible for the
9463 * application to drive a higher depth than the size of the SQ ring,
9464 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06009465 * some flexibility in overcommitting a bit. If the application has
9466 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9467 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07009468 */
9469 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06009470 if (p->flags & IORING_SETUP_CQSIZE) {
9471 /*
9472 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9473 * to a power-of-two, if it isn't already. We do NOT impose
9474 * any cq vs sq ring sizing.
9475 */
Joseph Qieb2667b32020-11-24 15:03:03 +08009476 if (!p->cq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -06009477 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07009478 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9479 if (!(p->flags & IORING_SETUP_CLAMP))
9480 return -EINVAL;
9481 p->cq_entries = IORING_MAX_CQ_ENTRIES;
9482 }
Joseph Qieb2667b32020-11-24 15:03:03 +08009483 p->cq_entries = roundup_pow_of_two(p->cq_entries);
9484 if (p->cq_entries < p->sq_entries)
9485 return -EINVAL;
Jens Axboe33a107f2019-10-04 12:10:03 -06009486 } else {
9487 p->cq_entries = 2 * p->sq_entries;
9488 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07009489
Jens Axboe2b188cc2019-01-07 10:46:33 -07009490 ctx = io_ring_ctx_alloc(p);
Jens Axboe62e398b2021-02-21 16:19:37 -07009491 if (!ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07009492 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009493 ctx->compat = in_compat_syscall();
Jens Axboe62e398b2021-02-21 16:19:37 -07009494 if (!capable(CAP_IPC_LOCK))
9495 ctx->user = get_uid(current_user());
Jens Axboe2aede0e2020-09-14 10:45:53 -06009496
9497 /*
9498 * This is just grabbed for accounting purposes. When a process exits,
9499 * the mm is exited and dropped before the files, hence we need to hang
9500 * on to this mm purely for the purposes of being able to unaccount
9501 * memory (locked/pinned vm). It's not used for anything else.
9502 */
Jens Axboe6b7898e2020-08-25 07:58:00 -06009503 mmgrab(current->mm);
Jens Axboe2aede0e2020-09-14 10:45:53 -06009504 ctx->mm_account = current->mm;
Jens Axboe6b7898e2020-08-25 07:58:00 -06009505
Jens Axboe2b188cc2019-01-07 10:46:33 -07009506 ret = io_allocate_scq_urings(ctx, p);
9507 if (ret)
9508 goto err;
9509
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009510 ret = io_sq_offload_create(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009511 if (ret)
9512 goto err;
9513
Jens Axboe2b188cc2019-01-07 10:46:33 -07009514 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009515 p->sq_off.head = offsetof(struct io_rings, sq.head);
9516 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9517 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9518 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9519 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9520 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9521 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009522
9523 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00009524 p->cq_off.head = offsetof(struct io_rings, cq.head);
9525 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9526 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9527 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9528 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9529 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Stefano Garzarella0d9b5b32020-05-15 18:38:04 +02009530 p->cq_off.flags = offsetof(struct io_rings, cq_flags);
Jens Axboeac90f242019-09-06 10:26:21 -06009531
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009532 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9533 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
Jiufei Xue5769a352020-06-17 17:53:55 +08009534 IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
Hao Xuc73ebb62020-11-03 10:54:37 +08009535 IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
Jens Axboe1c0aa1f2021-02-20 11:55:28 -07009536 IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS;
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009537
9538 if (copy_to_user(params, p, sizeof(*p))) {
9539 ret = -EFAULT;
9540 goto err;
9541 }
Jens Axboed1719f72020-07-30 13:43:53 -06009542
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009543 file = io_uring_get_file(ctx);
9544 if (IS_ERR(file)) {
9545 ret = PTR_ERR(file);
9546 goto err;
9547 }
9548
Jens Axboed1719f72020-07-30 13:43:53 -06009549 /*
Jens Axboe044c1ab2019-10-28 09:15:33 -06009550 * Install ring fd as the very last thing, so we don't risk someone
9551 * having closed it before we finish setup
9552 */
Pavel Begunkov9faadcc2020-12-21 18:34:05 +00009553 ret = io_uring_install_fd(ctx, file);
9554 if (ret < 0) {
9555 /* fput will clean it up */
9556 fput(file);
9557 return ret;
9558 }
Jens Axboe044c1ab2019-10-28 09:15:33 -06009559
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02009560 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009561 return ret;
9562err:
9563 io_ring_ctx_wait_and_kill(ctx);
9564 return ret;
9565}
9566
9567/*
 9568	 * Sets up an io_uring context and returns the fd. The application asks for a
 9569	 * ring size; we return the actual sq/cq ring sizes (among other things) in the
9570 * params structure passed in.
9571 */
9572static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9573{
9574 struct io_uring_params p;
Jens Axboe2b188cc2019-01-07 10:46:33 -07009575 int i;
9576
9577 if (copy_from_user(&p, params, sizeof(p)))
9578 return -EFAULT;
9579 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9580 if (p.resv[i])
9581 return -EINVAL;
9582 }
9583
Jens Axboe6c271ce2019-01-10 11:22:30 -07009584 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -07009585 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009586 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9587 IORING_SETUP_R_DISABLED))
Jens Axboe2b188cc2019-01-07 10:46:33 -07009588 return -EINVAL;
9589
Xiaoguang Wang7f136572020-05-05 16:28:53 +08009590 return io_uring_create(entries, &p, params);
Jens Axboe2b188cc2019-01-07 10:46:33 -07009591}
9592
9593SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9594 struct io_uring_params __user *, params)
9595{
9596 return io_uring_setup(entries, params);
9597}
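/*
 * Userspace-side sketch (illustrative only, not part of this file) of the
 * setup path above, asking for a clamped SQ size and an explicit CQ size;
 * the kernel writes the rounded-up ring geometry and feature bits back into
 * the params structure. The raw syscall use is an assumption of the example.
 *
 *	struct io_uring_params p = {
 *		.flags		= IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP,
 *		.cq_entries	= 4096,
 *	};
 *	int ring_fd = syscall(__NR_io_uring_setup, 256, &p);
 *
 *	if (ring_fd >= 0)
 *		printf("sq=%u cq=%u features=0x%x\n",
 *		       p.sq_entries, p.cq_entries, p.features);
 */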
9598
Jens Axboe66f4af92020-01-16 15:36:52 -07009599static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9600{
9601 struct io_uring_probe *p;
9602 size_t size;
9603 int i, ret;
9604
9605 size = struct_size(p, ops, nr_args);
9606 if (size == SIZE_MAX)
9607 return -EOVERFLOW;
9608 p = kzalloc(size, GFP_KERNEL);
9609 if (!p)
9610 return -ENOMEM;
9611
9612 ret = -EFAULT;
9613 if (copy_from_user(p, arg, size))
9614 goto out;
9615 ret = -EINVAL;
9616 if (memchr_inv(p, 0, size))
9617 goto out;
9618
9619 p->last_op = IORING_OP_LAST - 1;
9620 if (nr_args > IORING_OP_LAST)
9621 nr_args = IORING_OP_LAST;
9622
9623 for (i = 0; i < nr_args; i++) {
9624 p->ops[i].op = i;
9625 if (!io_op_defs[i].not_supported)
9626 p->ops[i].flags = IO_URING_OP_SUPPORTED;
9627 }
9628 p->ops_len = i;
9629
9630 ret = 0;
9631 if (copy_to_user(arg, p, size))
9632 ret = -EFAULT;
9633out:
9634 kfree(p);
9635 return ret;
9636}
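/*
 * Userspace-side sketch (illustrative only, not part of this file) of the
 * probe interface implemented above: the application passes a zeroed struct
 * io_uring_probe with room for nr_args ops and checks IO_URING_OP_SUPPORTED
 * per opcode. ring_fd and the raw syscall use are assumptions of the
 * example.
 *
 *	size_t len = sizeof(struct io_uring_probe) +
 *		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
 *	struct io_uring_probe *probe = calloc(1, len);
 *
 *	if (!syscall(__NR_io_uring_register, ring_fd,
 *		     IORING_REGISTER_PROBE, probe, IORING_OP_LAST)) {
 *		for (int i = 0; i < probe->ops_len; i++)
 *			printf("op %d supported: %d\n", probe->ops[i].op,
 *			       !!(probe->ops[i].flags & IO_URING_OP_SUPPORTED));
 *	}
 *	free(probe);
 */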
9637
Jens Axboe071698e2020-01-28 10:04:42 -07009638static int io_register_personality(struct io_ring_ctx *ctx)
9639{
Jens Axboe4379bf82021-02-15 13:40:22 -07009640 const struct cred *creds;
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009641 u32 id;
Jens Axboe1e6fa522020-10-15 08:46:24 -06009642 int ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009643
Jens Axboe4379bf82021-02-15 13:40:22 -07009644 creds = get_current_cred();
Jens Axboe1e6fa522020-10-15 08:46:24 -06009645
Matthew Wilcox (Oracle)61cf9372021-03-08 14:16:16 +00009646 ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
9647 XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
9648 if (!ret)
9649 return id;
9650 put_cred(creds);
Jens Axboe1e6fa522020-10-15 08:46:24 -06009651 return ret;
Jens Axboe071698e2020-01-28 10:04:42 -07009652}
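/*
 * Userspace-side sketch (illustrative only, not part of this file):
 * registering a personality snapshots the caller's credentials and returns
 * an id that individual SQEs can later reference via sqe->personality, so
 * those requests run with the registered creds rather than the submitter's
 * current ones. ring_fd, sqe and the raw syscall use are assumptions of the
 * example.
 *
 *	// While running with the desired credentials:
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *
 *	// Later, for a specific request:
 *	sqe->personality = id;
 */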
9653
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009654static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9655 unsigned int nr_args)
9656{
9657 struct io_uring_restriction *res;
9658 size_t size;
9659 int i, ret;
9660
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009661 /* Restrictions allowed only if rings started disabled */
9662 if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9663 return -EBADFD;
9664
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009665 /* We allow only a single restrictions registration */
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009666 if (ctx->restrictions.registered)
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009667 return -EBUSY;
9668
9669 if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9670 return -EINVAL;
9671
9672 size = array_size(nr_args, sizeof(*res));
9673 if (size == SIZE_MAX)
9674 return -EOVERFLOW;
9675
9676 res = memdup_user(arg, size);
9677 if (IS_ERR(res))
9678 return PTR_ERR(res);
9679
9680 ret = 0;
9681
9682 for (i = 0; i < nr_args; i++) {
9683 switch (res[i].opcode) {
9684 case IORING_RESTRICTION_REGISTER_OP:
9685 if (res[i].register_op >= IORING_REGISTER_LAST) {
9686 ret = -EINVAL;
9687 goto out;
9688 }
9689
9690 __set_bit(res[i].register_op,
9691 ctx->restrictions.register_op);
9692 break;
9693 case IORING_RESTRICTION_SQE_OP:
9694 if (res[i].sqe_op >= IORING_OP_LAST) {
9695 ret = -EINVAL;
9696 goto out;
9697 }
9698
9699 __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9700 break;
9701 case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9702 ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9703 break;
9704 case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9705 ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9706 break;
9707 default:
9708 ret = -EINVAL;
9709 goto out;
9710 }
9711 }
9712
9713out:
9714 /* Reset all restrictions if an error happened */
9715 if (ret != 0)
9716 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9717 else
Stefano Garzarella7e84e1c2020-08-27 16:58:31 +02009718 ctx->restrictions.registered = true;
Stefano Garzarella21b55db2020-08-27 16:58:30 +02009719
9720 kfree(res);
9721 return ret;
9722}
9723
static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	if (ctx->restrictions.registered)
		ctx->restricted = 1;

	ctx->flags &= ~IORING_SETUP_R_DISABLED;
	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	return 0;
}

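/*
 * Sketch of the intended lifecycle around this helper, seen from userspace
 * (not part of this file; the helper name is illustrative): create the ring
 * disabled, register restrictions, then enable it.
 *
 *	#include <linux/io_uring.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int setup_restricted_ring(void)
 *	{
 *		struct io_uring_params p;
 *		int ring_fd;
 *
 *		memset(&p, 0, sizeof(p));
 *		p.flags = IORING_SETUP_R_DISABLED;
 *		ring_fd = syscall(__NR_io_uring_setup, 8, &p);
 *		if (ring_fd < 0)
 *			return ring_fd;
 *
 *		// ... IORING_REGISTER_RESTRICTIONS here, while still disabled ...
 *
 *		// Lift IORING_SETUP_R_DISABLED; submission (and a parked
 *		// SQPOLL thread, if any) may proceed from this point on.
 *		syscall(__NR_io_uring_register, ring_fd,
 *			IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 *		return ring_fd;
 *	}
 */
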
static bool io_register_op_must_quiesce(int op)
{
	switch (op) {
	case IORING_UNREGISTER_FILES:
	case IORING_REGISTER_FILES_UPDATE:
	case IORING_REGISTER_PROBE:
	case IORING_REGISTER_PERSONALITY:
	case IORING_UNREGISTER_PERSONALITY:
		return false;
	default:
		return true;
	}
}

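/*
 * IORING_REGISTER_BUFFERS is one of the opcodes that does require the quiesce
 * dance in __io_uring_register() below. From userspace it is just an iovec
 * array describing the buffers to pin (sketch, not part of this file; the
 * helper name is illustrative):
 *
 *	#include <linux/io_uring.h>
 *	#include <sys/syscall.h>
 *	#include <sys/uio.h>
 *	#include <unistd.h>
 *
 *	static int register_one_buffer(int ring_fd, void *buf, size_t len)
 *	{
 *		struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *		return syscall(__NR_io_uring_register, ring_fd,
 *			       IORING_REGISTER_BUFFERS, &iov, 1);
 *	}
 */
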
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex; if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (io_register_op_must_quiesce(opcode)) {
		percpu_ref_kill(&ctx->refs);

		/*
		 * Drop uring mutex before waiting for references to exit. If
		 * another thread is currently inside io_uring_enter() it might
		 * need to grab the uring_lock to make progress. If we hold it
		 * here across the drain wait, then we can deadlock. It's safe
		 * to drop the mutex here, since no new references will come in
		 * after we've killed the percpu ref.
		 */
		mutex_unlock(&ctx->uring_lock);
		do {
			ret = wait_for_completion_interruptible(&ctx->ref_comp);
			if (!ret)
				break;
			ret = io_run_task_work_sig();
			if (ret < 0)
				break;
		} while (1);

		mutex_lock(&ctx->uring_lock);

		if (ret) {
			percpu_ref_resurrect(&ctx->refs);
			goto out_quiesce;
		}
	}

	if (ctx->restricted) {
		if (opcode >= IORING_REGISTER_LAST) {
			ret = -EINVAL;
			goto out;
		}

		if (!test_bit(opcode, ctx->restrictions.register_op)) {
			ret = -EACCES;
			goto out;
		}
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffers_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_sqe_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
out_quiesce:
		reinit_completion(&ctx->ref_comp);
	}
	return ret;
}

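/*
 * Example of one of the simpler opcodes handled above (userspace sketch, not
 * part of this file; the helper name is illustrative): tie an eventfd to the
 * ring so completions can be noticed from poll/epoll loops. nr_args must be 1
 * and arg points at the eventfd descriptor.
 *
 *	#include <linux/io_uring.h>
 *	#include <sys/eventfd.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int attach_eventfd(int ring_fd)
 *	{
 *		int efd = eventfd(0, EFD_CLOEXEC);
 *
 *		if (efd < 0)
 *			return -1;
 *		if (syscall(__NR_io_uring_register, ring_fd,
 *			    IORING_REGISTER_EVENTFD, &efd, 1) < 0) {
 *			close(efd);
 *			return -1;
 *		}
 *		// IORING_REGISTER_EVENTFD_ASYNC is registered the same way;
 *		// it only narrows which completions signal efd.
 *		return efd;
 *	}
 */
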
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	io_run_task_work();

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
				ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}

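/*
 * For completeness, the raw userspace entry point that reaches the syscall
 * above when no liburing helper is used (sketch, not part of this file;
 * assumes headers that provide __NR_io_uring_register):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static inline int sys_io_uring_register(int fd, unsigned opcode,
 *						const void *arg, unsigned nr_args)
 *	{
 *		return (int)syscall(__NR_io_uring_register, fd, opcode,
 *				    arg, nr_args);
 *	}
 */
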
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT);
	return 0;
};
__initcall(io_uring_init);
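
/*
 * The BUILD_BUG_SQE_ELEM() checks in io_uring_init() above pin the UAPI
 * layout of struct io_uring_sqe at compile time. Userspace that hand-rolls
 * SQEs can mirror the same idea with C11 static assertions (sketch, not part
 * of this file; offsets are the ones verified above):
 *
 *	#include <assert.h>
 *	#include <stddef.h>
 *	#include <linux/io_uring.h>
 *
 *	static_assert(sizeof(struct io_uring_sqe) == 64, "SQE ABI changed");
 *	static_assert(offsetof(struct io_uring_sqe, user_data) == 32,
 *		      "user_data moved");
 *	static_assert(offsetof(struct io_uring_sqe, personality) == 42,
 *		      "personality moved");
 */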