// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * in between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
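
/*
 * Purely illustrative userspace sketch of the pairing described above (this
 * block is not part of the original file; the sq/cq structs, the array/cqes
 * pointers and the fill_sqe()/consume_cqe() helpers are hypothetical,
 * liburing-style raw ring accessors). Acquire/release accesses stand in for
 * the smp_rmb()/smp_wmb() pairings:
 *
 *	// submission: order the SQ head load before the sqe stores
 *	unsigned head = __atomic_load_n(sq->khead, __ATOMIC_ACQUIRE);
 *	unsigned tail = *sq->ktail;
 *	if (tail - head < *sq->kring_entries) {
 *		unsigned idx = tail & *sq->kring_mask;
 *		fill_sqe(&sq->sqes[idx]);		// hypothetical helper
 *		sq->array[idx] = idx;
 *		// publish the new tail only after the sqe is fully written
 *		__atomic_store_n(sq->ktail, tail + 1, __ATOMIC_RELEASE);
 *	}
 *
 *	// completion: load the CQ tail with acquire before reading the cqe
 *	unsigned ctail = __atomic_load_n(cq->ktail, __ATOMIC_ACQUIRE);
 *	unsigned chead = *cq->khead;
 *	if (chead != ctail) {
 *		consume_cqe(&cq->cqes[chead & *cq->kring_mask]);
 *		// the release store of head lets the kernel reuse the slot
 *		__atomic_store_n(cq->khead, chead + 1, __ATOMIC_RELEASE);
 *	}
 */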
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
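
/*
 * Illustrative use of the constants above (this comment is not part of the
 * original file): a fixed file index i is expected to be split by the two
 * level table roughly as
 *
 *	struct fixed_file_table *table;
 *
 *	table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
 *	file  = table->files[i & IORING_FILE_TABLE_MASK];
 *
 * i.e. the high bits pick one of up to 64 blocks of 512 pointers, for
 * IORING_MAX_FIXED_FILES == 32768 slots in total.
 */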

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
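
/*
 * Minimal userspace sketch of how struct io_rings is typically reached
 * (illustrative, not part of the original file; error handling is omitted
 * and io_uring_setup() is assumed to be a thin wrapper around the raw
 * syscall):
 *
 *	struct io_uring_params p = { };
 *	int fd = io_uring_setup(256, &p);
 *	size_t sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	char *sq_ring = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
 *	unsigned *sq_tail = (unsigned *)(sq_ring + p.sq_off.tail);
 *	unsigned *sq_mask = (unsigned *)(sq_ring + p.sq_off.ring_mask);
 *
 * The CQ ring and the sqe array are mapped the same way using
 * IORING_OFF_CQ_RING and IORING_OFF_SQES.
 */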

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct bio_vec	*bvec;
	unsigned int	nr_bvecs;
};

struct fixed_file_table {
	struct file		**files;
};

enum {
	FFD_F_ATOMIC,
};

struct fixed_file_data {
	struct fixed_file_table		*table;
	struct io_ring_ctx		*ctx;

	struct percpu_ref		refs;
	struct llist_head		put_llist;
	unsigned long			state;
	struct work_struct		ref_work;
	struct completion		done;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		account_mem: 1;
		unsigned int		cq_overflow_flushed: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		atomic_t		cached_cq_overflow;
		unsigned long		sq_check_overflow;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		wait_queue_head_t	inflight_wait;
		struct io_uring_sqe	*sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct io_rings	*rings;

	/* IO offload */
	struct io_wq		*io_wq;
	struct task_struct	*sqo_thread;	/* if using sq thread polling */
	struct mm_struct	*sqo_mm;
	wait_queue_head_t	sqo_wait;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_data	*file_data;
	unsigned		nr_user_files;
	int			ring_fd;
	struct file		*ring_file;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	const struct cred	*creds;

	/* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
	struct completion	*completions;

	/* if all else fails... */
	struct io_kiocb		*fallback_req;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct idr		personality_idr;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;
		struct llist_head	poll_llist;

		/*
		 * ->poll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	poll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	union {
		struct wait_queue_head	*head;
		u64			addr;
	};
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_close {
	struct file			*file;
	struct file			*put_file;
	int				fd;
};

struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
	u32				seq_offset;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

struct io_cancel {
	struct file			*file;
	u64				addr;
};

struct io_timeout {
	struct file			*file;
	u64				addr;
	int				flags;
	unsigned			count;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct user_msghdr __user *msg;
		void __user		*buf;
	};
	int				msg_flags;
	size_t				len;
};

struct io_open {
	struct file			*file;
	int				dfd;
	union {
		unsigned		mask;
	};
	struct filename			*filename;
	struct statx __user		*buffer;
	struct open_how			how;
};

struct io_files_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
	struct iovec			*iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
};

struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
	struct iovec			*iov;
	ssize_t				nr_segs;
	ssize_t				size;
};

struct io_async_ctx {
	union {
		struct io_async_rw	rw;
		struct io_async_msghdr	msg;
		struct io_async_connect	connect;
		struct io_timeout_data	timeout;
	};
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,

	REQ_F_LINK_NEXT_BIT,
	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_IOPOLL_COMPLETED_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_MUST_PUNT_BIT,
	REQ_F_TIMEOUT_NOSEQ_BIT,
	REQ_F_COMP_LOCKED_BIT,
	REQ_F_NEED_CLEANUP_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),

	/* already grabbed next link */
	REQ_F_LINK_NEXT		= BIT(REQ_F_LINK_NEXT_BIT),
	/* fail rest of links */
	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* polled IO has completed */
	REQ_F_IOPOLL_COMPLETED	= BIT(REQ_F_IOPOLL_COMPLETED_BIT),
	/* has linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* timeout request */
	REQ_F_TIMEOUT		= BIT(REQ_F_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* must be punted even for NONBLOCK */
	REQ_F_MUST_PUNT		= BIT(REQ_F_MUST_PUNT_BIT),
	/* no timeout sequence */
	REQ_F_TIMEOUT_NOSEQ	= BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
	/* completion under lock */
	REQ_F_COMP_LOCKED	= BIT(REQ_F_COMP_LOCKED_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_files_update	files_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
	};

	struct io_async_ctx		*io;
	/*
	 * llist_node is only used for poll deferred completions
	 */
	struct llist_node		llist_node;
	bool				in_async;
	bool				needs_fixed_file;
	u8				opcode;

	struct io_ring_ctx	*ctx;
	union {
		struct list_head	list;
		struct hlist_node	hash_node;
	};
	struct list_head	link_list;
	unsigned int		flags;
	refcount_t		refs;
	u64			user_data;
	u32			result;
	u32			sequence;

	struct list_head	inflight_entry;

	struct io_wq_work	work;
};

#define IO_PLUG_THRESHOLD		2
#define IO_IOPOLL_BATCH			8

struct io_submit_state {
	struct blk_plug		plug;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_IOPOLL_BATCH];
	unsigned int		free_reqs;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		has_refs;
	unsigned int		used_refs;
	unsigned int		ios_left;
};

struct io_op_def {
	/* needs req->io allocated for deferral/async */
	unsigned		async_ctx : 1;
	/* needs current->mm setup, does mm access */
	unsigned		needs_mm : 1;
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* needs req->file assigned IFF fd is >= 0 */
	unsigned		fd_non_neg : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
	/* needs file table */
	unsigned		file_table : 1;
};

static const struct io_op_def io_op_defs[] = {
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_WRITEV] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_FSYNC] = {
		.needs_file		= 1,
	},
	[IORING_OP_READ_FIXED] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_WRITE_FIXED] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_ADD] = {
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
		.needs_file		= 1,
	},
	[IORING_OP_SENDMSG] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_RECVMSG] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_TIMEOUT] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
	},
	[IORING_OP_TIMEOUT_REMOVE] = {},
	[IORING_OP_ACCEPT] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
		.file_table		= 1,
	},
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
	},
	[IORING_OP_CONNECT] = {
		.async_ctx		= 1,
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_FALLOCATE] = {
		.needs_file		= 1,
	},
	[IORING_OP_OPENAT] = {
		.needs_file		= 1,
		.fd_non_neg		= 1,
		.file_table		= 1,
	},
	[IORING_OP_CLOSE] = {
		.needs_file		= 1,
		.file_table		= 1,
	},
	[IORING_OP_FILES_UPDATE] = {
		.needs_mm		= 1,
		.file_table		= 1,
	},
	[IORING_OP_STATX] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.fd_non_neg		= 1,
	},
	[IORING_OP_READ] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_WRITE] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_FADVISE] = {
		.needs_file		= 1,
	},
	[IORING_OP_MADVISE] = {
		.needs_mm		= 1,
	},
	[IORING_OP_SEND] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_RECV] = {
		.needs_mm		= 1,
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
	[IORING_OP_OPENAT2] = {
		.needs_file		= 1,
		.fd_non_neg		= 1,
		.file_table		= 1,
	},
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
		.file_table		= 1,
	},
};

static void io_wq_submit_work(struct io_wq_work **workptr);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_files_update *ip,
				 unsigned nr_args);
static int io_grab_files(struct io_kiocb *req);
static void io_ring_file_ref_flush(struct fixed_file_data *data);
static void io_cleanup_req(struct io_kiocb *req);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->completions[0]);
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
	if (!ctx->fallback_req)
		goto err;

	ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL);
	if (!ctx->completions)
		goto err;

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
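	/*
	 * Worked example (added here for illustration, not from the original
	 * source): with the maximum cq_entries of 65536, ilog2() gives 16,
	 * so hash_bits ends up as 11 and the table gets 2048 buckets --
	 * again roughly 32 entries per bucket when the CQ is completely full.
	 */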
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
					GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->cq_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->completions[0]);
	init_completion(&ctx->completions[1]);
	idr_init(&ctx->personality_idr);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	init_llist_head(&ctx->poll_llist);
	INIT_LIST_HEAD(&ctx->poll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	init_waitqueue_head(&ctx->inflight_wait);
	spin_lock_init(&ctx->inflight_lock);
	INIT_LIST_HEAD(&ctx->inflight_list);
	return ctx;
err:
	if (ctx->fallback_req)
		kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx->completions);
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

static inline bool __req_need_defer(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
					+ atomic_read(&ctx->cached_cq_overflow);
}

static inline bool req_need_defer(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN))
		return __req_need_defer(req);

	return false;
}

static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
	if (req && !req_need_defer(req)) {
		list_del_init(&req->list);
		return req;
	}

	return NULL;
}

static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
	if (req) {
		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
			return NULL;
		if (!__req_need_defer(req)) {
			list_del_init(&req->list);
			return req;
		}
	}

	return NULL;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* order cqe stores with ring update */
	smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

	if (wq_has_sleeper(&ctx->cq_wait)) {
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
}

static inline void io_req_work_grab_env(struct io_kiocb *req,
					const struct io_op_def *def)
{
	if (!req->work.mm && def->needs_mm) {
		mmgrab(current->mm);
		req->work.mm = current->mm;
	}
	if (!req->work.creds)
		req->work.creds = get_current_cred();
}

static inline void io_req_work_drop_env(struct io_kiocb *req)
{
	if (req->work.mm) {
		mmdrop(req->work.mm);
		req->work.mm = NULL;
	}
	if (req->work.creds) {
		put_cred(req->work.creds);
		req->work.creds = NULL;
	}
}

static inline bool io_prep_async_work(struct io_kiocb *req,
				      struct io_kiocb **link)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	bool do_hashed = false;

	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file)
			do_hashed = true;
	} else {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	io_req_work_grab_env(req, def);

	*link = io_prep_linked_timeout(req);
	return do_hashed;
}

static inline void io_queue_async_work(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link;
	bool do_hashed;

	do_hashed = io_prep_async_work(req, &link);

	trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
					req->flags);
	if (!do_hashed) {
		io_wq_enqueue(ctx->io_wq, &req->work);
	} else {
		io_wq_enqueue_hashed(ctx->io_wq, &req->work,
					file_inode(req->file));
	}

	if (link)
		io_queue_linked_timeout(link);
}

static void io_kill_timeout(struct io_kiocb *req)
{
	int ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret != -1) {
		atomic_inc(&req->ctx->cq_timeouts);
		list_del_init(&req->list);
		io_cqring_fill_event(req, 0);
		io_put_req(req);
	}
}

static void io_kill_timeouts(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req, *tmp;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
		io_kill_timeout(req);
	spin_unlock_irq(&ctx->completion_lock);
}

static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	while ((req = io_get_timeout_req(ctx)) != NULL)
		io_kill_timeout(req);

	__io_commit_cqring(ctx);

	while ((req = io_get_deferred_req(ctx)) != NULL)
		io_queue_async_work(req);
}

static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;

	tail = ctx->cached_cq_tail;
	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
		return NULL;

	ctx->cached_cq_tail++;
	return &rings->cqes[tail & ctx->cq_mask];
}

static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
	if (!ctx->cq_ev_fd)
		return false;
	if (!ctx->eventfd_async)
		return true;
	return io_wq_current_is_worker() || in_interrupt();
}

static void __io_cqring_ev_posted(struct io_ring_ctx *ctx, bool trigger_ev)
{
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	if (waitqueue_active(&ctx->sqo_wait))
		wake_up(&ctx->sqo_wait);
	if (trigger_ev)
		eventfd_signal(ctx->cq_ev_fd, 1);
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	__io_cqring_ev_posted(ctx, io_should_trigger_evfd(ctx));
}

/* Returns true if there are no backlogged entries after the flush */
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
	struct io_rings *rings = ctx->rings;
	struct io_uring_cqe *cqe;
	struct io_kiocb *req;
	unsigned long flags;
	LIST_HEAD(list);

	if (!force) {
		if (list_empty_careful(&ctx->cq_overflow_list))
			return true;
		if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
		    rings->cq_ring_entries))
			return false;
	}

	spin_lock_irqsave(&ctx->completion_lock, flags);

	/* if force is set, the ring is going away. always drop after that */
	if (force)
		ctx->cq_overflow_flushed = 1;

	cqe = NULL;
	while (!list_empty(&ctx->cq_overflow_list)) {
		cqe = io_get_cqring(ctx);
		if (!cqe && !force)
			break;

		req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
						list);
		list_move(&req->list, &list);
		if (cqe) {
			WRITE_ONCE(cqe->user_data, req->user_data);
			WRITE_ONCE(cqe->res, req->result);
			WRITE_ONCE(cqe->flags, 0);
		} else {
			WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
		}
	}

	io_commit_cqring(ctx);
	if (cqe) {
		clear_bit(0, &ctx->sq_check_overflow);
		clear_bit(0, &ctx->cq_check_overflow);
	}
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	while (!list_empty(&list)) {
		req = list_first_entry(&list, struct io_kiocb, list);
		list_del(&req->list);
		io_put_req(req);
	}

	return cqe != NULL;
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, req->user_data, res);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (likely(cqe)) {
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, 0);
	} else if (ctx->cq_overflow_flushed) {
		WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
	} else {
		if (list_empty(&ctx->cq_overflow_list)) {
			set_bit(0, &ctx->sq_check_overflow);
			set_bit(0, &ctx->cq_check_overflow);
		}
		refcount_inc(&req->refs);
		req->result = res;
		list_add_tail(&req->list, &ctx->cq_overflow_list);
	}
}

static void io_cqring_add_event(struct io_kiocb *req, long res)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	io_cqring_fill_event(req, res);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
}

static inline bool io_is_fallback_req(struct io_kiocb *req)
{
	return req == (struct io_kiocb *)
			((unsigned long) req->ctx->fallback_req & ~1UL);
}

static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = ctx->fallback_req;
	if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
		return req;

	return NULL;
}

static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
				   struct io_submit_state *state)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct io_kiocb *req;

	if (!state) {
		req = kmem_cache_alloc(req_cachep, gfp);
		if (unlikely(!req))
			goto fallback;
	} else if (!state->free_reqs) {
		size_t sz;
		int ret;

		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				goto fallback;
			ret = 1;
		}
		state->free_reqs = ret - 1;
		req = state->reqs[ret - 1];
	} else {
		state->free_reqs--;
		req = state->reqs[state->free_reqs];
	}

got_it:
	req->io = NULL;
	req->file = NULL;
	req->ctx = ctx;
	req->flags = 0;
	/* one is dropped after submission, the other at completion */
	refcount_set(&req->refs, 2);
	req->result = 0;
	INIT_IO_WORK(&req->work, io_wq_submit_work);
	return req;
fallback:
	req = io_get_fallback_req(ctx);
	if (req)
		goto got_it;
	percpu_ref_put(&ctx->refs);
	return NULL;
}

static void __io_req_do_free(struct io_kiocb *req)
{
	if (likely(!io_is_fallback_req(req)))
		kmem_cache_free(req_cachep, req);
	else
		clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
}

static void __io_req_aux_free(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	kfree(req->io);
	if (req->file) {
		if (req->flags & REQ_F_FIXED_FILE)
			percpu_ref_put(&ctx->file_data->refs);
		else
			fput(req->file);
	}

	io_req_work_drop_env(req);
}

static void __io_free_req(struct io_kiocb *req)
{
	__io_req_aux_free(req);

	if (req->flags & REQ_F_NEED_CLEANUP)
		io_cleanup_req(req);

	if (req->flags & REQ_F_INFLIGHT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		list_del(&req->inflight_entry);
		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
	}

	percpu_ref_put(&req->ctx->refs);
	__io_req_do_free(req);
}

struct req_batch {
	void *reqs[IO_IOPOLL_BATCH];
	int to_free;
	int need_iter;
};

static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
{
	int fixed_refs = rb->to_free;

	if (!rb->to_free)
		return;
	if (rb->need_iter) {
		int i, inflight = 0;
		unsigned long flags;

		fixed_refs = 0;
		for (i = 0; i < rb->to_free; i++) {
			struct io_kiocb *req = rb->reqs[i];

			if (req->flags & REQ_F_FIXED_FILE) {
				req->file = NULL;
				fixed_refs++;
			}
			if (req->flags & REQ_F_INFLIGHT)
				inflight++;
			__io_req_aux_free(req);
		}
		if (!inflight)
			goto do_free;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		for (i = 0; i < rb->to_free; i++) {
			struct io_kiocb *req = rb->reqs[i];

			if (req->flags & REQ_F_INFLIGHT) {
				list_del(&req->inflight_entry);
				if (!--inflight)
					break;
			}
		}
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);

		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
	}
do_free:
	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
	if (fixed_refs)
		percpu_ref_put_many(&ctx->file_data->refs, fixed_refs);
	percpu_ref_put_many(&ctx->refs, rb->to_free);
	rb->to_free = rb->need_iter = 0;
}

Jackie Liua197f662019-11-08 08:09:12 -07001314static bool io_link_cancel_timeout(struct io_kiocb *req)
Jens Axboe9e645e112019-05-10 16:07:28 -06001315{
Jackie Liua197f662019-11-08 08:09:12 -07001316 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07001317 int ret;
1318
Jens Axboe2d283902019-12-04 11:08:05 -07001319 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
Jens Axboe2665abf2019-11-05 12:40:47 -07001320 if (ret != -1) {
Jens Axboe78e19bb2019-11-06 15:21:34 -07001321 io_cqring_fill_event(req, -ECANCELED);
Jens Axboe2665abf2019-11-05 12:40:47 -07001322 io_commit_cqring(ctx);
1323 req->flags &= ~REQ_F_LINK;
Jackie Liuec9c02a2019-11-08 23:50:36 +08001324 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07001325 return true;
1326 }
1327
1328 return false;
1329}
1330
Jens Axboeba816ad2019-09-28 11:36:45 -06001331static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
Jens Axboe9e645e112019-05-10 16:07:28 -06001332{
Jens Axboe2665abf2019-11-05 12:40:47 -07001333 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07001334 bool wake_ev = false;
Jens Axboe9e645e112019-05-10 16:07:28 -06001335
Jens Axboe4d7dd462019-11-20 13:03:52 -07001336 /* Already got next link */
1337 if (req->flags & REQ_F_LINK_NEXT)
1338 return;
1339
Jens Axboe9e645e112019-05-10 16:07:28 -06001340 /*
1341	 * The list should never be empty when we are called here. But it could
1342	 * potentially happen if the chain is messed up, so check to be on the
1343	 * safe side.
1344 */
Pavel Begunkov44932332019-12-05 16:16:35 +03001345 while (!list_empty(&req->link_list)) {
1346 struct io_kiocb *nxt = list_first_entry(&req->link_list,
1347 struct io_kiocb, link_list);
Jens Axboe94ae5e72019-11-14 19:39:52 -07001348
Pavel Begunkov44932332019-12-05 16:16:35 +03001349 if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) &&
1350 (nxt->flags & REQ_F_TIMEOUT))) {
1351 list_del_init(&nxt->link_list);
Jens Axboe94ae5e72019-11-14 19:39:52 -07001352 wake_ev |= io_link_cancel_timeout(nxt);
Jens Axboe94ae5e72019-11-14 19:39:52 -07001353 req->flags &= ~REQ_F_LINK_TIMEOUT;
1354 continue;
1355 }
Jens Axboe9e645e112019-05-10 16:07:28 -06001356
Pavel Begunkov44932332019-12-05 16:16:35 +03001357 list_del_init(&req->link_list);
1358 if (!list_empty(&nxt->link_list))
1359 nxt->flags |= REQ_F_LINK;
Pavel Begunkovb18fdf72019-11-21 23:21:02 +03001360 *nxtptr = nxt;
Jens Axboe94ae5e72019-11-14 19:39:52 -07001361 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06001362 }
Jens Axboe2665abf2019-11-05 12:40:47 -07001363
Jens Axboe4d7dd462019-11-20 13:03:52 -07001364 req->flags |= REQ_F_LINK_NEXT;
Jens Axboe2665abf2019-11-05 12:40:47 -07001365 if (wake_ev)
1366 io_cqring_ev_posted(ctx);
Jens Axboe9e645e112019-05-10 16:07:28 -06001367}
1368
1369/*
1370 * Called if REQ_F_LINK is set, and we fail the head request
1371 */
1372static void io_fail_links(struct io_kiocb *req)
1373{
Jens Axboe2665abf2019-11-05 12:40:47 -07001374 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07001375 unsigned long flags;
1376
1377 spin_lock_irqsave(&ctx->completion_lock, flags);
Jens Axboe9e645e112019-05-10 16:07:28 -06001378
1379 while (!list_empty(&req->link_list)) {
Pavel Begunkov44932332019-12-05 16:16:35 +03001380 struct io_kiocb *link = list_first_entry(&req->link_list,
1381 struct io_kiocb, link_list);
Jens Axboe9e645e112019-05-10 16:07:28 -06001382
Pavel Begunkov44932332019-12-05 16:16:35 +03001383 list_del_init(&link->link_list);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02001384 trace_io_uring_fail_link(req, link);
Jens Axboe2665abf2019-11-05 12:40:47 -07001385
1386 if ((req->flags & REQ_F_LINK_TIMEOUT) &&
Jens Axboed625c6e2019-12-17 19:53:05 -07001387 link->opcode == IORING_OP_LINK_TIMEOUT) {
Jackie Liua197f662019-11-08 08:09:12 -07001388 io_link_cancel_timeout(link);
Jens Axboe2665abf2019-11-05 12:40:47 -07001389 } else {
Jens Axboe78e19bb2019-11-06 15:21:34 -07001390 io_cqring_fill_event(link, -ECANCELED);
Jens Axboe978db572019-11-14 22:39:04 -07001391 __io_double_put_req(link);
Jens Axboe2665abf2019-11-05 12:40:47 -07001392 }
Jens Axboe5d960722019-11-19 15:31:28 -07001393 req->flags &= ~REQ_F_LINK_TIMEOUT;
Jens Axboe9e645e112019-05-10 16:07:28 -06001394 }
Jens Axboe2665abf2019-11-05 12:40:47 -07001395
1396 io_commit_cqring(ctx);
1397 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1398 io_cqring_ev_posted(ctx);
Jens Axboe9e645e112019-05-10 16:07:28 -06001399}
1400
Jens Axboe4d7dd462019-11-20 13:03:52 -07001401static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
Jens Axboe9e645e112019-05-10 16:07:28 -06001402{
Jens Axboe4d7dd462019-11-20 13:03:52 -07001403 if (likely(!(req->flags & REQ_F_LINK)))
Jens Axboe2665abf2019-11-05 12:40:47 -07001404 return;
Jens Axboe2665abf2019-11-05 12:40:47 -07001405
Jens Axboe9e645e112019-05-10 16:07:28 -06001406 /*
1407 * If LINK is set, we have dependent requests in this chain. If we
1408 * didn't fail this request, queue the first one up, moving any other
1409 * dependencies to the next request. In case of failure, fail the rest
1410 * of the chain.
1411 */
Jens Axboe2665abf2019-11-05 12:40:47 -07001412 if (req->flags & REQ_F_FAIL_LINK) {
1413 io_fail_links(req);
Jens Axboe7c9e7f02019-11-12 08:15:53 -07001414 } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
1415 REQ_F_LINK_TIMEOUT) {
Jens Axboe2665abf2019-11-05 12:40:47 -07001416 struct io_ring_ctx *ctx = req->ctx;
1417 unsigned long flags;
1418
1419 /*
1420 * If this is a timeout link, we could be racing with the
1421 * timeout timer. Grab the completion lock for this case to
Jens Axboe7c9e7f02019-11-12 08:15:53 -07001422 * protect against that.
Jens Axboe2665abf2019-11-05 12:40:47 -07001423 */
1424 spin_lock_irqsave(&ctx->completion_lock, flags);
1425 io_req_link_next(req, nxt);
1426 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1427 } else {
1428 io_req_link_next(req, nxt);
Jens Axboe9e645e112019-05-10 16:07:28 -06001429 }
Jens Axboe4d7dd462019-11-20 13:03:52 -07001430}
Jens Axboe9e645e112019-05-10 16:07:28 -06001431
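/* Free the request, punting any linked successor to async work. */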
Jackie Liuc69f8db2019-11-09 11:00:08 +08001432static void io_free_req(struct io_kiocb *req)
1433{
Pavel Begunkov944e58b2019-11-21 23:21:01 +03001434 struct io_kiocb *nxt = NULL;
1435
1436 io_req_find_next(req, &nxt);
Pavel Begunkov70cf9f32019-11-21 23:21:00 +03001437 __io_free_req(req);
Pavel Begunkov944e58b2019-11-21 23:21:01 +03001438
1439 if (nxt)
1440 io_queue_async_work(nxt);
Jackie Liuc69f8db2019-11-09 11:00:08 +08001441}
1442
Jens Axboeba816ad2019-09-28 11:36:45 -06001443/*
1444 * Drop reference to request, return next in chain (if there is one) if this
1445 * was the last reference to this request.
1446 */
Pavel Begunkovf9bd67f2019-11-21 23:21:03 +03001447__attribute__((nonnull))
Jackie Liuec9c02a2019-11-08 23:50:36 +08001448static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
Jens Axboee65ef562019-03-12 10:16:44 -06001449{
Pavel Begunkovf9bd67f2019-11-21 23:21:03 +03001450 io_req_find_next(req, nxtptr);
Jens Axboe4d7dd462019-11-20 13:03:52 -07001451
Jens Axboee65ef562019-03-12 10:16:44 -06001452 if (refcount_dec_and_test(&req->refs))
Jens Axboe4d7dd462019-11-20 13:03:52 -07001453 __io_free_req(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001454}
1455
Jens Axboe2b188cc2019-01-07 10:46:33 -07001456static void io_put_req(struct io_kiocb *req)
1457{
Jens Axboedef596e2019-01-09 08:59:42 -07001458 if (refcount_dec_and_test(&req->refs))
1459 io_free_req(req);
1460}
1461
Jens Axboe978db572019-11-14 22:39:04 -07001462/*
1463 * Must only be used if we don't need to care about links, usually from
1464 * within the completion handling itself.
1465 */
1466static void __io_double_put_req(struct io_kiocb *req)
Jens Axboea3a0e432019-08-20 11:03:11 -06001467{
Jens Axboe78e19bb2019-11-06 15:21:34 -07001468 /* drop both submit and complete references */
1469 if (refcount_sub_and_test(2, &req->refs))
1470 __io_free_req(req);
1471}
1472
Jens Axboe978db572019-11-14 22:39:04 -07001473static void io_double_put_req(struct io_kiocb *req)
1474{
1475 /* drop both submit and complete references */
1476 if (refcount_sub_and_test(2, &req->refs))
1477 io_free_req(req);
1478}
1479
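/*
 * Return the number of CQ entries the application has yet to consume. If
 * the CQ ring has overflowed, flush the overflow list first; with noflush
 * set and overflowed entries still pending, return -1U instead so the
 * waiter is woken and the next invocation can do the flush.
 */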
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001480static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
Jens Axboea3a0e432019-08-20 11:03:11 -06001481{
Jens Axboe84f97dc2019-11-06 11:27:53 -07001482 struct io_rings *rings = ctx->rings;
1483
Jens Axboead3eb2c2019-12-18 17:12:20 -07001484 if (test_bit(0, &ctx->cq_check_overflow)) {
1485 /*
1486 * noflush == true is from the waitqueue handler, just ensure
1487 * we wake up the task, and the next invocation will flush the
1488	 * entries. We cannot safely do it from here.
1489 */
1490 if (noflush && !list_empty(&ctx->cq_overflow_list))
1491 return -1U;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001492
Jens Axboead3eb2c2019-12-18 17:12:20 -07001493 io_cqring_overflow_flush(ctx, false);
1494 }
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001495
Jens Axboea3a0e432019-08-20 11:03:11 -06001496 /* See comment at the top of this file */
1497 smp_rmb();
Jens Axboead3eb2c2019-12-18 17:12:20 -07001498 return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
Jens Axboea3a0e432019-08-20 11:03:11 -06001499}
1500
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03001501static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
1502{
1503 struct io_rings *rings = ctx->rings;
1504
1505 /* make sure SQ entry isn't read before tail */
1506 return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
1507}
1508
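/*
 * Try to stash the request in the free batch. Returns false if it can't be
 * batch freed (linked or fallback request), leaving the caller to free it
 * individually.
 */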
Jens Axboe8237e042019-12-28 10:48:22 -07001509static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
Jens Axboee94f1412019-12-19 12:06:02 -07001510{
Jens Axboec6ca97b302019-12-28 12:11:08 -07001511 if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req))
1512 return false;
Jens Axboee94f1412019-12-19 12:06:02 -07001513
Jens Axboec6ca97b302019-12-28 12:11:08 -07001514 if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
1515 rb->need_iter++;
1516
1517 rb->reqs[rb->to_free++] = req;
1518 if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
1519 io_free_req_many(req->ctx, rb);
1520 return true;
Jens Axboee94f1412019-12-19 12:06:02 -07001521}
1522
Jens Axboedef596e2019-01-09 08:59:42 -07001523/*
1524 * Find and free completed poll iocbs
1525 */
1526static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
1527 struct list_head *done)
1528{
Jens Axboe8237e042019-12-28 10:48:22 -07001529 struct req_batch rb;
Jens Axboedef596e2019-01-09 08:59:42 -07001530 struct io_kiocb *req;
Jens Axboedef596e2019-01-09 08:59:42 -07001531
Jens Axboec6ca97b302019-12-28 12:11:08 -07001532 rb.to_free = rb.need_iter = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07001533 while (!list_empty(done)) {
1534 req = list_first_entry(done, struct io_kiocb, list);
1535 list_del(&req->list);
1536
Jens Axboe78e19bb2019-11-06 15:21:34 -07001537 io_cqring_fill_event(req, req->result);
Jens Axboedef596e2019-01-09 08:59:42 -07001538 (*nr_events)++;
1539
Jens Axboe8237e042019-12-28 10:48:22 -07001540 if (refcount_dec_and_test(&req->refs) &&
1541 !io_req_multi_free(&rb, req))
1542 io_free_req(req);
Jens Axboedef596e2019-01-09 08:59:42 -07001543 }
Jens Axboedef596e2019-01-09 08:59:42 -07001544
Jens Axboe09bb8392019-03-13 12:39:28 -06001545 io_commit_cqring(ctx);
Jens Axboe8237e042019-12-28 10:48:22 -07001546 io_free_req_many(ctx, &rb);
Jens Axboedef596e2019-01-09 08:59:42 -07001547}
1548
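/*
 * One pass of the iopoll loop: move already-completed requests to a local
 * done list, poll the driver for the rest, then post completions for
 * everything that finished.
 */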
1549static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
1550 long min)
1551{
1552 struct io_kiocb *req, *tmp;
1553 LIST_HEAD(done);
1554 bool spin;
1555 int ret;
1556
1557 /*
1558 * Only spin for completions if we don't have multiple devices hanging
1559 * off our complete list, and we're under the requested amount.
1560 */
1561 spin = !ctx->poll_multi_file && *nr_events < min;
1562
1563 ret = 0;
1564 list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
Jens Axboe9adbd452019-12-20 08:45:55 -07001565 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboedef596e2019-01-09 08:59:42 -07001566
1567 /*
1568 * Move completed entries to our local list. If we find a
1569 * request that requires polling, break out and complete
1570 * the done list first, if we have entries there.
1571 */
1572 if (req->flags & REQ_F_IOPOLL_COMPLETED) {
1573 list_move_tail(&req->list, &done);
1574 continue;
1575 }
1576 if (!list_empty(&done))
1577 break;
1578
1579 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
1580 if (ret < 0)
1581 break;
1582
1583 if (ret && spin)
1584 spin = false;
1585 ret = 0;
1586 }
1587
1588 if (!list_empty(&done))
1589 io_iopoll_complete(ctx, nr_events, &done);
1590
1591 return ret;
1592}
1593
1594/*
Brian Gianforcarod195a662019-12-13 03:09:50 -08001595 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
Jens Axboedef596e2019-01-09 08:59:42 -07001596 * non-spinning poll check - we'll still enter the driver poll loop, but only
1597 * as a non-spinning completion check.
1598 */
1599static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
1600 long min)
1601{
Jens Axboe08f54392019-08-21 22:19:11 -06001602 while (!list_empty(&ctx->poll_list) && !need_resched()) {
Jens Axboedef596e2019-01-09 08:59:42 -07001603 int ret;
1604
1605 ret = io_do_iopoll(ctx, nr_events, min);
1606 if (ret < 0)
1607 return ret;
1608 if (!min || *nr_events >= min)
1609 return 0;
1610 }
1611
1612 return 1;
1613}
1614
1615/*
1616 * We can't just wait for polled events to come to us, we have to actively
1617 * find and complete them.
1618 */
1619static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
1620{
1621 if (!(ctx->flags & IORING_SETUP_IOPOLL))
1622 return;
1623
1624 mutex_lock(&ctx->uring_lock);
1625 while (!list_empty(&ctx->poll_list)) {
1626 unsigned int nr_events = 0;
1627
1628 io_iopoll_getevents(ctx, &nr_events, 1);
Jens Axboe08f54392019-08-21 22:19:11 -06001629
1630 /*
1631	 * Ensure we allow local-to-the-cpu processing to take place; in this
1632	 * case we need to ensure that we reap all events.
1633 */
1634 cond_resched();
Jens Axboedef596e2019-01-09 08:59:42 -07001635 }
1636 mutex_unlock(&ctx->uring_lock);
1637}
1638
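/*
 * Core iopoll loop, called with the uring_lock held: keep reaping events
 * until we have at least 'min', dropping the lock periodically so punted
 * submissions get a chance to add themselves to the poll list.
 */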
Jens Axboe2b2ed972019-10-25 10:06:15 -06001639static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
1640 long min)
Jens Axboedef596e2019-01-09 08:59:42 -07001641{
Jens Axboe2b2ed972019-10-25 10:06:15 -06001642 int iters = 0, ret = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07001643
1644 do {
1645 int tmin = 0;
1646
Jens Axboe500f9fb2019-08-19 12:15:59 -06001647 /*
Jens Axboea3a0e432019-08-20 11:03:11 -06001648 * Don't enter poll loop if we already have events pending.
1649 * If we do, we can potentially be spinning for commands that
1650	 * already triggered a CQE (e.g. in error).
1651 */
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001652 if (io_cqring_events(ctx, false))
Jens Axboea3a0e432019-08-20 11:03:11 -06001653 break;
1654
1655 /*
Jens Axboe500f9fb2019-08-19 12:15:59 -06001656 * If a submit got punted to a workqueue, we can have the
1657 * application entering polling for a command before it gets
1658 * issued. That app will hold the uring_lock for the duration
1659 * of the poll right here, so we need to take a breather every
1660 * now and then to ensure that the issue has a chance to add
1661 * the poll to the issued list. Otherwise we can spin here
1662 * forever, while the workqueue is stuck trying to acquire the
1663 * very same mutex.
1664 */
1665 if (!(++iters & 7)) {
1666 mutex_unlock(&ctx->uring_lock);
1667 mutex_lock(&ctx->uring_lock);
1668 }
1669
Jens Axboedef596e2019-01-09 08:59:42 -07001670 if (*nr_events < min)
1671 tmin = min - *nr_events;
1672
1673 ret = io_iopoll_getevents(ctx, nr_events, tmin);
1674 if (ret <= 0)
1675 break;
1676 ret = 0;
1677 } while (min && !*nr_events && !need_resched());
1678
Jens Axboe2b2ed972019-10-25 10:06:15 -06001679 return ret;
1680}
1681
1682static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
1683 long min)
1684{
1685 int ret;
1686
1687 /*
1688 * We disallow the app entering submit/complete with polling, but we
1689 * still need to lock the ring to prevent racing with polled issue
1690 * that got punted to a workqueue.
1691 */
1692 mutex_lock(&ctx->uring_lock);
1693 ret = __io_iopoll_check(ctx, nr_events, min);
Jens Axboe500f9fb2019-08-19 12:15:59 -06001694 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07001695 return ret;
1696}
1697
Jens Axboe491381ce2019-10-17 09:20:46 -06001698static void kiocb_end_write(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001699{
Jens Axboe491381ce2019-10-17 09:20:46 -06001700 /*
1701 * Tell lockdep we inherited freeze protection from submission
1702 * thread.
1703 */
1704 if (req->flags & REQ_F_ISREG) {
1705 struct inode *inode = file_inode(req->file);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001706
Jens Axboe491381ce2019-10-17 09:20:46 -06001707 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001708 }
Jens Axboe491381ce2019-10-17 09:20:46 -06001709 file_end_write(req->file);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001710}
1711
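/* Mark the request so its link chain is failed, unless it's a hard link. */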
Jens Axboe4e88d6e2019-12-07 20:59:47 -07001712static inline void req_set_fail_links(struct io_kiocb *req)
1713{
1714 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1715 req->flags |= REQ_F_FAIL_LINK;
1716}
1717
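/*
 * Common read/write completion: release write freeze protection, fail the
 * link chain on a short or errored result, and post the completion event.
 */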
Jens Axboeba816ad2019-09-28 11:36:45 -06001718static void io_complete_rw_common(struct kiocb *kiocb, long res)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001719{
Jens Axboe9adbd452019-12-20 08:45:55 -07001720 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001721
Jens Axboe491381ce2019-10-17 09:20:46 -06001722 if (kiocb->ki_flags & IOCB_WRITE)
1723 kiocb_end_write(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001724
Jens Axboe4e88d6e2019-12-07 20:59:47 -07001725 if (res != req->result)
1726 req_set_fail_links(req);
Jens Axboe78e19bb2019-11-06 15:21:34 -07001727 io_cqring_add_event(req, res);
Jens Axboeba816ad2019-09-28 11:36:45 -06001728}
1729
1730static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
1731{
Jens Axboe9adbd452019-12-20 08:45:55 -07001732 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboeba816ad2019-09-28 11:36:45 -06001733
1734 io_complete_rw_common(kiocb, res);
Jens Axboee65ef562019-03-12 10:16:44 -06001735 io_put_req(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001736}
1737
Jens Axboeba816ad2019-09-28 11:36:45 -06001738static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
1739{
Jens Axboe9adbd452019-12-20 08:45:55 -07001740 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jackie Liuec9c02a2019-11-08 23:50:36 +08001741 struct io_kiocb *nxt = NULL;
Jens Axboeba816ad2019-09-28 11:36:45 -06001742
1743 io_complete_rw_common(kiocb, res);
Jackie Liuec9c02a2019-11-08 23:50:36 +08001744 io_put_req_find_next(req, &nxt);
1745
1746 return nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001747}
1748
Jens Axboedef596e2019-01-09 08:59:42 -07001749static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
1750{
Jens Axboe9adbd452019-12-20 08:45:55 -07001751 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
Jens Axboedef596e2019-01-09 08:59:42 -07001752
Jens Axboe491381ce2019-10-17 09:20:46 -06001753 if (kiocb->ki_flags & IOCB_WRITE)
1754 kiocb_end_write(req);
Jens Axboedef596e2019-01-09 08:59:42 -07001755
Jens Axboe4e88d6e2019-12-07 20:59:47 -07001756 if (res != req->result)
1757 req_set_fail_links(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06001758 req->result = res;
Jens Axboedef596e2019-01-09 08:59:42 -07001759 if (res != -EAGAIN)
1760 req->flags |= REQ_F_IOPOLL_COMPLETED;
1761}
1762
1763/*
1764 * After the iocb has been issued, it's safe to be found on the poll list.
1765 * Adding the kiocb to the list AFTER submission ensures that we don't
1766	 * find it from an io_iopoll_getevents() thread before the issuer is done
1767 * accessing the kiocb cookie.
1768 */
1769static void io_iopoll_req_issued(struct io_kiocb *req)
1770{
1771 struct io_ring_ctx *ctx = req->ctx;
1772
1773 /*
1774	 * Track whether we have multiple files in our lists. This will impact
1775	 * how we do polling later on: we won't spin if the requests are on
1776	 * potentially different devices.
1777 */
1778 if (list_empty(&ctx->poll_list)) {
1779 ctx->poll_multi_file = false;
1780 } else if (!ctx->poll_multi_file) {
1781 struct io_kiocb *list_req;
1782
1783 list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
1784 list);
Jens Axboe9adbd452019-12-20 08:45:55 -07001785 if (list_req->file != req->file)
Jens Axboedef596e2019-01-09 08:59:42 -07001786 ctx->poll_multi_file = true;
1787 }
1788
1789 /*
1790 * For fast devices, IO may have already completed. If it has, add
1791 * it to the front so we find it first.
1792 */
1793 if (req->flags & REQ_F_IOPOLL_COMPLETED)
1794 list_add(&req->list, &ctx->poll_list);
1795 else
1796 list_add_tail(&req->list, &ctx->poll_list);
1797}
1798
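/* Drop any file references cached in the submit state but never used. */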
Jens Axboe3d6770f2019-04-13 11:50:54 -06001799static void io_file_put(struct io_submit_state *state)
Jens Axboe9a56a232019-01-09 09:06:50 -07001800{
Jens Axboe3d6770f2019-04-13 11:50:54 -06001801 if (state->file) {
Jens Axboe9a56a232019-01-09 09:06:50 -07001802 int diff = state->has_refs - state->used_refs;
1803
1804 if (diff)
1805 fput_many(state->file, diff);
1806 state->file = NULL;
1807 }
1808}
1809
1810/*
1811 * Get as many references to a file as we have IOs left in this submission,
1812 * assuming most submissions are for one file, or at least that each file
1813 * has more than one submission.
1814 */
1815static struct file *io_file_get(struct io_submit_state *state, int fd)
1816{
1817 if (!state)
1818 return fget(fd);
1819
1820 if (state->file) {
1821 if (state->fd == fd) {
1822 state->used_refs++;
1823 state->ios_left--;
1824 return state->file;
1825 }
Jens Axboe3d6770f2019-04-13 11:50:54 -06001826 io_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -07001827 }
1828 state->file = fget_many(fd, state->ios_left);
1829 if (!state->file)
1830 return NULL;
1831
1832 state->fd = fd;
1833 state->has_refs = state->ios_left;
1834 state->used_refs = 1;
1835 state->ios_left--;
1836 return state->file;
1837}
1838
Jens Axboe2b188cc2019-01-07 10:46:33 -07001839/*
1840 * If we tracked the file through the SCM inflight mechanism, we could support
1841 * any file. For now, just ensure that anything potentially problematic is done
1842 * inline.
1843 */
1844static bool io_file_supports_async(struct file *file)
1845{
1846 umode_t mode = file_inode(file)->i_mode;
1847
Jens Axboe10d59342019-12-09 20:16:22 -07001848 if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode))
Jens Axboe2b188cc2019-01-07 10:46:33 -07001849 return true;
1850 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
1851 return true;
1852
1853 return false;
1854}
1855
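/*
 * Prepare the kiocb from the SQE: file position, flags, priority, buffer
 * address/length, and the completion handler (iopoll or regular).
 */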
Jens Axboe3529d8c2019-12-19 18:24:38 -07001856static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1857 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001858{
Jens Axboedef596e2019-01-09 08:59:42 -07001859 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe9adbd452019-12-20 08:45:55 -07001860 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboe09bb8392019-03-13 12:39:28 -06001861 unsigned ioprio;
1862 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001863
Jens Axboe491381ce2019-10-17 09:20:46 -06001864 if (S_ISREG(file_inode(req->file)->i_mode))
1865 req->flags |= REQ_F_ISREG;
1866
Jens Axboe2b188cc2019-01-07 10:46:33 -07001867 kiocb->ki_pos = READ_ONCE(sqe->off);
Jens Axboeba042912019-12-25 16:33:42 -07001868 if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
1869 req->flags |= REQ_F_CUR_POS;
1870 kiocb->ki_pos = req->file->f_pos;
1871 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001872 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
Pavel Begunkov3e577dc2020-02-01 03:58:42 +03001873 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
1874 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
1875 if (unlikely(ret))
1876 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001877
1878 ioprio = READ_ONCE(sqe->ioprio);
1879 if (ioprio) {
1880 ret = ioprio_check_cap(ioprio);
1881 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -06001882 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001883
1884 kiocb->ki_ioprio = ioprio;
1885 } else
1886 kiocb->ki_ioprio = get_current_ioprio();
1887
Stefan Bühler8449eed2019-04-27 20:34:19 +02001888 /* don't allow async punt if RWF_NOWAIT was requested */
Jens Axboe491381ce2019-10-17 09:20:46 -06001889 if ((kiocb->ki_flags & IOCB_NOWAIT) ||
1890 (req->file->f_flags & O_NONBLOCK))
Stefan Bühler8449eed2019-04-27 20:34:19 +02001891 req->flags |= REQ_F_NOWAIT;
1892
1893 if (force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001894 kiocb->ki_flags |= IOCB_NOWAIT;
Stefan Bühler8449eed2019-04-27 20:34:19 +02001895
Jens Axboedef596e2019-01-09 08:59:42 -07001896 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07001897 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
1898 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06001899 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001900
Jens Axboedef596e2019-01-09 08:59:42 -07001901 kiocb->ki_flags |= IOCB_HIPRI;
1902 kiocb->ki_complete = io_complete_rw_iopoll;
Jens Axboe6873e0b2019-10-30 13:53:09 -06001903 req->result = 0;
Jens Axboedef596e2019-01-09 08:59:42 -07001904 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06001905 if (kiocb->ki_flags & IOCB_HIPRI)
1906 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07001907 kiocb->ki_complete = io_complete_rw;
1908 }
Jens Axboe9adbd452019-12-20 08:45:55 -07001909
Jens Axboe3529d8c2019-12-19 18:24:38 -07001910 req->rw.addr = READ_ONCE(sqe->addr);
1911 req->rw.len = READ_ONCE(sqe->len);
Jens Axboe9adbd452019-12-20 08:45:55 -07001912 /* we own ->private, reuse it for the buffer index */
1913 req->rw.kiocb.private = (void *) (unsigned long)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001914 READ_ONCE(sqe->buf_index);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001915 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001916}
1917
1918static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
1919{
1920 switch (ret) {
1921 case -EIOCBQUEUED:
1922 break;
1923 case -ERESTARTSYS:
1924 case -ERESTARTNOINTR:
1925 case -ERESTARTNOHAND:
1926 case -ERESTART_RESTARTBLOCK:
1927 /*
1928 * We can't just restart the syscall, since previously
1929 * submitted sqes may already be in progress. Just fail this
1930 * IO with EINTR.
1931 */
1932 ret = -EINTR;
1933 /* fall through */
1934 default:
1935 kiocb->ki_complete(kiocb, ret, 0);
1936 }
1937}
1938
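/*
 * Handle the return value of a submitted kiocb: restore the file position
 * if needed, then either complete it inline (also grabbing the next linked
 * request) or hand off to io_rw_done().
 */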
Jens Axboeba816ad2019-09-28 11:36:45 -06001939static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
1940 bool in_async)
1941{
Jens Axboeba042912019-12-25 16:33:42 -07001942 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
1943
1944 if (req->flags & REQ_F_CUR_POS)
1945 req->file->f_pos = kiocb->ki_pos;
Pavel Begunkovf9bd67f2019-11-21 23:21:03 +03001946 if (in_async && ret >= 0 && kiocb->ki_complete == io_complete_rw)
Jens Axboeba816ad2019-09-28 11:36:45 -06001947 *nxt = __io_complete_rw(kiocb, ret);
1948 else
1949 io_rw_done(kiocb, ret);
1950}
1951
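/*
 * Set up the iov_iter over a registered (fixed) buffer, checking that the
 * requested range lies inside the mapped region. Returns the number of
 * bytes mapped on success.
 */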
Jens Axboe9adbd452019-12-20 08:45:55 -07001952static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
Pavel Begunkov7d009162019-11-25 23:14:40 +03001953 struct iov_iter *iter)
Jens Axboeedafcce2019-01-09 09:16:05 -07001954{
Jens Axboe9adbd452019-12-20 08:45:55 -07001955 struct io_ring_ctx *ctx = req->ctx;
1956 size_t len = req->rw.len;
Jens Axboeedafcce2019-01-09 09:16:05 -07001957 struct io_mapped_ubuf *imu;
1958 unsigned index, buf_index;
1959 size_t offset;
1960 u64 buf_addr;
1961
1962 /* attempt to use fixed buffers without having provided iovecs */
1963 if (unlikely(!ctx->user_bufs))
1964 return -EFAULT;
1965
Jens Axboe9adbd452019-12-20 08:45:55 -07001966 buf_index = (unsigned long) req->rw.kiocb.private;
Jens Axboeedafcce2019-01-09 09:16:05 -07001967 if (unlikely(buf_index >= ctx->nr_user_bufs))
1968 return -EFAULT;
1969
1970 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
1971 imu = &ctx->user_bufs[index];
Jens Axboe9adbd452019-12-20 08:45:55 -07001972 buf_addr = req->rw.addr;
Jens Axboeedafcce2019-01-09 09:16:05 -07001973
1974 /* overflow */
1975 if (buf_addr + len < buf_addr)
1976 return -EFAULT;
1977 /* not inside the mapped region */
1978 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
1979 return -EFAULT;
1980
1981 /*
1982	 * This may not be the start of the buffer; set the size appropriately
1983	 * and advance the iterator to the beginning.
1984 */
1985 offset = buf_addr - imu->ubuf;
1986 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06001987
1988 if (offset) {
1989 /*
1990 * Don't use iov_iter_advance() here, as it's really slow for
1991 * using the latter parts of a big fixed buffer - it iterates
1992 * over each segment manually. We can cheat a bit here, because
1993 * we know that:
1994 *
1995 * 1) it's a BVEC iter, we set it up
1996 * 2) all bvecs are PAGE_SIZE in size, except potentially the
1997 * first and last bvec
1998 *
1999 * So just find our index, and adjust the iterator afterwards.
2000 * If the offset is within the first bvec (or the whole first
2001	 * bvec), just use iov_iter_advance(). This makes it easier
2002 * since we can just skip the first segment, which may not
2003 * be PAGE_SIZE aligned.
2004 */
2005 const struct bio_vec *bvec = imu->bvec;
2006
2007 if (offset <= bvec->bv_len) {
2008 iov_iter_advance(iter, offset);
2009 } else {
2010 unsigned long seg_skip;
2011
2012 /* skip first vec */
2013 offset -= bvec->bv_len;
2014 seg_skip = 1 + (offset >> PAGE_SHIFT);
2015
2016 iter->bvec = bvec + seg_skip;
2017 iter->nr_segs -= seg_skip;
Aleix Roca Nonell99c79f62019-08-15 14:03:22 +02002018 iter->count -= bvec->bv_len + offset;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002019 iter->iov_offset = offset & ~PAGE_MASK;
Jens Axboebd11b3a2019-07-20 08:37:31 -06002020 }
2021 }
2022
Jens Axboe5e559562019-11-13 16:12:46 -07002023 return len;
Jens Axboeedafcce2019-01-09 09:16:05 -07002024}
2025
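/*
 * Build the iov_iter for this request: a fixed buffer, a plain buffer for
 * non-vectored READ/WRITE, a previously prepared async context, or a
 * userspace iovec (with compat handling).
 */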
Pavel Begunkovcf6fd4b2019-11-25 23:14:39 +03002026static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
2027 struct iovec **iovec, struct iov_iter *iter)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002028{
Jens Axboe9adbd452019-12-20 08:45:55 -07002029 void __user *buf = u64_to_user_ptr(req->rw.addr);
2030 size_t sqe_len = req->rw.len;
Jens Axboeedafcce2019-01-09 09:16:05 -07002031 u8 opcode;
2032
Jens Axboed625c6e2019-12-17 19:53:05 -07002033 opcode = req->opcode;
Pavel Begunkov7d009162019-11-25 23:14:40 +03002034 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
Jens Axboeedafcce2019-01-09 09:16:05 -07002035 *iovec = NULL;
Jens Axboe9adbd452019-12-20 08:45:55 -07002036 return io_import_fixed(req, rw, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07002037 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002038
Jens Axboe9adbd452019-12-20 08:45:55 -07002039 /* buffer index only valid with fixed read/write */
2040 if (req->rw.kiocb.private)
2041 return -EINVAL;
2042
Jens Axboe3a6820f2019-12-22 15:19:35 -07002043 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
2044 ssize_t ret;
2045 ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
2046 *iovec = NULL;
2047 return ret;
2048 }
2049
Jens Axboef67676d2019-12-02 11:03:47 -07002050 if (req->io) {
2051 struct io_async_rw *iorw = &req->io->rw;
2052
2053 *iovec = iorw->iov;
2054 iov_iter_init(iter, rw, *iovec, iorw->nr_segs, iorw->size);
2055 if (iorw->iov == iorw->fast_iov)
2056 *iovec = NULL;
2057 return iorw->size;
2058 }
2059
Jens Axboe2b188cc2019-01-07 10:46:33 -07002060#ifdef CONFIG_COMPAT
Pavel Begunkovcf6fd4b2019-11-25 23:14:39 +03002061 if (req->ctx->compat)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002062 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
2063 iovec, iter);
2064#endif
2065
2066 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
2067}
2068
Jens Axboe32960612019-09-23 11:05:34 -06002069/*
2070 * For files that don't have ->read_iter() and ->write_iter(), handle them
2071 * by looping over ->read() or ->write() manually.
2072 */
2073static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
2074 struct iov_iter *iter)
2075{
2076 ssize_t ret = 0;
2077
2078 /*
2079	 * We don't support polled IO through this interface, and we can't
2080 * support non-blocking either. For the latter, this just causes
2081 * the kiocb to be handled from an async context.
2082 */
2083 if (kiocb->ki_flags & IOCB_HIPRI)
2084 return -EOPNOTSUPP;
2085 if (kiocb->ki_flags & IOCB_NOWAIT)
2086 return -EAGAIN;
2087
2088 while (iov_iter_count(iter)) {
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03002089 struct iovec iovec;
Jens Axboe32960612019-09-23 11:05:34 -06002090 ssize_t nr;
2091
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03002092 if (!iov_iter_is_bvec(iter)) {
2093 iovec = iov_iter_iovec(iter);
2094 } else {
2095 /* fixed buffers import bvec */
2096 iovec.iov_base = kmap(iter->bvec->bv_page)
2097 + iter->iov_offset;
2098 iovec.iov_len = min(iter->count,
2099 iter->bvec->bv_len - iter->iov_offset);
2100 }
2101
Jens Axboe32960612019-09-23 11:05:34 -06002102 if (rw == READ) {
2103 nr = file->f_op->read(file, iovec.iov_base,
2104 iovec.iov_len, &kiocb->ki_pos);
2105 } else {
2106 nr = file->f_op->write(file, iovec.iov_base,
2107 iovec.iov_len, &kiocb->ki_pos);
2108 }
2109
Pavel Begunkov311ae9e2019-11-24 11:58:24 +03002110 if (iov_iter_is_bvec(iter))
2111 kunmap(iter->bvec->bv_page);
2112
Jens Axboe32960612019-09-23 11:05:34 -06002113 if (nr < 0) {
2114 if (!ret)
2115 ret = nr;
2116 break;
2117 }
2118 ret += nr;
2119 if (nr != iovec.iov_len)
2120 break;
2121 iov_iter_advance(iter, nr);
2122 }
2123
2124 return ret;
2125}
2126
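/*
 * Save the iovec and iterator state in the request's async context so the
 * IO can be reissued from async context later.
 */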
Jens Axboeb7bb4f72019-12-15 22:13:43 -07002127static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
Jens Axboef67676d2019-12-02 11:03:47 -07002128 struct iovec *iovec, struct iovec *fast_iov,
2129 struct iov_iter *iter)
2130{
2131 req->io->rw.nr_segs = iter->nr_segs;
2132 req->io->rw.size = io_size;
2133 req->io->rw.iov = iovec;
2134 if (!req->io->rw.iov) {
2135 req->io->rw.iov = req->io->rw.fast_iov;
2136 memcpy(req->io->rw.iov, fast_iov,
2137 sizeof(struct iovec) * iter->nr_segs);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03002138 } else {
2139 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboef67676d2019-12-02 11:03:47 -07002140 }
2141}
2142
Jens Axboeb7bb4f72019-12-15 22:13:43 -07002143static int io_alloc_async_ctx(struct io_kiocb *req)
Jens Axboef67676d2019-12-02 11:03:47 -07002144{
Jens Axboed3656342019-12-18 09:50:26 -07002145 if (!io_op_defs[req->opcode].async_ctx)
2146 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07002147 req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
Jens Axboe06b76d42019-12-19 14:44:26 -07002148 return req->io == NULL;
Jens Axboeb7bb4f72019-12-15 22:13:43 -07002149}
2150
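/*
 * Allocate the async context if needed and stash the current rw state so
 * the request can be punted and retried asynchronously.
 */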
Jens Axboeb7bb4f72019-12-15 22:13:43 -07002151static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
2152 struct iovec *iovec, struct iovec *fast_iov,
2153 struct iov_iter *iter)
2154{
Jens Axboe980ad262020-01-24 23:08:54 -07002155 if (!io_op_defs[req->opcode].async_ctx)
Jens Axboe74566df2020-01-13 19:23:24 -07002156 return 0;
Jens Axboe5d204bc2020-01-31 12:06:52 -07002157 if (!req->io) {
2158 if (io_alloc_async_ctx(req))
2159 return -ENOMEM;
Jens Axboeb7bb4f72019-12-15 22:13:43 -07002160
Jens Axboe5d204bc2020-01-31 12:06:52 -07002161 io_req_map_rw(req, io_size, iovec, fast_iov, iter);
2162 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07002163 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07002164}
2165
Jens Axboe3529d8c2019-12-19 18:24:38 -07002166static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2167 bool force_nonblock)
Jens Axboef67676d2019-12-02 11:03:47 -07002168{
Jens Axboe3529d8c2019-12-19 18:24:38 -07002169 struct io_async_ctx *io;
2170 struct iov_iter iter;
Jens Axboef67676d2019-12-02 11:03:47 -07002171 ssize_t ret;
2172
Jens Axboe3529d8c2019-12-19 18:24:38 -07002173 ret = io_prep_rw(req, sqe, force_nonblock);
2174 if (ret)
2175 return ret;
Jens Axboef67676d2019-12-02 11:03:47 -07002176
Jens Axboe3529d8c2019-12-19 18:24:38 -07002177 if (unlikely(!(req->file->f_mode & FMODE_READ)))
2178 return -EBADF;
Jens Axboef67676d2019-12-02 11:03:47 -07002179
Jens Axboe3529d8c2019-12-19 18:24:38 -07002180 if (!req->io)
2181 return 0;
2182
2183 io = req->io;
2184 io->rw.iov = io->rw.fast_iov;
2185 req->io = NULL;
2186 ret = io_import_iovec(READ, req, &io->rw.iov, &iter);
2187 req->io = io;
2188 if (ret < 0)
2189 return ret;
2190
2191 io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2192 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07002193}
2194
Pavel Begunkov267bc902019-11-07 01:41:08 +03002195static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
Jens Axboe8358e3a2019-04-23 08:17:58 -06002196 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002197{
2198 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07002199 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002200 struct iov_iter iter;
Jens Axboe31b51512019-01-18 22:56:34 -07002201 size_t iov_count;
Jens Axboef67676d2019-12-02 11:03:47 -07002202 ssize_t io_size, ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002203
Jens Axboe3529d8c2019-12-19 18:24:38 -07002204 ret = io_import_iovec(READ, req, &iovec, &iter);
Jens Axboe06b76d42019-12-19 14:44:26 -07002205 if (ret < 0)
2206 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002207
Jens Axboefd6c2e42019-12-18 12:19:41 -07002208 /* Ensure we clear previously set non-block flag */
2209 if (!force_nonblock)
Jens Axboe9adbd452019-12-20 08:45:55 -07002210 req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07002211
Bijan Mottahedeh797f3f52020-01-15 18:37:45 -08002212 req->result = 0;
Jens Axboef67676d2019-12-02 11:03:47 -07002213 io_size = ret;
Jens Axboe9e645e112019-05-10 16:07:28 -06002214 if (req->flags & REQ_F_LINK)
Jens Axboef67676d2019-12-02 11:03:47 -07002215 req->result = io_size;
2216
2217 /*
2218 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2219 * we know to async punt it even if it was opened O_NONBLOCK
2220 */
Jens Axboe9adbd452019-12-20 08:45:55 -07002221 if (force_nonblock && !io_file_supports_async(req->file)) {
Jens Axboef67676d2019-12-02 11:03:47 -07002222 req->flags |= REQ_F_MUST_PUNT;
2223 goto copy_iov;
2224 }
Jens Axboe9e645e112019-05-10 16:07:28 -06002225
Jens Axboe31b51512019-01-18 22:56:34 -07002226 iov_count = iov_iter_count(&iter);
Jens Axboe9adbd452019-12-20 08:45:55 -07002227 ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002228 if (!ret) {
2229 ssize_t ret2;
2230
Jens Axboe9adbd452019-12-20 08:45:55 -07002231 if (req->file->f_op->read_iter)
2232 ret2 = call_read_iter(req->file, kiocb, &iter);
Jens Axboe32960612019-09-23 11:05:34 -06002233 else
Jens Axboe9adbd452019-12-20 08:45:55 -07002234 ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
Jens Axboe32960612019-09-23 11:05:34 -06002235
Jens Axboe9d93a3f2019-05-15 13:53:07 -06002236 /* Catch -EAGAIN return for forced non-blocking submission */
Jens Axboef67676d2019-12-02 11:03:47 -07002237 if (!force_nonblock || ret2 != -EAGAIN) {
Pavel Begunkovcf6fd4b2019-11-25 23:14:39 +03002238 kiocb_done(kiocb, ret2, nxt, req->in_async);
Jens Axboef67676d2019-12-02 11:03:47 -07002239 } else {
2240copy_iov:
Jens Axboeb7bb4f72019-12-15 22:13:43 -07002241 ret = io_setup_async_rw(req, io_size, iovec,
Jens Axboef67676d2019-12-02 11:03:47 -07002242 inline_vecs, &iter);
2243 if (ret)
2244 goto out_free;
2245 return -EAGAIN;
2246 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002247 }
Jens Axboef67676d2019-12-02 11:03:47 -07002248out_free:
Pavel Begunkov1e950812020-02-06 19:51:16 +03002249 kfree(iovec);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03002250 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002251 return ret;
2252}
2253
Jens Axboe3529d8c2019-12-19 18:24:38 -07002254static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2255 bool force_nonblock)
Jens Axboef67676d2019-12-02 11:03:47 -07002256{
Jens Axboe3529d8c2019-12-19 18:24:38 -07002257 struct io_async_ctx *io;
2258 struct iov_iter iter;
Jens Axboef67676d2019-12-02 11:03:47 -07002259 ssize_t ret;
2260
Jens Axboe3529d8c2019-12-19 18:24:38 -07002261 ret = io_prep_rw(req, sqe, force_nonblock);
2262 if (ret)
2263 return ret;
Jens Axboef67676d2019-12-02 11:03:47 -07002264
Jens Axboe3529d8c2019-12-19 18:24:38 -07002265 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
2266 return -EBADF;
Jens Axboef67676d2019-12-02 11:03:47 -07002267
Jens Axboe3529d8c2019-12-19 18:24:38 -07002268 if (!req->io)
2269 return 0;
2270
2271 io = req->io;
2272 io->rw.iov = io->rw.fast_iov;
2273 req->io = NULL;
2274 ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter);
2275 req->io = io;
2276 if (ret < 0)
2277 return ret;
2278
2279 io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2280 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07002281}
2282
Pavel Begunkov267bc902019-11-07 01:41:08 +03002283static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
Jens Axboe8358e3a2019-04-23 08:17:58 -06002284 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002285{
2286 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
Jens Axboe9adbd452019-12-20 08:45:55 -07002287 struct kiocb *kiocb = &req->rw.kiocb;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002288 struct iov_iter iter;
Jens Axboe31b51512019-01-18 22:56:34 -07002289 size_t iov_count;
Jens Axboef67676d2019-12-02 11:03:47 -07002290 ssize_t ret, io_size;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002291
Jens Axboe3529d8c2019-12-19 18:24:38 -07002292 ret = io_import_iovec(WRITE, req, &iovec, &iter);
Jens Axboe06b76d42019-12-19 14:44:26 -07002293 if (ret < 0)
2294 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002295
Jens Axboefd6c2e42019-12-18 12:19:41 -07002296 /* Ensure we clear previously set non-block flag */
2297 if (!force_nonblock)
Jens Axboe9adbd452019-12-20 08:45:55 -07002298 req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
Jens Axboefd6c2e42019-12-18 12:19:41 -07002299
Bijan Mottahedeh797f3f52020-01-15 18:37:45 -08002300 req->result = 0;
Jens Axboef67676d2019-12-02 11:03:47 -07002301 io_size = ret;
Jens Axboe9e645e112019-05-10 16:07:28 -06002302 if (req->flags & REQ_F_LINK)
Jens Axboef67676d2019-12-02 11:03:47 -07002303 req->result = io_size;
2304
2305 /*
2306 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2307 * we know to async punt it even if it was opened O_NONBLOCK
2308 */
2309 if (force_nonblock && !io_file_supports_async(req->file)) {
2310 req->flags |= REQ_F_MUST_PUNT;
2311 goto copy_iov;
2312 }
2313
Jens Axboe10d59342019-12-09 20:16:22 -07002314 /* file path doesn't support NOWAIT for non-direct_IO */
2315 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
2316 (req->flags & REQ_F_ISREG))
Jens Axboef67676d2019-12-02 11:03:47 -07002317 goto copy_iov;
Jens Axboe9e645e112019-05-10 16:07:28 -06002318
Jens Axboe31b51512019-01-18 22:56:34 -07002319 iov_count = iov_iter_count(&iter);
Jens Axboe9adbd452019-12-20 08:45:55 -07002320 ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002321 if (!ret) {
Roman Penyaev9bf79332019-03-25 20:09:24 +01002322 ssize_t ret2;
2323
Jens Axboe2b188cc2019-01-07 10:46:33 -07002324 /*
2325 * Open-code file_start_write here to grab freeze protection,
2326 * which will be released by another thread in
2327 * io_complete_rw(). Fool lockdep by telling it the lock got
2328 * released so that it doesn't complain about the held lock when
2329 * we return to userspace.
2330 */
Jens Axboe491381ce2019-10-17 09:20:46 -06002331 if (req->flags & REQ_F_ISREG) {
Jens Axboe9adbd452019-12-20 08:45:55 -07002332 __sb_start_write(file_inode(req->file)->i_sb,
Jens Axboe2b188cc2019-01-07 10:46:33 -07002333 SB_FREEZE_WRITE, true);
Jens Axboe9adbd452019-12-20 08:45:55 -07002334 __sb_writers_release(file_inode(req->file)->i_sb,
Jens Axboe2b188cc2019-01-07 10:46:33 -07002335 SB_FREEZE_WRITE);
2336 }
2337 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01002338
Jens Axboe9adbd452019-12-20 08:45:55 -07002339 if (req->file->f_op->write_iter)
2340 ret2 = call_write_iter(req->file, kiocb, &iter);
Jens Axboe32960612019-09-23 11:05:34 -06002341 else
Jens Axboe9adbd452019-12-20 08:45:55 -07002342 ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
Jens Axboef67676d2019-12-02 11:03:47 -07002343 if (!force_nonblock || ret2 != -EAGAIN) {
Pavel Begunkovcf6fd4b2019-11-25 23:14:39 +03002344 kiocb_done(kiocb, ret2, nxt, req->in_async);
Jens Axboef67676d2019-12-02 11:03:47 -07002345 } else {
2346copy_iov:
Jens Axboeb7bb4f72019-12-15 22:13:43 -07002347 ret = io_setup_async_rw(req, io_size, iovec,
Jens Axboef67676d2019-12-02 11:03:47 -07002348 inline_vecs, &iter);
2349 if (ret)
2350 goto out_free;
2351 return -EAGAIN;
2352 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002353 }
Jens Axboe31b51512019-01-18 22:56:34 -07002354out_free:
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03002355 req->flags &= ~REQ_F_NEED_CLEANUP;
Pavel Begunkov1e950812020-02-06 19:51:16 +03002356 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002357 return ret;
2358}
2359
2360/*
2361 * IORING_OP_NOP just posts a completion event, nothing else.
2362 */
Jens Axboe78e19bb2019-11-06 15:21:34 -07002363static int io_nop(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002364{
2365 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002366
Jens Axboedef596e2019-01-09 08:59:42 -07002367 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2368 return -EINVAL;
2369
Jens Axboe78e19bb2019-11-06 15:21:34 -07002370 io_cqring_add_event(req, 0);
Jens Axboee65ef562019-03-12 10:16:44 -06002371 io_put_req(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002372 return 0;
2373}
2374
Jens Axboe3529d8c2019-12-19 18:24:38 -07002375static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07002376{
Jens Axboe6b063142019-01-10 22:13:58 -07002377 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07002378
Jens Axboe09bb8392019-03-13 12:39:28 -06002379 if (!req->file)
2380 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07002381
Jens Axboe6b063142019-01-10 22:13:58 -07002382 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07002383 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07002384 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07002385 return -EINVAL;
2386
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002387 req->sync.flags = READ_ONCE(sqe->fsync_flags);
2388 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
2389 return -EINVAL;
2390
2391 req->sync.off = READ_ONCE(sqe->off);
2392 req->sync.len = READ_ONCE(sqe->len);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07002393 return 0;
2394}
2395
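/*
 * If the async work item was flagged for cancellation, complete the request
 * with -ECANCELED and return true so the caller can bail out.
 */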
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002396static bool io_req_cancelled(struct io_kiocb *req)
2397{
2398 if (req->work.flags & IO_WQ_WORK_CANCEL) {
2399 req_set_fail_links(req);
2400 io_cqring_add_event(req, -ECANCELED);
2401 io_put_req(req);
2402 return true;
2403 }
2404
2405 return false;
2406}
2407
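/*
 * io-wq callback used when the next punted request carries a linked
 * timeout: arm the timeout first, then hand the work back to the normal
 * submit handler.
 */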
Jens Axboe78912932020-01-14 22:09:06 -07002408static void io_link_work_cb(struct io_wq_work **workptr)
2409{
2410 struct io_wq_work *work = *workptr;
2411 struct io_kiocb *link = work->data;
2412
2413 io_queue_linked_timeout(link);
2414 work->func = io_wq_submit_work;
2415}
2416
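/*
 * Hand the next request in the chain straight back to the io-wq worker,
 * routing it through io_link_work_cb first if it has a linked timeout.
 */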
2417static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
2418{
2419 struct io_kiocb *link;
2420
2421 io_prep_async_work(nxt, &link);
2422 *workptr = &nxt->work;
2423 if (link) {
2424 nxt->work.flags |= IO_WQ_WORK_CB;
2425 nxt->work.func = io_link_work_cb;
2426 nxt->work.data = link;
2427 }
2428}
2429
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002430static void io_fsync_finish(struct io_wq_work **workptr)
2431{
2432 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
2433 loff_t end = req->sync.off + req->sync.len;
2434 struct io_kiocb *nxt = NULL;
2435 int ret;
2436
2437 if (io_req_cancelled(req))
2438 return;
2439
Jens Axboe9adbd452019-12-20 08:45:55 -07002440 ret = vfs_fsync_range(req->file, req->sync.off,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002441 end > 0 ? end : LLONG_MAX,
2442 req->sync.flags & IORING_FSYNC_DATASYNC);
2443 if (ret < 0)
2444 req_set_fail_links(req);
2445 io_cqring_add_event(req, ret);
2446 io_put_req_find_next(req, &nxt);
2447 if (nxt)
Jens Axboe78912932020-01-14 22:09:06 -07002448 io_wq_assign_next(workptr, nxt);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002449}
2450
Jens Axboefc4df992019-12-10 14:38:45 -07002451static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
2452 bool force_nonblock)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07002453{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002454 struct io_wq_work *work, *old_work;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07002455
2456 /* fsync always requires a blocking context */
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002457 if (force_nonblock) {
2458 io_put_req(req);
2459 req->work.func = io_fsync_finish;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07002460 return -EAGAIN;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002461 }
Christoph Hellwigc992fe22019-01-11 09:43:02 -07002462
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002463 work = old_work = &req->work;
2464 io_fsync_finish(&work);
2465 if (work && work != old_work)
2466 *nxt = container_of(work, struct io_kiocb, work);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07002467 return 0;
2468}
2469
Jens Axboed63d1b52019-12-10 10:38:56 -07002470static void io_fallocate_finish(struct io_wq_work **workptr)
2471{
2472 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
2473 struct io_kiocb *nxt = NULL;
2474 int ret;
2475
2476 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
2477 req->sync.len);
2478 if (ret < 0)
2479 req_set_fail_links(req);
2480 io_cqring_add_event(req, ret);
2481 io_put_req_find_next(req, &nxt);
2482 if (nxt)
2483 io_wq_assign_next(workptr, nxt);
2484}
2485
2486static int io_fallocate_prep(struct io_kiocb *req,
2487 const struct io_uring_sqe *sqe)
2488{
2489 if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
2490 return -EINVAL;
2491
2492 req->sync.off = READ_ONCE(sqe->off);
2493 req->sync.len = READ_ONCE(sqe->addr);
2494 req->sync.mode = READ_ONCE(sqe->len);
2495 return 0;
2496}
2497
2498static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
2499 bool force_nonblock)
2500{
2501 struct io_wq_work *work, *old_work;
2502
2503	 /* fallocate always requires a blocking context */
2504 if (force_nonblock) {
2505 io_put_req(req);
2506 req->work.func = io_fallocate_finish;
2507 return -EAGAIN;
2508 }
2509
2510 work = old_work = &req->work;
2511 io_fallocate_finish(&work);
2512 if (work && work != old_work)
2513 *nxt = container_of(work, struct io_kiocb, work);
2514
2515 return 0;
2516}
2517
Jens Axboe15b71ab2019-12-11 11:20:36 -07002518static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2519{
Jens Axboef8748882020-01-08 17:47:02 -07002520 const char __user *fname;
Jens Axboe15b71ab2019-12-11 11:20:36 -07002521 int ret;
2522
2523 if (sqe->ioprio || sqe->buf_index)
2524 return -EINVAL;
Jens Axboecf3040c2020-02-06 21:31:40 -07002525 if (sqe->flags & IOSQE_FIXED_FILE)
2526 return -EBADF;
Jens Axboe15b71ab2019-12-11 11:20:36 -07002527
2528 req->open.dfd = READ_ONCE(sqe->fd);
Jens Axboec12cedf2020-01-08 17:41:21 -07002529 req->open.how.mode = READ_ONCE(sqe->len);
Jens Axboef8748882020-01-08 17:47:02 -07002530 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboec12cedf2020-01-08 17:41:21 -07002531 req->open.how.flags = READ_ONCE(sqe->open_flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07002532
Jens Axboef8748882020-01-08 17:47:02 -07002533 req->open.filename = getname(fname);
Jens Axboe15b71ab2019-12-11 11:20:36 -07002534 if (IS_ERR(req->open.filename)) {
2535 ret = PTR_ERR(req->open.filename);
2536 req->open.filename = NULL;
2537 return ret;
2538 }
2539
2540 return 0;
2541}
2542
Jens Axboecebdb982020-01-08 17:59:24 -07002543static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2544{
2545 struct open_how __user *how;
2546 const char __user *fname;
2547 size_t len;
2548 int ret;
2549
2550 if (sqe->ioprio || sqe->buf_index)
2551 return -EINVAL;
Jens Axboecf3040c2020-02-06 21:31:40 -07002552 if (sqe->flags & IOSQE_FIXED_FILE)
2553 return -EBADF;
Jens Axboecebdb982020-01-08 17:59:24 -07002554
2555 req->open.dfd = READ_ONCE(sqe->fd);
2556 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
2557 how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
2558 len = READ_ONCE(sqe->len);
2559
2560 if (len < OPEN_HOW_SIZE_VER0)
2561 return -EINVAL;
2562
2563 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
2564 len);
2565 if (ret)
2566 return ret;
2567
2568 if (!(req->open.how.flags & O_PATH) && force_o_largefile())
2569 req->open.how.flags |= O_LARGEFILE;
2570
2571 req->open.filename = getname(fname);
2572 if (IS_ERR(req->open.filename)) {
2573 ret = PTR_ERR(req->open.filename);
2574 req->open.filename = NULL;
2575 return ret;
2576 }
2577
2578 return 0;
2579}
2580
2581static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
2582 bool force_nonblock)
Jens Axboe15b71ab2019-12-11 11:20:36 -07002583{
2584 struct open_flags op;
Jens Axboe15b71ab2019-12-11 11:20:36 -07002585 struct file *file;
2586 int ret;
2587
Jens Axboef86cd202020-01-29 13:46:44 -07002588 if (force_nonblock)
Jens Axboe15b71ab2019-12-11 11:20:36 -07002589 return -EAGAIN;
Jens Axboe15b71ab2019-12-11 11:20:36 -07002590
Jens Axboecebdb982020-01-08 17:59:24 -07002591 ret = build_open_flags(&req->open.how, &op);
Jens Axboe15b71ab2019-12-11 11:20:36 -07002592 if (ret)
2593 goto err;
2594
Jens Axboecebdb982020-01-08 17:59:24 -07002595 ret = get_unused_fd_flags(req->open.how.flags);
Jens Axboe15b71ab2019-12-11 11:20:36 -07002596 if (ret < 0)
2597 goto err;
2598
2599 file = do_filp_open(req->open.dfd, req->open.filename, &op);
2600 if (IS_ERR(file)) {
2601 put_unused_fd(ret);
2602 ret = PTR_ERR(file);
2603 } else {
2604 fsnotify_open(file);
2605 fd_install(ret, file);
2606 }
2607err:
2608 putname(req->open.filename);
2609 if (ret < 0)
2610 req_set_fail_links(req);
2611 io_cqring_add_event(req, ret);
2612 io_put_req_find_next(req, nxt);
2613 return 0;
2614}
2615
Jens Axboecebdb982020-01-08 17:59:24 -07002616static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
2617 bool force_nonblock)
2618{
2619 req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
2620 return io_openat2(req, nxt, force_nonblock);
2621}
2622
Jens Axboe3e4827b2020-01-08 15:18:09 -07002623static int io_epoll_ctl_prep(struct io_kiocb *req,
2624 const struct io_uring_sqe *sqe)
2625{
2626#if defined(CONFIG_EPOLL)
2627 if (sqe->ioprio || sqe->buf_index)
2628 return -EINVAL;
2629
2630 req->epoll.epfd = READ_ONCE(sqe->fd);
2631 req->epoll.op = READ_ONCE(sqe->len);
2632 req->epoll.fd = READ_ONCE(sqe->off);
2633
2634 if (ep_op_has_event(req->epoll.op)) {
2635 struct epoll_event __user *ev;
2636
2637 ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
2638 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
2639 return -EFAULT;
2640 }
2641
2642 return 0;
2643#else
2644 return -EOPNOTSUPP;
2645#endif
2646}
2647
2648static int io_epoll_ctl(struct io_kiocb *req, struct io_kiocb **nxt,
2649 bool force_nonblock)
2650{
2651#if defined(CONFIG_EPOLL)
2652 struct io_epoll *ie = &req->epoll;
2653 int ret;
2654
2655 ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
2656 if (force_nonblock && ret == -EAGAIN)
2657 return -EAGAIN;
2658
2659 if (ret < 0)
2660 req_set_fail_links(req);
2661 io_cqring_add_event(req, ret);
2662 io_put_req_find_next(req, nxt);
2663 return 0;
2664#else
2665 return -EOPNOTSUPP;
2666#endif
2667}
2668
Jens Axboec1ca7572019-12-25 22:18:28 -07002669static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2670{
2671#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
2672 if (sqe->ioprio || sqe->buf_index || sqe->off)
2673 return -EINVAL;
2674
2675 req->madvise.addr = READ_ONCE(sqe->addr);
2676 req->madvise.len = READ_ONCE(sqe->len);
2677 req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
2678 return 0;
2679#else
2680 return -EOPNOTSUPP;
2681#endif
2682}
2683
2684static int io_madvise(struct io_kiocb *req, struct io_kiocb **nxt,
2685 bool force_nonblock)
2686{
2687#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
2688 struct io_madvise *ma = &req->madvise;
2689 int ret;
2690
2691 if (force_nonblock)
2692 return -EAGAIN;
2693
2694 ret = do_madvise(ma->addr, ma->len, ma->advice);
2695 if (ret < 0)
2696 req_set_fail_links(req);
2697 io_cqring_add_event(req, ret);
2698 io_put_req_find_next(req, nxt);
2699 return 0;
2700#else
2701 return -EOPNOTSUPP;
2702#endif
2703}
2704
Jens Axboe4840e412019-12-25 22:03:45 -07002705static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2706{
2707 if (sqe->ioprio || sqe->buf_index || sqe->addr)
2708 return -EINVAL;
2709
2710 req->fadvise.offset = READ_ONCE(sqe->off);
2711 req->fadvise.len = READ_ONCE(sqe->len);
2712 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
2713 return 0;
2714}
2715
2716static int io_fadvise(struct io_kiocb *req, struct io_kiocb **nxt,
2717 bool force_nonblock)
2718{
2719 struct io_fadvise *fa = &req->fadvise;
2720 int ret;
2721
Jens Axboe3e694262020-02-01 09:22:49 -07002722 if (force_nonblock) {
2723 switch (fa->advice) {
2724 case POSIX_FADV_NORMAL:
2725 case POSIX_FADV_RANDOM:
2726 case POSIX_FADV_SEQUENTIAL:
2727 break;
2728 default:
2729 return -EAGAIN;
2730 }
2731 }
Jens Axboe4840e412019-12-25 22:03:45 -07002732
2733 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
2734 if (ret < 0)
2735 req_set_fail_links(req);
2736 io_cqring_add_event(req, ret);
2737 io_put_req_find_next(req, nxt);
2738 return 0;
2739}
2740
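/*
 * Prep IORING_OP_STATX. The request reuses the io_open fields: dfd and
 * statx mask from the SQE, the user buffer from sqe->addr2 and the
 * statx flags. The path is copied in with getname_flags() so the async
 * side doesn't need access to user memory for the filename.
 */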
Jens Axboeeddc7ef2019-12-13 21:18:10 -07002741static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2742{
Jens Axboef8748882020-01-08 17:47:02 -07002743 const char __user *fname;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07002744 unsigned lookup_flags;
2745 int ret;
2746
2747 if (sqe->ioprio || sqe->buf_index)
2748 return -EINVAL;
Jens Axboecf3040c2020-02-06 21:31:40 -07002749 if (sqe->flags & IOSQE_FIXED_FILE)
2750 return -EBADF;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07002751
2752 req->open.dfd = READ_ONCE(sqe->fd);
2753 req->open.mask = READ_ONCE(sqe->len);
Jens Axboef8748882020-01-08 17:47:02 -07002754 fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboeeddc7ef2019-12-13 21:18:10 -07002755 req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboec12cedf2020-01-08 17:41:21 -07002756 req->open.how.flags = READ_ONCE(sqe->statx_flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07002757
Jens Axboec12cedf2020-01-08 17:41:21 -07002758 if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.how.flags))
Jens Axboeeddc7ef2019-12-13 21:18:10 -07002759 return -EINVAL;
2760
Jens Axboef8748882020-01-08 17:47:02 -07002761 req->open.filename = getname_flags(fname, lookup_flags, NULL);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07002762 if (IS_ERR(req->open.filename)) {
2763 ret = PTR_ERR(req->open.filename);
2764 req->open.filename = NULL;
2765 return ret;
2766 }
2767
2768 return 0;
2769}
2770
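/*
 * Issue IORING_OP_STATX: look the path up, do vfs_getattr() and copy
 * the result to the user buffer, retrying with LOOKUP_REVAL if the
 * dentry turned out to be stale. Always punted when nonblocking, as
 * the lookup may block.
 */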
2771static int io_statx(struct io_kiocb *req, struct io_kiocb **nxt,
2772 bool force_nonblock)
2773{
2774 struct io_open *ctx = &req->open;
2775 unsigned lookup_flags;
2776 struct path path;
2777 struct kstat stat;
2778 int ret;
2779
2780 if (force_nonblock)
2781 return -EAGAIN;
2782
Jens Axboec12cedf2020-01-08 17:41:21 -07002783 if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
Jens Axboeeddc7ef2019-12-13 21:18:10 -07002784 return -EINVAL;
2785
2786retry:
2787 /* filename_lookup() drops it, keep a reference */
2788 ctx->filename->refcnt++;
2789
2790 ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path,
2791 NULL);
2792 if (ret)
2793 goto err;
2794
Jens Axboec12cedf2020-01-08 17:41:21 -07002795 ret = vfs_getattr(&path, &stat, ctx->mask, ctx->how.flags);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07002796 path_put(&path);
2797 if (retry_estale(ret, lookup_flags)) {
2798 lookup_flags |= LOOKUP_REVAL;
2799 goto retry;
2800 }
2801 if (!ret)
2802 ret = cp_statx(&stat, ctx->buffer);
2803err:
2804 putname(ctx->filename);
2805 if (ret < 0)
2806 req_set_fail_links(req);
2807 io_cqring_add_event(req, ret);
2808 io_put_req_find_next(req, nxt);
2809 return 0;
2810}
2811
Jens Axboeb5dba592019-12-11 14:02:38 -07002812static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2813{
2814 /*
2815 * If we queue this for async, it must not be cancellable. That would
2816	 * leave the 'file' in an indeterminate state.
2817 */
2818 req->work.flags |= IO_WQ_WORK_NO_CANCEL;
2819
2820 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
2821 sqe->rw_flags || sqe->buf_index)
2822 return -EINVAL;
2823 if (sqe->flags & IOSQE_FIXED_FILE)
Jens Axboecf3040c2020-02-06 21:31:40 -07002824 return -EBADF;
Jens Axboeb5dba592019-12-11 14:02:38 -07002825
2826 req->close.fd = READ_ONCE(sqe->fd);
2827 if (req->file->f_op == &io_uring_fops ||
Pavel Begunkovb14cca02020-01-17 04:45:59 +03002828 req->close.fd == req->ctx->ring_fd)
Jens Axboeb5dba592019-12-11 14:02:38 -07002829 return -EBADF;
2830
2831 return 0;
2832}
2833
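/*
 * Async part of close. If the work was queued with the task's files,
 * do the actual filp_close() and post the CQE here; either way drop
 * the file reference that io_close() took.
 */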
2834static void io_close_finish(struct io_wq_work **workptr)
2835{
2836 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
2837 struct io_kiocb *nxt = NULL;
2838
2839 /* Invoked with files, we need to do the close */
2840 if (req->work.files) {
2841 int ret;
2842
2843 ret = filp_close(req->close.put_file, req->work.files);
Jens Axboe1a417f42020-01-31 17:16:48 -07002844 if (ret < 0)
Jens Axboeb5dba592019-12-11 14:02:38 -07002845 req_set_fail_links(req);
Jens Axboeb5dba592019-12-11 14:02:38 -07002846 io_cqring_add_event(req, ret);
2847 }
2848
2849 fput(req->close.put_file);
2850
Jens Axboeb5dba592019-12-11 14:02:38 -07002851 io_put_req_find_next(req, &nxt);
2852 if (nxt)
2853 io_wq_assign_next(workptr, nxt);
2854}
2855
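/*
 * IORING_OP_CLOSE. Detach the struct file from the fd table first so
 * the fd can't be reused, then close inline if the file has no
 * ->flush() method. Files with ->flush() may block on close, so those
 * are punted to io_close_finish() in async context.
 */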
2856static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
2857 bool force_nonblock)
2858{
2859 int ret;
2860
2861 req->close.put_file = NULL;
2862 ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
2863 if (ret < 0)
2864 return ret;
2865
2866 /* if the file has a flush method, be safe and punt to async */
Jens Axboef86cd202020-01-29 13:46:44 -07002867 if (req->close.put_file->f_op->flush && !io_wq_current_is_worker())
Jens Axboeb5dba592019-12-11 14:02:38 -07002868 goto eagain;
Jens Axboeb5dba592019-12-11 14:02:38 -07002869
2870 /*
2871 * No ->flush(), safely close from here and just punt the
2872 * fput() to async context.
2873 */
2874 ret = filp_close(req->close.put_file, current->files);
2875
2876 if (ret < 0)
2877 req_set_fail_links(req);
2878 io_cqring_add_event(req, ret);
2879
2880 if (io_wq_current_is_worker()) {
2881 struct io_wq_work *old_work, *work;
2882
2883 old_work = work = &req->work;
2884 io_close_finish(&work);
2885 if (work && work != old_work)
2886 *nxt = container_of(work, struct io_kiocb, work);
2887 return 0;
2888 }
2889
2890eagain:
2891 req->work.func = io_close_finish;
Jens Axboe1a417f42020-01-31 17:16:48 -07002892 /*
2893	 * Queue the async work manually here to avoid grabbing files - we
2894	 * don't need them, and grabbing them would make io_close_finish()
2895	 * close the file again and post a duplicate CQE for this request.
2896 */
2897 io_queue_async_work(req);
2898 return 0;
Jens Axboeb5dba592019-12-11 14:02:38 -07002899}
2900
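/*
 * Prep IORING_OP_SYNC_FILE_RANGE: offset, length and sync flags come
 * from the SQE. Not supported on IOPOLL rings.
 */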
Jens Axboe3529d8c2019-12-19 18:24:38 -07002901static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe5d17b4a2019-04-09 14:56:44 -06002902{
2903 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06002904
2905 if (!req->file)
2906 return -EBADF;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06002907
2908 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2909 return -EINVAL;
2910 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
2911 return -EINVAL;
2912
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002913 req->sync.off = READ_ONCE(sqe->off);
2914 req->sync.len = READ_ONCE(sqe->len);
2915 req->sync.flags = READ_ONCE(sqe->sync_range_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002916 return 0;
2917}
2918
2919static void io_sync_file_range_finish(struct io_wq_work **workptr)
2920{
2921 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
2922 struct io_kiocb *nxt = NULL;
2923 int ret;
2924
2925 if (io_req_cancelled(req))
2926 return;
2927
Jens Axboe9adbd452019-12-20 08:45:55 -07002928 ret = sync_file_range(req->file, req->sync.off, req->sync.len,
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002929 req->sync.flags);
2930 if (ret < 0)
2931 req_set_fail_links(req);
2932 io_cqring_add_event(req, ret);
2933 io_put_req_find_next(req, &nxt);
2934 if (nxt)
Jens Axboe78912932020-01-14 22:09:06 -07002935 io_wq_assign_next(workptr, nxt);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06002936}
2937
Jens Axboefc4df992019-12-10 14:38:45 -07002938static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
Jens Axboe5d17b4a2019-04-09 14:56:44 -06002939 bool force_nonblock)
2940{
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002941 struct io_wq_work *work, *old_work;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06002942
2943 /* sync_file_range always requires a blocking context */
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002944 if (force_nonblock) {
2945 io_put_req(req);
2946 req->work.func = io_sync_file_range_finish;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06002947 return -EAGAIN;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002948 }
Jens Axboe5d17b4a2019-04-09 14:56:44 -06002949
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07002950 work = old_work = &req->work;
2951 io_sync_file_range_finish(&work);
2952 if (work && work != old_work)
2953 *nxt = container_of(work, struct io_kiocb, work);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06002954 return 0;
2955}
2956
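/*
 * Prep IORING_OP_SENDMSG/IORING_OP_SEND. For SENDMSG with an async
 * context already allocated, copy the user msghdr and iovec into
 * kernel memory now so a later async retry doesn't depend on the
 * original user pointers staying stable.
 */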
Jens Axboe3529d8c2019-12-19 18:24:38 -07002957static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboeaa1fa282019-04-19 13:38:09 -06002958{
Jens Axboe03b12302019-12-02 18:50:25 -07002959#if defined(CONFIG_NET)
Jens Axboee47293f2019-12-20 08:58:21 -07002960 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe3529d8c2019-12-19 18:24:38 -07002961 struct io_async_ctx *io = req->io;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03002962 int ret;
Jens Axboe03b12302019-12-02 18:50:25 -07002963
Jens Axboee47293f2019-12-20 08:58:21 -07002964 sr->msg_flags = READ_ONCE(sqe->msg_flags);
2965 sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboefddafac2020-01-04 20:19:44 -07002966 sr->len = READ_ONCE(sqe->len);
Jens Axboe3529d8c2019-12-19 18:24:38 -07002967
Jens Axboefddafac2020-01-04 20:19:44 -07002968 if (!io || req->opcode == IORING_OP_SEND)
Jens Axboe3529d8c2019-12-19 18:24:38 -07002969 return 0;
2970
Jens Axboed9688562019-12-09 19:35:20 -07002971 io->msg.iov = io->msg.fast_iov;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03002972 ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
Jens Axboee47293f2019-12-20 08:58:21 -07002973 &io->msg.iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03002974 if (!ret)
2975 req->flags |= REQ_F_NEED_CLEANUP;
2976 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07002977#else
Jens Axboee47293f2019-12-20 08:58:21 -07002978 return -EOPNOTSUPP;
Jens Axboe03b12302019-12-02 18:50:25 -07002979#endif
2980}
2981
Jens Axboefc4df992019-12-10 14:38:45 -07002982static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
2983 bool force_nonblock)
Jens Axboe03b12302019-12-02 18:50:25 -07002984{
2985#if defined(CONFIG_NET)
Jens Axboe0b416c32019-12-15 10:57:46 -07002986 struct io_async_msghdr *kmsg = NULL;
Jens Axboe03b12302019-12-02 18:50:25 -07002987 struct socket *sock;
2988 int ret;
2989
2990 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
2991 return -EINVAL;
2992
2993 sock = sock_from_file(req->file, &ret);
2994 if (sock) {
Jens Axboeb7bb4f72019-12-15 22:13:43 -07002995 struct io_async_ctx io;
Jens Axboe03b12302019-12-02 18:50:25 -07002996 struct sockaddr_storage addr;
Jens Axboe03b12302019-12-02 18:50:25 -07002997 unsigned flags;
2998
Jens Axboe03b12302019-12-02 18:50:25 -07002999 if (req->io) {
Jens Axboe0b416c32019-12-15 10:57:46 -07003000 kmsg = &req->io->msg;
3001 kmsg->msg.msg_name = &addr;
3002 /* if iov is set, it's allocated already */
3003 if (!kmsg->iov)
3004 kmsg->iov = kmsg->fast_iov;
3005 kmsg->msg.msg_iter.iov = kmsg->iov;
Jens Axboe03b12302019-12-02 18:50:25 -07003006 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07003007 struct io_sr_msg *sr = &req->sr_msg;
3008
Jens Axboe0b416c32019-12-15 10:57:46 -07003009 kmsg = &io.msg;
3010 kmsg->msg.msg_name = &addr;
Jens Axboe3529d8c2019-12-19 18:24:38 -07003011
3012 io.msg.iov = io.msg.fast_iov;
3013 ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
3014 sr->msg_flags, &io.msg.iov);
Jens Axboe03b12302019-12-02 18:50:25 -07003015 if (ret)
Jens Axboe3529d8c2019-12-19 18:24:38 -07003016 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07003017 }
3018
Jens Axboee47293f2019-12-20 08:58:21 -07003019 flags = req->sr_msg.msg_flags;
3020 if (flags & MSG_DONTWAIT)
3021 req->flags |= REQ_F_NOWAIT;
3022 else if (force_nonblock)
3023 flags |= MSG_DONTWAIT;
3024
Jens Axboe0b416c32019-12-15 10:57:46 -07003025 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
Jens Axboe03b12302019-12-02 18:50:25 -07003026 if (force_nonblock && ret == -EAGAIN) {
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003027 if (req->io)
3028 return -EAGAIN;
Pavel Begunkov1e950812020-02-06 19:51:16 +03003029 if (io_alloc_async_ctx(req)) {
3030 if (kmsg && kmsg->iov != kmsg->fast_iov)
3031 kfree(kmsg->iov);
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003032 return -ENOMEM;
Pavel Begunkov1e950812020-02-06 19:51:16 +03003033 }
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003034 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003035 memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
Jens Axboe0b416c32019-12-15 10:57:46 -07003036 return -EAGAIN;
Jens Axboe03b12302019-12-02 18:50:25 -07003037 }
3038 if (ret == -ERESTARTSYS)
3039 ret = -EINTR;
3040 }
3041
Pavel Begunkov1e950812020-02-06 19:51:16 +03003042 if (kmsg && kmsg->iov != kmsg->fast_iov)
Jens Axboe0b416c32019-12-15 10:57:46 -07003043 kfree(kmsg->iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003044 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe03b12302019-12-02 18:50:25 -07003045 io_cqring_add_event(req, ret);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07003046 if (ret < 0)
3047 req_set_fail_links(req);
Jens Axboe03b12302019-12-02 18:50:25 -07003048 io_put_req_find_next(req, nxt);
3049 return 0;
3050#else
3051 return -EOPNOTSUPP;
3052#endif
3053}
3054
Jens Axboefddafac2020-01-04 20:19:44 -07003055static int io_send(struct io_kiocb *req, struct io_kiocb **nxt,
3056 bool force_nonblock)
3057{
3058#if defined(CONFIG_NET)
3059 struct socket *sock;
3060 int ret;
3061
3062 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3063 return -EINVAL;
3064
3065 sock = sock_from_file(req->file, &ret);
3066 if (sock) {
3067 struct io_sr_msg *sr = &req->sr_msg;
3068 struct msghdr msg;
3069 struct iovec iov;
3070 unsigned flags;
3071
3072 ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
3073 &msg.msg_iter);
3074 if (ret)
3075 return ret;
3076
3077 msg.msg_name = NULL;
3078 msg.msg_control = NULL;
3079 msg.msg_controllen = 0;
3080 msg.msg_namelen = 0;
3081
3082 flags = req->sr_msg.msg_flags;
3083 if (flags & MSG_DONTWAIT)
3084 req->flags |= REQ_F_NOWAIT;
3085 else if (force_nonblock)
3086 flags |= MSG_DONTWAIT;
3087
Jens Axboe0b7b21e2020-01-31 08:34:59 -07003088 msg.msg_flags = flags;
3089 ret = sock_sendmsg(sock, &msg);
Jens Axboefddafac2020-01-04 20:19:44 -07003090 if (force_nonblock && ret == -EAGAIN)
3091 return -EAGAIN;
3092 if (ret == -ERESTARTSYS)
3093 ret = -EINTR;
3094 }
3095
3096 io_cqring_add_event(req, ret);
3097 if (ret < 0)
3098 req_set_fail_links(req);
3099 io_put_req_find_next(req, nxt);
3100 return 0;
3101#else
3102 return -EOPNOTSUPP;
3103#endif
3104}
3105
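/*
 * Prep IORING_OP_RECVMSG/IORING_OP_RECV. Mirrors the sendmsg prep:
 * for RECVMSG with an async context, import the user msghdr up front
 * so a punted retry can reuse the copied iovec.
 */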
Jens Axboe3529d8c2019-12-19 18:24:38 -07003106static int io_recvmsg_prep(struct io_kiocb *req,
3107 const struct io_uring_sqe *sqe)
Jens Axboe03b12302019-12-02 18:50:25 -07003108{
3109#if defined(CONFIG_NET)
Jens Axboee47293f2019-12-20 08:58:21 -07003110 struct io_sr_msg *sr = &req->sr_msg;
Jens Axboe3529d8c2019-12-19 18:24:38 -07003111 struct io_async_ctx *io = req->io;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003112 int ret;
Jens Axboe06b76d42019-12-19 14:44:26 -07003113
Jens Axboe3529d8c2019-12-19 18:24:38 -07003114 sr->msg_flags = READ_ONCE(sqe->msg_flags);
3115 sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
Jens Axboe0b7b21e2020-01-31 08:34:59 -07003116 sr->len = READ_ONCE(sqe->len);
Jens Axboe3529d8c2019-12-19 18:24:38 -07003117
Jens Axboefddafac2020-01-04 20:19:44 -07003118 if (!io || req->opcode == IORING_OP_RECV)
Jens Axboe06b76d42019-12-19 14:44:26 -07003119 return 0;
Jens Axboe03b12302019-12-02 18:50:25 -07003120
Jens Axboed9688562019-12-09 19:35:20 -07003121 io->msg.iov = io->msg.fast_iov;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003122 ret = recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
Jens Axboee47293f2019-12-20 08:58:21 -07003123 &io->msg.uaddr, &io->msg.iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003124 if (!ret)
3125 req->flags |= REQ_F_NEED_CLEANUP;
3126 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07003127#else
Jens Axboee47293f2019-12-20 08:58:21 -07003128 return -EOPNOTSUPP;
Jens Axboe03b12302019-12-02 18:50:25 -07003129#endif
3130}
3131
Jens Axboefc4df992019-12-10 14:38:45 -07003132static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
3133 bool force_nonblock)
Jens Axboe03b12302019-12-02 18:50:25 -07003134{
3135#if defined(CONFIG_NET)
Jens Axboe0b416c32019-12-15 10:57:46 -07003136 struct io_async_msghdr *kmsg = NULL;
Jens Axboe0fa03c62019-04-19 13:34:07 -06003137 struct socket *sock;
3138 int ret;
3139
3140 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3141 return -EINVAL;
3142
3143 sock = sock_from_file(req->file, &ret);
3144 if (sock) {
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003145 struct io_async_ctx io;
Jens Axboe03b12302019-12-02 18:50:25 -07003146 struct sockaddr_storage addr;
Jens Axboe0fa03c62019-04-19 13:34:07 -06003147 unsigned flags;
3148
Jens Axboe03b12302019-12-02 18:50:25 -07003149 if (req->io) {
Jens Axboe0b416c32019-12-15 10:57:46 -07003150 kmsg = &req->io->msg;
3151 kmsg->msg.msg_name = &addr;
3152 /* if iov is set, it's allocated already */
3153 if (!kmsg->iov)
3154 kmsg->iov = kmsg->fast_iov;
3155 kmsg->msg.msg_iter.iov = kmsg->iov;
Jens Axboe03b12302019-12-02 18:50:25 -07003156 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07003157 struct io_sr_msg *sr = &req->sr_msg;
3158
Jens Axboe0b416c32019-12-15 10:57:46 -07003159 kmsg = &io.msg;
3160 kmsg->msg.msg_name = &addr;
Jens Axboe3529d8c2019-12-19 18:24:38 -07003161
3162 io.msg.iov = io.msg.fast_iov;
3163 ret = recvmsg_copy_msghdr(&io.msg.msg, sr->msg,
3164 sr->msg_flags, &io.msg.uaddr,
3165 &io.msg.iov);
Jens Axboe03b12302019-12-02 18:50:25 -07003166 if (ret)
Jens Axboe3529d8c2019-12-19 18:24:38 -07003167 return ret;
Jens Axboe03b12302019-12-02 18:50:25 -07003168 }
Jens Axboe0fa03c62019-04-19 13:34:07 -06003169
Jens Axboee47293f2019-12-20 08:58:21 -07003170 flags = req->sr_msg.msg_flags;
3171 if (flags & MSG_DONTWAIT)
3172 req->flags |= REQ_F_NOWAIT;
3173 else if (force_nonblock)
3174 flags |= MSG_DONTWAIT;
3175
3176 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
3177 kmsg->uaddr, flags);
Jens Axboe03b12302019-12-02 18:50:25 -07003178 if (force_nonblock && ret == -EAGAIN) {
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003179 if (req->io)
3180 return -EAGAIN;
Pavel Begunkov1e950812020-02-06 19:51:16 +03003181 if (io_alloc_async_ctx(req)) {
3182 if (kmsg && kmsg->iov != kmsg->fast_iov)
3183 kfree(kmsg->iov);
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003184 return -ENOMEM;
Pavel Begunkov1e950812020-02-06 19:51:16 +03003185 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003186 memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003187 req->flags |= REQ_F_NEED_CLEANUP;
Jens Axboe0b416c32019-12-15 10:57:46 -07003188 return -EAGAIN;
Jens Axboe03b12302019-12-02 18:50:25 -07003189 }
Jens Axboe441cdbd2019-12-02 18:49:10 -07003190 if (ret == -ERESTARTSYS)
3191 ret = -EINTR;
Jens Axboe0fa03c62019-04-19 13:34:07 -06003192 }
3193
Pavel Begunkov1e950812020-02-06 19:51:16 +03003194 if (kmsg && kmsg->iov != kmsg->fast_iov)
Jens Axboe0b416c32019-12-15 10:57:46 -07003195 kfree(kmsg->iov);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03003196 req->flags &= ~REQ_F_NEED_CLEANUP;
Jens Axboe78e19bb2019-11-06 15:21:34 -07003197 io_cqring_add_event(req, ret);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07003198 if (ret < 0)
3199 req_set_fail_links(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08003200 io_put_req_find_next(req, nxt);
Jens Axboe0fa03c62019-04-19 13:34:07 -06003201 return 0;
3202#else
3203 return -EOPNOTSUPP;
3204#endif
3205}
3206
Jens Axboefddafac2020-01-04 20:19:44 -07003207static int io_recv(struct io_kiocb *req, struct io_kiocb **nxt,
3208 bool force_nonblock)
3209{
3210#if defined(CONFIG_NET)
3211 struct socket *sock;
3212 int ret;
3213
3214 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3215 return -EINVAL;
3216
3217 sock = sock_from_file(req->file, &ret);
3218 if (sock) {
3219 struct io_sr_msg *sr = &req->sr_msg;
3220 struct msghdr msg;
3221 struct iovec iov;
3222 unsigned flags;
3223
3224 ret = import_single_range(READ, sr->buf, sr->len, &iov,
3225 &msg.msg_iter);
3226 if (ret)
3227 return ret;
3228
3229 msg.msg_name = NULL;
3230 msg.msg_control = NULL;
3231 msg.msg_controllen = 0;
3232 msg.msg_namelen = 0;
3233 msg.msg_iocb = NULL;
3234 msg.msg_flags = 0;
3235
3236 flags = req->sr_msg.msg_flags;
3237 if (flags & MSG_DONTWAIT)
3238 req->flags |= REQ_F_NOWAIT;
3239 else if (force_nonblock)
3240 flags |= MSG_DONTWAIT;
3241
Jens Axboe0b7b21e2020-01-31 08:34:59 -07003242 ret = sock_recvmsg(sock, &msg, flags);
Jens Axboefddafac2020-01-04 20:19:44 -07003243 if (force_nonblock && ret == -EAGAIN)
3244 return -EAGAIN;
3245 if (ret == -ERESTARTSYS)
3246 ret = -EINTR;
3247 }
3248
3249 io_cqring_add_event(req, ret);
3250 if (ret < 0)
3251 req_set_fail_links(req);
3252 io_put_req_find_next(req, nxt);
3253 return 0;
3254#else
3255 return -EOPNOTSUPP;
3256#endif
3257}
3258
3259
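/*
 * Prep IORING_OP_ACCEPT: the sockaddr and addrlen pointers come from
 * sqe->addr/addr2, accept4() style flags from sqe->accept_flags.
 */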
Jens Axboe3529d8c2019-12-19 18:24:38 -07003260static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe17f2fe32019-10-17 14:42:58 -06003261{
3262#if defined(CONFIG_NET)
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003263 struct io_accept *accept = &req->accept;
3264
Jens Axboe17f2fe32019-10-17 14:42:58 -06003265 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3266 return -EINVAL;
Hrvoje Zeba8042d6c2019-11-25 14:40:22 -05003267 if (sqe->ioprio || sqe->len || sqe->buf_index)
Jens Axboe17f2fe32019-10-17 14:42:58 -06003268 return -EINVAL;
3269
Jens Axboed55e5f52019-12-11 16:12:15 -07003270 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
3271 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003272 accept->flags = READ_ONCE(sqe->accept_flags);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003273 return 0;
3274#else
3275 return -EOPNOTSUPP;
3276#endif
3277}
Jens Axboe17f2fe32019-10-17 14:42:58 -06003278
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003279#if defined(CONFIG_NET)
3280static int __io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
3281 bool force_nonblock)
3282{
3283 struct io_accept *accept = &req->accept;
3284 unsigned file_flags;
3285 int ret;
3286
3287 file_flags = force_nonblock ? O_NONBLOCK : 0;
3288 ret = __sys_accept4_file(req->file, file_flags, accept->addr,
3289 accept->addr_len, accept->flags);
3290 if (ret == -EAGAIN && force_nonblock)
Jens Axboe17f2fe32019-10-17 14:42:58 -06003291 return -EAGAIN;
Jens Axboe8e3cca12019-11-09 19:52:33 -07003292 if (ret == -ERESTARTSYS)
3293 ret = -EINTR;
Jens Axboe4e88d6e2019-12-07 20:59:47 -07003294 if (ret < 0)
3295 req_set_fail_links(req);
Jens Axboe78e19bb2019-11-06 15:21:34 -07003296 io_cqring_add_event(req, ret);
Jackie Liuec9c02a2019-11-08 23:50:36 +08003297 io_put_req_find_next(req, nxt);
Jens Axboe17f2fe32019-10-17 14:42:58 -06003298 return 0;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003299}
3300
3301static void io_accept_finish(struct io_wq_work **workptr)
3302{
3303 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
3304 struct io_kiocb *nxt = NULL;
3305
3306 if (io_req_cancelled(req))
3307 return;
3308 __io_accept(req, &nxt, false);
3309 if (nxt)
Jens Axboe78912932020-01-14 22:09:06 -07003310 io_wq_assign_next(workptr, nxt);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003311}
3312#endif
3313
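/*
 * IORING_OP_ACCEPT entry point: try a nonblocking accept first and,
 * if it would block, let io_accept_finish() retry it from async
 * context.
 */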
3314static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
3315 bool force_nonblock)
3316{
3317#if defined(CONFIG_NET)
3318 int ret;
3319
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003320 ret = __io_accept(req, nxt, force_nonblock);
3321 if (ret == -EAGAIN && force_nonblock) {
3322 req->work.func = io_accept_finish;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07003323 io_put_req(req);
3324 return -EAGAIN;
3325 }
3326 return 0;
Jens Axboe17f2fe32019-10-17 14:42:58 -06003327#else
3328 return -EOPNOTSUPP;
3329#endif
3330}
3331
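/*
 * Prep IORING_OP_CONNECT: stash the user sockaddr pointer and length,
 * and if an async context already exists, copy the address into the
 * kernel right away.
 */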
Jens Axboe3529d8c2019-12-19 18:24:38 -07003332static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboef499a022019-12-02 16:28:46 -07003333{
3334#if defined(CONFIG_NET)
Jens Axboe3529d8c2019-12-19 18:24:38 -07003335 struct io_connect *conn = &req->connect;
3336 struct io_async_ctx *io = req->io;
Jens Axboef499a022019-12-02 16:28:46 -07003337
Jens Axboe3fbb51c2019-12-20 08:51:52 -07003338 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3339 return -EINVAL;
3340 if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
3341 return -EINVAL;
3342
Jens Axboe3529d8c2019-12-19 18:24:38 -07003343 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
3344 conn->addr_len = READ_ONCE(sqe->addr2);
3345
3346 if (!io)
3347 return 0;
3348
3349 return move_addr_to_kernel(conn->addr, conn->addr_len,
Jens Axboe3fbb51c2019-12-20 08:51:52 -07003350 &io->connect.address);
Jens Axboef499a022019-12-02 16:28:46 -07003351#else
Jens Axboe3fbb51c2019-12-20 08:51:52 -07003352 return -EOPNOTSUPP;
Jens Axboef499a022019-12-02 16:28:46 -07003353#endif
3354}
3355
Jens Axboefc4df992019-12-10 14:38:45 -07003356static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt,
3357 bool force_nonblock)
Jens Axboef8e85cf2019-11-23 14:24:24 -07003358{
3359#if defined(CONFIG_NET)
Jens Axboef499a022019-12-02 16:28:46 -07003360 struct io_async_ctx __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07003361 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07003362 int ret;
Jens Axboef8e85cf2019-11-23 14:24:24 -07003363
Jens Axboef499a022019-12-02 16:28:46 -07003364 if (req->io) {
3365 io = req->io;
3366 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07003367 ret = move_addr_to_kernel(req->connect.addr,
3368 req->connect.addr_len,
3369 &__io.connect.address);
Jens Axboef499a022019-12-02 16:28:46 -07003370 if (ret)
3371 goto out;
3372 io = &__io;
3373 }
3374
Jens Axboe3fbb51c2019-12-20 08:51:52 -07003375 file_flags = force_nonblock ? O_NONBLOCK : 0;
3376
3377 ret = __sys_connect_file(req->file, &io->connect.address,
3378 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07003379 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003380 if (req->io)
3381 return -EAGAIN;
3382 if (io_alloc_async_ctx(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07003383 ret = -ENOMEM;
3384 goto out;
3385 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003386 memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
Jens Axboef8e85cf2019-11-23 14:24:24 -07003387 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07003388 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07003389 if (ret == -ERESTARTSYS)
3390 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07003391out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07003392 if (ret < 0)
3393 req_set_fail_links(req);
Jens Axboef8e85cf2019-11-23 14:24:24 -07003394 io_cqring_add_event(req, ret);
3395 io_put_req_find_next(req, nxt);
3396 return 0;
3397#else
3398 return -EOPNOTSUPP;
3399#endif
3400}
3401
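/*
 * Tear down one armed poll request: mark it canceled and, if it is
 * still on the waitqueue, unlink it and queue async work to complete
 * it. The request is removed from the cancel hash in any case.
 */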
Jens Axboe221c5eb2019-01-17 09:41:58 -07003402static void io_poll_remove_one(struct io_kiocb *req)
3403{
3404 struct io_poll_iocb *poll = &req->poll;
3405
3406 spin_lock(&poll->head->lock);
3407 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07003408 if (!list_empty(&poll->wait.entry)) {
3409 list_del_init(&poll->wait.entry);
Jackie Liua197f662019-11-08 08:09:12 -07003410 io_queue_async_work(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003411 }
3412 spin_unlock(&poll->head->lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07003413 hash_del(&req->hash_node);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003414}
3415
3416static void io_poll_remove_all(struct io_ring_ctx *ctx)
3417{
Jens Axboe78076bb2019-12-04 19:56:40 -07003418 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003419 struct io_kiocb *req;
Jens Axboe78076bb2019-12-04 19:56:40 -07003420 int i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003421
3422 spin_lock_irq(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07003423 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
3424 struct hlist_head *list;
3425
3426 list = &ctx->cancel_hash[i];
3427 hlist_for_each_entry_safe(req, tmp, list, hash_node)
3428 io_poll_remove_one(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003429 }
3430 spin_unlock_irq(&ctx->completion_lock);
3431}
3432
Jens Axboe47f46762019-11-09 17:43:02 -07003433static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
3434{
Jens Axboe78076bb2019-12-04 19:56:40 -07003435 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07003436 struct io_kiocb *req;
3437
Jens Axboe78076bb2019-12-04 19:56:40 -07003438 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
3439 hlist_for_each_entry(req, list, hash_node) {
3440 if (sqe_addr == req->user_data) {
Jens Axboeeac406c2019-11-14 12:09:58 -07003441 io_poll_remove_one(req);
3442 return 0;
3443 }
Jens Axboe47f46762019-11-09 17:43:02 -07003444 }
3445
3446 return -ENOENT;
3447}
3448
Jens Axboe3529d8c2019-12-19 18:24:38 -07003449static int io_poll_remove_prep(struct io_kiocb *req,
3450 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07003451{
Jens Axboe221c5eb2019-01-17 09:41:58 -07003452 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3453 return -EINVAL;
3454 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
3455 sqe->poll_events)
3456 return -EINVAL;
3457
Jens Axboe0969e782019-12-17 18:40:57 -07003458 req->poll.addr = READ_ONCE(sqe->addr);
Jens Axboe0969e782019-12-17 18:40:57 -07003459 return 0;
3460}
3461
3462/*
3463 * Find a running poll command that matches one specified in sqe->addr,
3464 * and remove it if found.
3465 */
3466static int io_poll_remove(struct io_kiocb *req)
3467{
3468 struct io_ring_ctx *ctx = req->ctx;
3469 u64 addr;
3470 int ret;
3471
Jens Axboe0969e782019-12-17 18:40:57 -07003472 addr = req->poll.addr;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003473 spin_lock_irq(&ctx->completion_lock);
Jens Axboe0969e782019-12-17 18:40:57 -07003474 ret = io_poll_cancel(ctx, addr);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003475 spin_unlock_irq(&ctx->completion_lock);
3476
Jens Axboe78e19bb2019-11-06 15:21:34 -07003477 io_cqring_add_event(req, ret);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07003478 if (ret < 0)
3479 req_set_fail_links(req);
Jens Axboee65ef562019-03-12 10:16:44 -06003480 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003481 return 0;
3482}
3483
Jens Axboeb0dd8a42019-11-18 12:14:54 -07003484static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
Jens Axboe221c5eb2019-01-17 09:41:58 -07003485{
Jackie Liua197f662019-11-08 08:09:12 -07003486 struct io_ring_ctx *ctx = req->ctx;
3487
Jens Axboe8c838782019-03-12 15:48:16 -06003488 req->poll.done = true;
Jens Axboeb0dd8a42019-11-18 12:14:54 -07003489 if (error)
3490 io_cqring_fill_event(req, error);
3491 else
3492 io_cqring_fill_event(req, mangle_poll(mask));
Jens Axboe8c838782019-03-12 15:48:16 -06003493 io_commit_cqring(ctx);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003494}
3495
Jens Axboe561fb042019-10-24 07:25:42 -06003496static void io_poll_complete_work(struct io_wq_work **workptr)
Jens Axboe221c5eb2019-01-17 09:41:58 -07003497{
Jens Axboe561fb042019-10-24 07:25:42 -06003498 struct io_wq_work *work = *workptr;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003499 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
3500 struct io_poll_iocb *poll = &req->poll;
3501 struct poll_table_struct pt = { ._key = poll->events };
3502 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe89723d02019-11-05 15:32:58 -07003503 struct io_kiocb *nxt = NULL;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003504 __poll_t mask = 0;
Jens Axboeb0dd8a42019-11-18 12:14:54 -07003505 int ret = 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003506
Jens Axboeb0dd8a42019-11-18 12:14:54 -07003507 if (work->flags & IO_WQ_WORK_CANCEL) {
Jens Axboe561fb042019-10-24 07:25:42 -06003508 WRITE_ONCE(poll->canceled, true);
Jens Axboeb0dd8a42019-11-18 12:14:54 -07003509 ret = -ECANCELED;
3510 } else if (READ_ONCE(poll->canceled)) {
3511 ret = -ECANCELED;
3512 }
Jens Axboe561fb042019-10-24 07:25:42 -06003513
Jens Axboeb0dd8a42019-11-18 12:14:54 -07003514 if (ret != -ECANCELED)
Jens Axboe221c5eb2019-01-17 09:41:58 -07003515 mask = vfs_poll(poll->file, &pt) & poll->events;
3516
3517 /*
3518 * Note that ->ki_cancel callers also delete iocb from active_reqs after
3519 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
3520 * synchronize with them. In the cancellation case the list_del_init
3521 * itself is not actually needed, but harmless so we keep it in to
3522 * avoid further branches in the fast path.
3523 */
3524 spin_lock_irq(&ctx->completion_lock);
Jens Axboeb0dd8a42019-11-18 12:14:54 -07003525 if (!mask && ret != -ECANCELED) {
Jens Axboe392edb42019-12-09 17:52:20 -07003526 add_wait_queue(poll->head, &poll->wait);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003527 spin_unlock_irq(&ctx->completion_lock);
3528 return;
3529 }
Jens Axboe78076bb2019-12-04 19:56:40 -07003530 hash_del(&req->hash_node);
Jens Axboeb0dd8a42019-11-18 12:14:54 -07003531 io_poll_complete(req, mask, ret);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003532 spin_unlock_irq(&ctx->completion_lock);
3533
Jens Axboe8c838782019-03-12 15:48:16 -06003534 io_cqring_ev_posted(ctx);
Jens Axboe89723d02019-11-05 15:32:58 -07003535
Jens Axboe4e88d6e2019-12-07 20:59:47 -07003536 if (ret < 0)
3537 req_set_fail_links(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08003538 io_put_req_find_next(req, &nxt);
Jens Axboe89723d02019-11-05 15:32:58 -07003539 if (nxt)
Jens Axboe78912932020-01-14 22:09:06 -07003540 io_wq_assign_next(workptr, nxt);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003541}
3542
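/*
 * Flush a batch of poll completions that the wakeup path queued on the
 * ctx poll_llist: complete each request under the completion lock and
 * free the requests in a batch afterwards.
 */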
Jens Axboee94f1412019-12-19 12:06:02 -07003543static void __io_poll_flush(struct io_ring_ctx *ctx, struct llist_node *nodes)
3544{
Jens Axboee94f1412019-12-19 12:06:02 -07003545 struct io_kiocb *req, *tmp;
Jens Axboe8237e042019-12-28 10:48:22 -07003546 struct req_batch rb;
Jens Axboee94f1412019-12-19 12:06:02 -07003547
Jens Axboec6ca97b302019-12-28 12:11:08 -07003548 rb.to_free = rb.need_iter = 0;
Jens Axboee94f1412019-12-19 12:06:02 -07003549 spin_lock_irq(&ctx->completion_lock);
3550 llist_for_each_entry_safe(req, tmp, nodes, llist_node) {
3551 hash_del(&req->hash_node);
3552 io_poll_complete(req, req->result, 0);
3553
Jens Axboe8237e042019-12-28 10:48:22 -07003554 if (refcount_dec_and_test(&req->refs) &&
3555 !io_req_multi_free(&rb, req)) {
3556 req->flags |= REQ_F_COMP_LOCKED;
3557 io_free_req(req);
Jens Axboee94f1412019-12-19 12:06:02 -07003558 }
3559 }
3560 spin_unlock_irq(&ctx->completion_lock);
3561
3562 io_cqring_ev_posted(ctx);
Jens Axboe8237e042019-12-28 10:48:22 -07003563 io_free_req_many(ctx, &rb);
Jens Axboee94f1412019-12-19 12:06:02 -07003564}
3565
3566static void io_poll_flush(struct io_wq_work **workptr)
3567{
3568 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
3569 struct llist_node *nodes;
3570
3571 nodes = llist_del_all(&req->ctx->poll_llist);
3572 if (nodes)
3573 __io_poll_flush(req->ctx, nodes);
3574}
3575
Jens Axboef0b493e2020-02-01 21:30:11 -07003576static void io_poll_trigger_evfd(struct io_wq_work **workptr)
3577{
3578 struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
3579
3580 eventfd_signal(req->ctx->cq_ev_fd, 1);
3581 io_put_req(req);
3582}
3583
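/*
 * Waitqueue callback for an armed poll request. On a matching event we
 * try to complete inline with a trylock of the completion lock, batch
 * the request on the ctx poll_llist if that fails, or fall back to
 * queueing async work.
 */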
Jens Axboe221c5eb2019-01-17 09:41:58 -07003584static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
3585 void *key)
3586{
Jens Axboee9444752019-11-26 15:02:04 -07003587 struct io_poll_iocb *poll = wait->private;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003588 struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
3589 struct io_ring_ctx *ctx = req->ctx;
3590 __poll_t mask = key_to_poll(key);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003591
3592 /* for instances that support it check for an event match first: */
Jens Axboe8c838782019-03-12 15:48:16 -06003593 if (mask && !(mask & poll->events))
3594 return 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003595
Jens Axboe392edb42019-12-09 17:52:20 -07003596 list_del_init(&poll->wait.entry);
Jens Axboe8c838782019-03-12 15:48:16 -06003597
Jens Axboe7c9e7f02019-11-12 08:15:53 -07003598 /*
3599 * Run completion inline if we can. We're using trylock here because
3600 * we are violating the completion_lock -> poll wq lock ordering.
3601 * If we have a link timeout we're going to need the completion_lock
3602 * for finalizing the request, mark us as having grabbed that already.
3603 */
Jens Axboee94f1412019-12-19 12:06:02 -07003604 if (mask) {
3605 unsigned long flags;
Jens Axboe8c838782019-03-12 15:48:16 -06003606
Jens Axboee94f1412019-12-19 12:06:02 -07003607 if (llist_empty(&ctx->poll_llist) &&
3608 spin_trylock_irqsave(&ctx->completion_lock, flags)) {
Jens Axboef0b493e2020-02-01 21:30:11 -07003609 bool trigger_ev;
3610
Jens Axboee94f1412019-12-19 12:06:02 -07003611 hash_del(&req->hash_node);
3612 io_poll_complete(req, mask, 0);
Jens Axboee94f1412019-12-19 12:06:02 -07003613
Jens Axboef0b493e2020-02-01 21:30:11 -07003614 trigger_ev = io_should_trigger_evfd(ctx);
3615 if (trigger_ev && eventfd_signal_count()) {
3616 trigger_ev = false;
3617 req->work.func = io_poll_trigger_evfd;
3618 } else {
3619 req->flags |= REQ_F_COMP_LOCKED;
3620 io_put_req(req);
3621 req = NULL;
3622 }
3623 spin_unlock_irqrestore(&ctx->completion_lock, flags);
3624 __io_cqring_ev_posted(ctx, trigger_ev);
Jens Axboee94f1412019-12-19 12:06:02 -07003625 } else {
3626 req->result = mask;
3627 req->llist_node.next = NULL;
3628 /* if the list wasn't empty, we're done */
3629 if (!llist_add(&req->llist_node, &ctx->poll_llist))
3630 req = NULL;
3631 else
3632 req->work.func = io_poll_flush;
3633 }
Jens Axboe8c838782019-03-12 15:48:16 -06003634 }
Jens Axboee94f1412019-12-19 12:06:02 -07003635 if (req)
3636 io_queue_async_work(req);
Jens Axboe8c838782019-03-12 15:48:16 -06003637
Jens Axboe221c5eb2019-01-17 09:41:58 -07003638 return 1;
3639}
3640
3641struct io_poll_table {
3642 struct poll_table_struct pt;
3643 struct io_kiocb *req;
3644 int error;
3645};
3646
3647static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
3648 struct poll_table_struct *p)
3649{
3650 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
3651
3652 if (unlikely(pt->req->poll.head)) {
3653 pt->error = -EINVAL;
3654 return;
3655 }
3656
3657 pt->error = 0;
3658 pt->req->poll.head = head;
Jens Axboe392edb42019-12-09 17:52:20 -07003659 add_wait_queue(head, &pt->req->poll.wait);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003660}
3661
Jens Axboeeac406c2019-11-14 12:09:58 -07003662static void io_poll_req_insert(struct io_kiocb *req)
3663{
3664 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe78076bb2019-12-04 19:56:40 -07003665 struct hlist_head *list;
Jens Axboeeac406c2019-11-14 12:09:58 -07003666
Jens Axboe78076bb2019-12-04 19:56:40 -07003667 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
3668 hlist_add_head(&req->hash_node, list);
Jens Axboeeac406c2019-11-14 12:09:58 -07003669}
3670
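/*
 * Prep IORING_OP_POLL_ADD: translate the user poll event mask and
 * always watch for EPOLLERR and EPOLLHUP as well.
 */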
Jens Axboe3529d8c2019-12-19 18:24:38 -07003671static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07003672{
3673 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003674 u16 events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003675
3676 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3677 return -EINVAL;
3678 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
3679 return -EINVAL;
Jens Axboe09bb8392019-03-13 12:39:28 -06003680 if (!poll->file)
3681 return -EBADF;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003682
Jens Axboe221c5eb2019-01-17 09:41:58 -07003683 events = READ_ONCE(sqe->poll_events);
3684 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
Jens Axboe0969e782019-12-17 18:40:57 -07003685 return 0;
3686}
3687
3688static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt)
3689{
3690 struct io_poll_iocb *poll = &req->poll;
3691 struct io_ring_ctx *ctx = req->ctx;
3692 struct io_poll_table ipt;
3693 bool cancel = false;
3694 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07003695
3696 INIT_IO_WORK(&req->work, io_poll_complete_work);
Jens Axboe78076bb2019-12-04 19:56:40 -07003697 INIT_HLIST_NODE(&req->hash_node);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003698
Jens Axboe221c5eb2019-01-17 09:41:58 -07003699 poll->head = NULL;
Jens Axboe8c838782019-03-12 15:48:16 -06003700 poll->done = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003701 poll->canceled = false;
3702
3703 ipt.pt._qproc = io_poll_queue_proc;
3704 ipt.pt._key = poll->events;
3705 ipt.req = req;
3706 ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
3707
3708	/* initialize the list so that we can do list_empty checks */
Jens Axboe392edb42019-12-09 17:52:20 -07003709 INIT_LIST_HEAD(&poll->wait.entry);
3710 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
3711 poll->wait.private = poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003712
Jens Axboe36703242019-07-25 10:20:18 -06003713 INIT_LIST_HEAD(&req->list);
3714
Jens Axboe221c5eb2019-01-17 09:41:58 -07003715 mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003716
3717 spin_lock_irq(&ctx->completion_lock);
Jens Axboe8c838782019-03-12 15:48:16 -06003718 if (likely(poll->head)) {
3719 spin_lock(&poll->head->lock);
Jens Axboe392edb42019-12-09 17:52:20 -07003720 if (unlikely(list_empty(&poll->wait.entry))) {
Jens Axboe8c838782019-03-12 15:48:16 -06003721 if (ipt.error)
3722 cancel = true;
3723 ipt.error = 0;
3724 mask = 0;
3725 }
3726 if (mask || ipt.error)
Jens Axboe392edb42019-12-09 17:52:20 -07003727 list_del_init(&poll->wait.entry);
Jens Axboe8c838782019-03-12 15:48:16 -06003728 else if (cancel)
3729 WRITE_ONCE(poll->canceled, true);
3730 else if (!poll->done) /* actually waiting for an event */
Jens Axboeeac406c2019-11-14 12:09:58 -07003731 io_poll_req_insert(req);
Jens Axboe8c838782019-03-12 15:48:16 -06003732 spin_unlock(&poll->head->lock);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003733 }
Jens Axboe8c838782019-03-12 15:48:16 -06003734 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06003735 ipt.error = 0;
Jens Axboeb0dd8a42019-11-18 12:14:54 -07003736 io_poll_complete(req, mask, 0);
Jens Axboe8c838782019-03-12 15:48:16 -06003737 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07003738 spin_unlock_irq(&ctx->completion_lock);
3739
Jens Axboe8c838782019-03-12 15:48:16 -06003740 if (mask) {
3741 io_cqring_ev_posted(ctx);
Jackie Liuec9c02a2019-11-08 23:50:36 +08003742 io_put_req_find_next(req, nxt);
Jens Axboe221c5eb2019-01-17 09:41:58 -07003743 }
Jens Axboe8c838782019-03-12 15:48:16 -06003744 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07003745}
3746
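/*
 * hrtimer callback for IORING_OP_TIMEOUT: unless a removal raced with
 * us, take the request off the timeout list, fix up the sequence of
 * the timeouts queued before it and complete this one with -ETIME.
 */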
Jens Axboe5262f562019-09-17 12:26:57 -06003747static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
3748{
Jens Axboead8a48a2019-11-15 08:49:11 -07003749 struct io_timeout_data *data = container_of(timer,
3750 struct io_timeout_data, timer);
3751 struct io_kiocb *req = data->req;
3752 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06003753 unsigned long flags;
3754
Jens Axboe5262f562019-09-17 12:26:57 -06003755 atomic_inc(&ctx->cq_timeouts);
3756
3757 spin_lock_irqsave(&ctx->completion_lock, flags);
zhangyi (F)ef036812019-10-23 15:10:08 +08003758 /*
Jens Axboe11365042019-10-16 09:08:32 -06003759 * We could be racing with timeout deletion. If the list is empty,
3760 * then timeout lookup already found it and will be handling it.
zhangyi (F)ef036812019-10-23 15:10:08 +08003761 */
Jens Axboe842f9612019-10-29 12:34:10 -06003762 if (!list_empty(&req->list)) {
Jens Axboe11365042019-10-16 09:08:32 -06003763 struct io_kiocb *prev;
Jens Axboe5262f562019-09-17 12:26:57 -06003764
Jens Axboe11365042019-10-16 09:08:32 -06003765 /*
3766	 * Adjust the sequence of the reqs queued before the current one, since it
Brian Gianforcarod195a662019-12-13 03:09:50 -08003767 * will consume a slot in the cq_ring and the cq_tail
Jens Axboe11365042019-10-16 09:08:32 -06003768 * pointer will be increased, otherwise other timeout reqs may
3769	 * complete early without waiting for enough wait_nr.
3770 */
3771 prev = req;
3772 list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
3773 prev->sequence++;
Jens Axboe11365042019-10-16 09:08:32 -06003774 list_del_init(&req->list);
Jens Axboe11365042019-10-16 09:08:32 -06003775 }
Jens Axboe842f9612019-10-29 12:34:10 -06003776
Jens Axboe78e19bb2019-11-06 15:21:34 -07003777 io_cqring_fill_event(req, -ETIME);
Jens Axboe5262f562019-09-17 12:26:57 -06003778 io_commit_cqring(ctx);
3779 spin_unlock_irqrestore(&ctx->completion_lock, flags);
3780
3781 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07003782 req_set_fail_links(req);
Jens Axboe5262f562019-09-17 12:26:57 -06003783 io_put_req(req);
3784 return HRTIMER_NORESTART;
3785}
3786
Jens Axboe47f46762019-11-09 17:43:02 -07003787static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
3788{
3789 struct io_kiocb *req;
3790 int ret = -ENOENT;
3791
3792 list_for_each_entry(req, &ctx->timeout_list, list) {
3793 if (user_data == req->user_data) {
3794 list_del_init(&req->list);
3795 ret = 0;
3796 break;
3797 }
3798 }
3799
3800 if (ret == -ENOENT)
3801 return ret;
3802
Jens Axboe2d283902019-12-04 11:08:05 -07003803 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
Jens Axboe47f46762019-11-09 17:43:02 -07003804 if (ret == -1)
3805 return -EALREADY;
3806
Jens Axboe4e88d6e2019-12-07 20:59:47 -07003807 req_set_fail_links(req);
Jens Axboe47f46762019-11-09 17:43:02 -07003808 io_cqring_fill_event(req, -ECANCELED);
3809 io_put_req(req);
3810 return 0;
3811}
3812
Jens Axboe3529d8c2019-12-19 18:24:38 -07003813static int io_timeout_remove_prep(struct io_kiocb *req,
3814 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07003815{
Jens Axboeb29472e2019-12-17 18:50:29 -07003816 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3817 return -EINVAL;
3818 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
3819 return -EINVAL;
3820
3821 req->timeout.addr = READ_ONCE(sqe->addr);
3822 req->timeout.flags = READ_ONCE(sqe->timeout_flags);
3823 if (req->timeout.flags)
3824 return -EINVAL;
3825
Jens Axboeb29472e2019-12-17 18:50:29 -07003826 return 0;
3827}
3828
Jens Axboe11365042019-10-16 09:08:32 -06003829/*
3830 * Remove or update an existing timeout command
3831 */
Jens Axboefc4df992019-12-10 14:38:45 -07003832static int io_timeout_remove(struct io_kiocb *req)
Jens Axboe11365042019-10-16 09:08:32 -06003833{
3834 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07003835 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06003836
Jens Axboe11365042019-10-16 09:08:32 -06003837 spin_lock_irq(&ctx->completion_lock);
Jens Axboeb29472e2019-12-17 18:50:29 -07003838 ret = io_timeout_cancel(ctx, req->timeout.addr);
Jens Axboe11365042019-10-16 09:08:32 -06003839
Jens Axboe47f46762019-11-09 17:43:02 -07003840 io_cqring_fill_event(req, ret);
Jens Axboe11365042019-10-16 09:08:32 -06003841 io_commit_cqring(ctx);
3842 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06003843 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07003844 if (ret < 0)
3845 req_set_fail_links(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08003846 io_put_req(req);
Jens Axboe11365042019-10-16 09:08:32 -06003847 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06003848}
3849
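/*
 * Prep IORING_OP_TIMEOUT and linked timeouts: validate the flags, copy
 * the timespec from userspace and init the hrtimer in absolute or
 * relative mode.
 */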
Jens Axboe3529d8c2019-12-19 18:24:38 -07003850static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07003851 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06003852{
Jens Axboead8a48a2019-11-15 08:49:11 -07003853 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06003854 unsigned flags;
Jens Axboe5262f562019-09-17 12:26:57 -06003855
Jens Axboead8a48a2019-11-15 08:49:11 -07003856 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06003857 return -EINVAL;
Jens Axboead8a48a2019-11-15 08:49:11 -07003858 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
Jens Axboea41525a2019-10-15 16:48:15 -06003859 return -EINVAL;
Jens Axboe2d283902019-12-04 11:08:05 -07003860 if (sqe->off && is_timeout_link)
3861 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06003862 flags = READ_ONCE(sqe->timeout_flags);
3863 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06003864 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06003865
Jens Axboe26a61672019-12-20 09:02:01 -07003866 req->timeout.count = READ_ONCE(sqe->off);
3867
Jens Axboe3529d8c2019-12-19 18:24:38 -07003868 if (!req->io && io_alloc_async_ctx(req))
Jens Axboe26a61672019-12-20 09:02:01 -07003869 return -ENOMEM;
3870
3871 data = &req->io->timeout;
Jens Axboead8a48a2019-11-15 08:49:11 -07003872 data->req = req;
Jens Axboead8a48a2019-11-15 08:49:11 -07003873 req->flags |= REQ_F_TIMEOUT;
3874
3875 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06003876 return -EFAULT;
3877
Jens Axboe11365042019-10-16 09:08:32 -06003878 if (flags & IORING_TIMEOUT_ABS)
Jens Axboead8a48a2019-11-15 08:49:11 -07003879 data->mode = HRTIMER_MODE_ABS;
Jens Axboe11365042019-10-16 09:08:32 -06003880 else
Jens Axboead8a48a2019-11-15 08:49:11 -07003881 data->mode = HRTIMER_MODE_REL;
Jens Axboe11365042019-10-16 09:08:32 -06003882
Jens Axboead8a48a2019-11-15 08:49:11 -07003883 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
3884 return 0;
3885}
3886
Jens Axboefc4df992019-12-10 14:38:45 -07003887static int io_timeout(struct io_kiocb *req)
Jens Axboead8a48a2019-11-15 08:49:11 -07003888{
3889 unsigned count;
3890 struct io_ring_ctx *ctx = req->ctx;
3891 struct io_timeout_data *data;
3892 struct list_head *entry;
3893 unsigned span = 0;
Jens Axboead8a48a2019-11-15 08:49:11 -07003894
Jens Axboe2d283902019-12-04 11:08:05 -07003895 data = &req->io->timeout;
Jens Axboe93bd25b2019-11-11 23:34:31 -07003896
Jens Axboe5262f562019-09-17 12:26:57 -06003897 /*
3898 * sqe->off holds how many events that need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07003899 * timeout event to be satisfied. If it isn't set, then this is
3900 * a pure timeout request, sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06003901 */
Jens Axboe26a61672019-12-20 09:02:01 -07003902 count = req->timeout.count;
Jens Axboe93bd25b2019-11-11 23:34:31 -07003903 if (!count) {
3904 req->flags |= REQ_F_TIMEOUT_NOSEQ;
3905 spin_lock_irq(&ctx->completion_lock);
3906 entry = ctx->timeout_list.prev;
3907 goto add;
3908 }
Jens Axboe5262f562019-09-17 12:26:57 -06003909
3910 req->sequence = ctx->cached_sq_head + count - 1;
Jens Axboe2d283902019-12-04 11:08:05 -07003911 data->seq_offset = count;
Jens Axboe5262f562019-09-17 12:26:57 -06003912
3913 /*
3914 * Insertion sort, ensuring the first entry in the list is always
3915 * the one we need first.
3916 */
Jens Axboe5262f562019-09-17 12:26:57 -06003917 spin_lock_irq(&ctx->completion_lock);
3918 list_for_each_prev(entry, &ctx->timeout_list) {
3919 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
yangerkun5da0fb12019-10-15 21:59:29 +08003920 unsigned nxt_sq_head;
3921 long long tmp, tmp_nxt;
Jens Axboe2d283902019-12-04 11:08:05 -07003922 u32 nxt_offset = nxt->io->timeout.seq_offset;
Jens Axboe5262f562019-09-17 12:26:57 -06003923
Jens Axboe93bd25b2019-11-11 23:34:31 -07003924 if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
3925 continue;
3926
yangerkun5da0fb12019-10-15 21:59:29 +08003927 /*
3928 * Since cached_sq_head + count - 1 can overflow, use type long
3929 * long to store it.
3930 */
3931 tmp = (long long)ctx->cached_sq_head + count - 1;
Pavel Begunkovcc42e0a2019-11-25 23:14:38 +03003932 nxt_sq_head = nxt->sequence - nxt_offset + 1;
3933 tmp_nxt = (long long)nxt_sq_head + nxt_offset - 1;
yangerkun5da0fb12019-10-15 21:59:29 +08003934
3935 /*
3936 * cached_sq_head may overflow, and it will never overflow twice
3937	 * while any timeout req is still pending and valid.
3938 */
3939 if (ctx->cached_sq_head < nxt_sq_head)
yangerkun8b07a652019-10-17 12:12:35 +08003940 tmp += UINT_MAX;
yangerkun5da0fb12019-10-15 21:59:29 +08003941
zhangyi (F)a1f58ba2019-10-23 15:10:09 +08003942 if (tmp > tmp_nxt)
Jens Axboe5262f562019-09-17 12:26:57 -06003943 break;
zhangyi (F)a1f58ba2019-10-23 15:10:09 +08003944
3945 /*
3946	 * The sequence of the inserted req, and of every req after it,
3947	 * must be adjusted because each timeout req consumes a slot.
3948 */
3949 span++;
3950 nxt->sequence++;
Jens Axboe5262f562019-09-17 12:26:57 -06003951 }
zhangyi (F)a1f58ba2019-10-23 15:10:09 +08003952 req->sequence -= span;
Jens Axboe93bd25b2019-11-11 23:34:31 -07003953add:
Jens Axboe5262f562019-09-17 12:26:57 -06003954 list_add(&req->list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07003955 data->timer.function = io_timeout_fn;
3956 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe842f9612019-10-29 12:34:10 -06003957 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06003958 return 0;
3959}
3960
Jens Axboe62755e32019-10-28 21:49:21 -06003961static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06003962{
Jens Axboe62755e32019-10-28 21:49:21 -06003963 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Jens Axboede0617e2019-04-06 21:51:27 -06003964
Jens Axboe62755e32019-10-28 21:49:21 -06003965 return req->user_data == (unsigned long) data;
3966}
3967
Jens Axboee977d6d2019-11-05 12:39:45 -07003968static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
Jens Axboe62755e32019-10-28 21:49:21 -06003969{
Jens Axboe62755e32019-10-28 21:49:21 -06003970 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06003971 int ret = 0;
3972
Jens Axboe62755e32019-10-28 21:49:21 -06003973 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
3974 switch (cancel_ret) {
3975 case IO_WQ_CANCEL_OK:
3976 ret = 0;
3977 break;
3978 case IO_WQ_CANCEL_RUNNING:
3979 ret = -EALREADY;
3980 break;
3981 case IO_WQ_CANCEL_NOTFOUND:
3982 ret = -ENOENT;
3983 break;
3984 }
3985
Jens Axboee977d6d2019-11-05 12:39:45 -07003986 return ret;
3987}
3988
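/*
 * Try the cancellation paths in order: io-wq work first, then a
 * pending timeout, then an armed poll request, and post a single CQE
 * with the result.
 */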
Jens Axboe47f46762019-11-09 17:43:02 -07003989static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
3990 struct io_kiocb *req, __u64 sqe_addr,
Jens Axboeb0dd8a42019-11-18 12:14:54 -07003991 struct io_kiocb **nxt, int success_ret)
Jens Axboe47f46762019-11-09 17:43:02 -07003992{
3993 unsigned long flags;
3994 int ret;
3995
3996 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
3997 if (ret != -ENOENT) {
3998 spin_lock_irqsave(&ctx->completion_lock, flags);
3999 goto done;
4000 }
4001
4002 spin_lock_irqsave(&ctx->completion_lock, flags);
4003 ret = io_timeout_cancel(ctx, sqe_addr);
4004 if (ret != -ENOENT)
4005 goto done;
4006 ret = io_poll_cancel(ctx, sqe_addr);
4007done:
Jens Axboeb0dd8a42019-11-18 12:14:54 -07004008 if (!ret)
4009 ret = success_ret;
Jens Axboe47f46762019-11-09 17:43:02 -07004010 io_cqring_fill_event(req, ret);
4011 io_commit_cqring(ctx);
4012 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4013 io_cqring_ev_posted(ctx);
4014
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004015 if (ret < 0)
4016 req_set_fail_links(req);
Jens Axboe47f46762019-11-09 17:43:02 -07004017 io_put_req_find_next(req, nxt);
4018}
4019
Jens Axboe3529d8c2019-12-19 18:24:38 -07004020static int io_async_cancel_prep(struct io_kiocb *req,
4021 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07004022{
Jens Axboefbf23842019-12-17 18:45:56 -07004023 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07004024 return -EINVAL;
4025 if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
4026 sqe->cancel_flags)
4027 return -EINVAL;
4028
Jens Axboefbf23842019-12-17 18:45:56 -07004029 req->cancel.addr = READ_ONCE(sqe->addr);
4030 return 0;
4031}
4032
4033static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
4034{
4035 struct io_ring_ctx *ctx = req->ctx;
Jens Axboefbf23842019-12-17 18:45:56 -07004036
4037 io_async_find_and_cancel(ctx, req, req->cancel.addr, nxt, 0);
Jens Axboe62755e32019-10-28 21:49:21 -06004038 return 0;
4039}
4040
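/*
 * Prep IORING_OP_FILES_UPDATE: offset into the fixed file table, the
 * number of fds to update and the user pointer to the new fd array.
 */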
Jens Axboe05f3fb32019-12-09 11:22:50 -07004041static int io_files_update_prep(struct io_kiocb *req,
4042 const struct io_uring_sqe *sqe)
4043{
4044 if (sqe->flags || sqe->ioprio || sqe->rw_flags)
4045 return -EINVAL;
4046
4047 req->files_update.offset = READ_ONCE(sqe->off);
4048 req->files_update.nr_args = READ_ONCE(sqe->len);
4049 if (!req->files_update.nr_args)
4050 return -EINVAL;
4051 req->files_update.arg = READ_ONCE(sqe->addr);
4052 return 0;
4053}
4054
4055static int io_files_update(struct io_kiocb *req, bool force_nonblock)
4056{
4057 struct io_ring_ctx *ctx = req->ctx;
4058 struct io_uring_files_update up;
4059 int ret;
4060
Jens Axboef86cd202020-01-29 13:46:44 -07004061 if (force_nonblock)
Jens Axboe05f3fb32019-12-09 11:22:50 -07004062 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07004063
4064 up.offset = req->files_update.offset;
4065 up.fds = req->files_update.arg;
4066
4067 mutex_lock(&ctx->uring_lock);
4068 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
4069 mutex_unlock(&ctx->uring_lock);
4070
4071 if (ret < 0)
4072 req_set_fail_links(req);
4073 io_cqring_add_event(req, ret);
4074 io_put_req(req);
4075 return 0;
4076}
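/*
 * Illustrative userspace-side sketch (hypothetical helper, not kernel
 * code): the prep above reads the same triple the application fills in,
 * i.e. sqe->off is the first slot to update in the registered file table,
 * sqe->len the number of slots, and sqe->addr a pointer to an array of
 * file descriptors (an entry of -1 is conventionally used to clear a
 * slot, per the file registration ABI).
 *
 *	static void prep_files_update(struct io_uring_sqe *sqe, int *fds,
 *				      unsigned nr, unsigned first_slot)
 *	{
 *		memset(sqe, 0, sizeof(*sqe));
 *		sqe->opcode = IORING_OP_FILES_UPDATE;
 *		sqe->addr = (unsigned long) fds;
 *		sqe->len = nr;
 *		sqe->off = first_slot;
 *	}
 */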
4077
Jens Axboe3529d8c2019-12-19 18:24:38 -07004078static int io_req_defer_prep(struct io_kiocb *req,
4079 const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07004080{
Jens Axboee7815732019-12-17 19:45:06 -07004081 ssize_t ret = 0;
Jens Axboef67676d2019-12-02 11:03:47 -07004082
Jens Axboef86cd202020-01-29 13:46:44 -07004083 if (io_op_defs[req->opcode].file_table) {
4084 ret = io_grab_files(req);
4085 if (unlikely(ret))
4086 return ret;
4087 }
4088
Jens Axboecccf0ee2020-01-27 16:34:48 -07004089 io_req_work_grab_env(req, &io_op_defs[req->opcode]);
4090
Jens Axboed625c6e2019-12-17 19:53:05 -07004091 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07004092 case IORING_OP_NOP:
4093 break;
Jens Axboef67676d2019-12-02 11:03:47 -07004094 case IORING_OP_READV:
4095 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07004096 case IORING_OP_READ:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004097 ret = io_read_prep(req, sqe, true);
Jens Axboef67676d2019-12-02 11:03:47 -07004098 break;
4099 case IORING_OP_WRITEV:
4100 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07004101 case IORING_OP_WRITE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004102 ret = io_write_prep(req, sqe, true);
Jens Axboef67676d2019-12-02 11:03:47 -07004103 break;
Jens Axboe0969e782019-12-17 18:40:57 -07004104 case IORING_OP_POLL_ADD:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004105 ret = io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07004106 break;
4107 case IORING_OP_POLL_REMOVE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004108 ret = io_poll_remove_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07004109 break;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004110 case IORING_OP_FSYNC:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004111 ret = io_prep_fsync(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004112 break;
4113 case IORING_OP_SYNC_FILE_RANGE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004114 ret = io_prep_sfr(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004115 break;
Jens Axboe03b12302019-12-02 18:50:25 -07004116 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07004117 case IORING_OP_SEND:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004118 ret = io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07004119 break;
4120 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07004121 case IORING_OP_RECV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004122 ret = io_recvmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07004123 break;
Jens Axboef499a022019-12-02 16:28:46 -07004124 case IORING_OP_CONNECT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004125 ret = io_connect_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07004126 break;
Jens Axboe2d283902019-12-04 11:08:05 -07004127 case IORING_OP_TIMEOUT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004128 ret = io_timeout_prep(req, sqe, false);
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004129 break;
Jens Axboeb29472e2019-12-17 18:50:29 -07004130 case IORING_OP_TIMEOUT_REMOVE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004131 ret = io_timeout_remove_prep(req, sqe);
Jens Axboeb29472e2019-12-17 18:50:29 -07004132 break;
Jens Axboefbf23842019-12-17 18:45:56 -07004133 case IORING_OP_ASYNC_CANCEL:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004134 ret = io_async_cancel_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07004135 break;
Jens Axboe2d283902019-12-04 11:08:05 -07004136 case IORING_OP_LINK_TIMEOUT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004137 ret = io_timeout_prep(req, sqe, true);
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004138 break;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004139 case IORING_OP_ACCEPT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004140 ret = io_accept_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004141 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07004142 case IORING_OP_FALLOCATE:
4143 ret = io_fallocate_prep(req, sqe);
4144 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004145 case IORING_OP_OPENAT:
4146 ret = io_openat_prep(req, sqe);
4147 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07004148 case IORING_OP_CLOSE:
4149 ret = io_close_prep(req, sqe);
4150 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07004151 case IORING_OP_FILES_UPDATE:
4152 ret = io_files_update_prep(req, sqe);
4153 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004154 case IORING_OP_STATX:
4155 ret = io_statx_prep(req, sqe);
4156 break;
Jens Axboe4840e412019-12-25 22:03:45 -07004157 case IORING_OP_FADVISE:
4158 ret = io_fadvise_prep(req, sqe);
4159 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07004160 case IORING_OP_MADVISE:
4161 ret = io_madvise_prep(req, sqe);
4162 break;
Jens Axboecebdb982020-01-08 17:59:24 -07004163 case IORING_OP_OPENAT2:
4164 ret = io_openat2_prep(req, sqe);
4165 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004166 case IORING_OP_EPOLL_CTL:
4167 ret = io_epoll_ctl_prep(req, sqe);
4168 break;
Jens Axboef67676d2019-12-02 11:03:47 -07004169 default:
Jens Axboee7815732019-12-17 19:45:06 -07004170 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
4171 req->opcode);
4172 ret = -EINVAL;
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004173 break;
Jens Axboef67676d2019-12-02 11:03:47 -07004174 }
4175
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004176 return ret;
Jens Axboef67676d2019-12-02 11:03:47 -07004177}
4178
Jens Axboe3529d8c2019-12-19 18:24:38 -07004179static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboede0617e2019-04-06 21:51:27 -06004180{
Jackie Liua197f662019-11-08 08:09:12 -07004181 struct io_ring_ctx *ctx = req->ctx;
Jens Axboef67676d2019-12-02 11:03:47 -07004182 int ret;
Jens Axboede0617e2019-04-06 21:51:27 -06004183
Bob Liu9d858b22019-11-13 18:06:25 +08004184	/* Still need to defer if there are pending reqs in the defer list. */
4185 if (!req_need_defer(req) && list_empty(&ctx->defer_list))
Jens Axboede0617e2019-04-06 21:51:27 -06004186 return 0;
4187
Jens Axboe3529d8c2019-12-19 18:24:38 -07004188 if (!req->io && io_alloc_async_ctx(req))
Jens Axboede0617e2019-04-06 21:51:27 -06004189 return -EAGAIN;
4190
Jens Axboe3529d8c2019-12-19 18:24:38 -07004191 ret = io_req_defer_prep(req, sqe);
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004192 if (ret < 0)
Jens Axboe2d283902019-12-04 11:08:05 -07004193 return ret;
Jens Axboe2d283902019-12-04 11:08:05 -07004194
Jens Axboede0617e2019-04-06 21:51:27 -06004195 spin_lock_irq(&ctx->completion_lock);
Bob Liu9d858b22019-11-13 18:06:25 +08004196 if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
Jens Axboede0617e2019-04-06 21:51:27 -06004197 spin_unlock_irq(&ctx->completion_lock);
Jens Axboede0617e2019-04-06 21:51:27 -06004198 return 0;
4199 }
4200
Jens Axboe915967f2019-11-21 09:01:20 -07004201 trace_io_uring_defer(ctx, req, req->user_data);
Jens Axboede0617e2019-04-06 21:51:27 -06004202 list_add_tail(&req->list, &ctx->defer_list);
4203 spin_unlock_irq(&ctx->completion_lock);
4204 return -EIOCBQUEUED;
4205}
4206
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03004207static void io_cleanup_req(struct io_kiocb *req)
4208{
4209 struct io_async_ctx *io = req->io;
4210
4211 switch (req->opcode) {
4212 case IORING_OP_READV:
4213 case IORING_OP_READ_FIXED:
4214 case IORING_OP_READ:
4215 case IORING_OP_WRITEV:
4216 case IORING_OP_WRITE_FIXED:
4217 case IORING_OP_WRITE:
4218 if (io->rw.iov != io->rw.fast_iov)
4219 kfree(io->rw.iov);
4220 break;
4221 case IORING_OP_SENDMSG:
4222 case IORING_OP_RECVMSG:
4223 if (io->msg.iov != io->msg.fast_iov)
4224 kfree(io->msg.iov);
4225 break;
4226 }
4227
4228 req->flags &= ~REQ_F_NEED_CLEANUP;
4229}
4230
Jens Axboe3529d8c2019-12-19 18:24:38 -07004231static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
4232 struct io_kiocb **nxt, bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07004233{
Jackie Liua197f662019-11-08 08:09:12 -07004234 struct io_ring_ctx *ctx = req->ctx;
Jens Axboed625c6e2019-12-17 19:53:05 -07004235 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004236
Jens Axboed625c6e2019-12-17 19:53:05 -07004237 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07004238 case IORING_OP_NOP:
Jens Axboe78e19bb2019-11-06 15:21:34 -07004239 ret = io_nop(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004240 break;
4241 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004242 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07004243 case IORING_OP_READ:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004244 if (sqe) {
4245 ret = io_read_prep(req, sqe, force_nonblock);
4246 if (ret < 0)
4247 break;
4248 }
Pavel Begunkov267bc902019-11-07 01:41:08 +03004249 ret = io_read(req, nxt, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004250 break;
4251 case IORING_OP_WRITEV:
Jens Axboeedafcce2019-01-09 09:16:05 -07004252 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07004253 case IORING_OP_WRITE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004254 if (sqe) {
4255 ret = io_write_prep(req, sqe, force_nonblock);
4256 if (ret < 0)
4257 break;
4258 }
Pavel Begunkov267bc902019-11-07 01:41:08 +03004259 ret = io_write(req, nxt, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004260 break;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004261 case IORING_OP_FSYNC:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004262 if (sqe) {
4263 ret = io_prep_fsync(req, sqe);
4264 if (ret < 0)
4265 break;
4266 }
Jens Axboefc4df992019-12-10 14:38:45 -07004267 ret = io_fsync(req, nxt, force_nonblock);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07004268 break;
Jens Axboe221c5eb2019-01-17 09:41:58 -07004269 case IORING_OP_POLL_ADD:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004270 if (sqe) {
4271 ret = io_poll_add_prep(req, sqe);
4272 if (ret)
4273 break;
4274 }
Jens Axboefc4df992019-12-10 14:38:45 -07004275 ret = io_poll_add(req, nxt);
Jens Axboe221c5eb2019-01-17 09:41:58 -07004276 break;
4277 case IORING_OP_POLL_REMOVE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004278 if (sqe) {
4279 ret = io_poll_remove_prep(req, sqe);
4280 if (ret < 0)
4281 break;
4282 }
Jens Axboefc4df992019-12-10 14:38:45 -07004283 ret = io_poll_remove(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07004284 break;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004285 case IORING_OP_SYNC_FILE_RANGE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004286 if (sqe) {
4287 ret = io_prep_sfr(req, sqe);
4288 if (ret < 0)
4289 break;
4290 }
Jens Axboefc4df992019-12-10 14:38:45 -07004291 ret = io_sync_file_range(req, nxt, force_nonblock);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06004292 break;
Jens Axboe0fa03c62019-04-19 13:34:07 -06004293 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07004294 case IORING_OP_SEND:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004295 if (sqe) {
4296 ret = io_sendmsg_prep(req, sqe);
4297 if (ret < 0)
4298 break;
4299 }
Jens Axboefddafac2020-01-04 20:19:44 -07004300 if (req->opcode == IORING_OP_SENDMSG)
4301 ret = io_sendmsg(req, nxt, force_nonblock);
4302 else
4303 ret = io_send(req, nxt, force_nonblock);
Jens Axboe0fa03c62019-04-19 13:34:07 -06004304 break;
Jens Axboeaa1fa282019-04-19 13:38:09 -06004305 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07004306 case IORING_OP_RECV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004307 if (sqe) {
4308 ret = io_recvmsg_prep(req, sqe);
4309 if (ret)
4310 break;
4311 }
Jens Axboefddafac2020-01-04 20:19:44 -07004312 if (req->opcode == IORING_OP_RECVMSG)
4313 ret = io_recvmsg(req, nxt, force_nonblock);
4314 else
4315 ret = io_recv(req, nxt, force_nonblock);
Jens Axboeaa1fa282019-04-19 13:38:09 -06004316 break;
Jens Axboe5262f562019-09-17 12:26:57 -06004317 case IORING_OP_TIMEOUT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004318 if (sqe) {
4319 ret = io_timeout_prep(req, sqe, false);
4320 if (ret)
4321 break;
4322 }
Jens Axboefc4df992019-12-10 14:38:45 -07004323 ret = io_timeout(req);
Jens Axboe5262f562019-09-17 12:26:57 -06004324 break;
Jens Axboe11365042019-10-16 09:08:32 -06004325 case IORING_OP_TIMEOUT_REMOVE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004326 if (sqe) {
4327 ret = io_timeout_remove_prep(req, sqe);
4328 if (ret)
4329 break;
4330 }
Jens Axboefc4df992019-12-10 14:38:45 -07004331 ret = io_timeout_remove(req);
Jens Axboe11365042019-10-16 09:08:32 -06004332 break;
Jens Axboe17f2fe32019-10-17 14:42:58 -06004333 case IORING_OP_ACCEPT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004334 if (sqe) {
4335 ret = io_accept_prep(req, sqe);
4336 if (ret)
4337 break;
4338 }
Jens Axboefc4df992019-12-10 14:38:45 -07004339 ret = io_accept(req, nxt, force_nonblock);
Jens Axboe17f2fe32019-10-17 14:42:58 -06004340 break;
Jens Axboef8e85cf2019-11-23 14:24:24 -07004341 case IORING_OP_CONNECT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004342 if (sqe) {
4343 ret = io_connect_prep(req, sqe);
4344 if (ret)
4345 break;
4346 }
Jens Axboefc4df992019-12-10 14:38:45 -07004347 ret = io_connect(req, nxt, force_nonblock);
Jens Axboef8e85cf2019-11-23 14:24:24 -07004348 break;
Jens Axboe62755e32019-10-28 21:49:21 -06004349 case IORING_OP_ASYNC_CANCEL:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004350 if (sqe) {
4351 ret = io_async_cancel_prep(req, sqe);
4352 if (ret)
4353 break;
4354 }
Jens Axboefc4df992019-12-10 14:38:45 -07004355 ret = io_async_cancel(req, nxt);
Jens Axboe62755e32019-10-28 21:49:21 -06004356 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07004357 case IORING_OP_FALLOCATE:
4358 if (sqe) {
4359 ret = io_fallocate_prep(req, sqe);
4360 if (ret)
4361 break;
4362 }
4363 ret = io_fallocate(req, nxt, force_nonblock);
4364 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004365 case IORING_OP_OPENAT:
4366 if (sqe) {
4367 ret = io_openat_prep(req, sqe);
4368 if (ret)
4369 break;
4370 }
4371 ret = io_openat(req, nxt, force_nonblock);
4372 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07004373 case IORING_OP_CLOSE:
4374 if (sqe) {
4375 ret = io_close_prep(req, sqe);
4376 if (ret)
4377 break;
4378 }
4379 ret = io_close(req, nxt, force_nonblock);
4380 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07004381 case IORING_OP_FILES_UPDATE:
4382 if (sqe) {
4383 ret = io_files_update_prep(req, sqe);
4384 if (ret)
4385 break;
4386 }
4387 ret = io_files_update(req, force_nonblock);
4388 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004389 case IORING_OP_STATX:
4390 if (sqe) {
4391 ret = io_statx_prep(req, sqe);
4392 if (ret)
4393 break;
4394 }
4395 ret = io_statx(req, nxt, force_nonblock);
4396 break;
Jens Axboe4840e412019-12-25 22:03:45 -07004397 case IORING_OP_FADVISE:
4398 if (sqe) {
4399 ret = io_fadvise_prep(req, sqe);
4400 if (ret)
4401 break;
4402 }
4403 ret = io_fadvise(req, nxt, force_nonblock);
4404 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07004405 case IORING_OP_MADVISE:
4406 if (sqe) {
4407 ret = io_madvise_prep(req, sqe);
4408 if (ret)
4409 break;
4410 }
4411 ret = io_madvise(req, nxt, force_nonblock);
4412 break;
Jens Axboecebdb982020-01-08 17:59:24 -07004413 case IORING_OP_OPENAT2:
4414 if (sqe) {
4415 ret = io_openat2_prep(req, sqe);
4416 if (ret)
4417 break;
4418 }
4419 ret = io_openat2(req, nxt, force_nonblock);
4420 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004421 case IORING_OP_EPOLL_CTL:
4422 if (sqe) {
4423 ret = io_epoll_ctl_prep(req, sqe);
4424 if (ret)
4425 break;
4426 }
4427 ret = io_epoll_ctl(req, nxt, force_nonblock);
4428 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004429 default:
4430 ret = -EINVAL;
4431 break;
4432 }
4433
Jens Axboedef596e2019-01-09 08:59:42 -07004434 if (ret)
4435 return ret;
4436
4437 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboe11ba8202020-01-15 21:51:17 -07004438 const bool in_async = io_wq_current_is_worker();
4439
Jens Axboe9e645e112019-05-10 16:07:28 -06004440 if (req->result == -EAGAIN)
Jens Axboedef596e2019-01-09 08:59:42 -07004441 return -EAGAIN;
4442
Jens Axboe11ba8202020-01-15 21:51:17 -07004443 /* workqueue context doesn't hold uring_lock, grab it now */
4444 if (in_async)
4445 mutex_lock(&ctx->uring_lock);
4446
Jens Axboedef596e2019-01-09 08:59:42 -07004447 io_iopoll_req_issued(req);
Jens Axboe11ba8202020-01-15 21:51:17 -07004448
4449 if (in_async)
4450 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07004451 }
4452
4453 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004454}
4455
Jens Axboe561fb042019-10-24 07:25:42 -06004456static void io_wq_submit_work(struct io_wq_work **workptr)
Jens Axboe31b51512019-01-18 22:56:34 -07004457{
Jens Axboe561fb042019-10-24 07:25:42 -06004458 struct io_wq_work *work = *workptr;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004459 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Jens Axboe561fb042019-10-24 07:25:42 -06004460 struct io_kiocb *nxt = NULL;
4461 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004462
Jens Axboe0c9d5cc2019-12-11 19:29:43 -07004463 /* if NO_CANCEL is set, we must still run the work */
4464 if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
4465 IO_WQ_WORK_CANCEL) {
Jens Axboe561fb042019-10-24 07:25:42 -06004466 ret = -ECANCELED;
Jens Axboe0c9d5cc2019-12-11 19:29:43 -07004467 }
Jens Axboe31b51512019-01-18 22:56:34 -07004468
Jens Axboe561fb042019-10-24 07:25:42 -06004469 if (!ret) {
Pavel Begunkovcf6fd4b2019-11-25 23:14:39 +03004470 req->in_async = true;
Jens Axboe561fb042019-10-24 07:25:42 -06004471 do {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004472 ret = io_issue_sqe(req, NULL, &nxt, false);
Jens Axboe561fb042019-10-24 07:25:42 -06004473 /*
4474 * We can get EAGAIN for polled IO even though we're
4475 * forcing a sync submission from here, since we can't
4476 * wait for request slots on the block side.
4477 */
4478 if (ret != -EAGAIN)
4479 break;
4480 cond_resched();
4481 } while (1);
4482 }
Jens Axboe31b51512019-01-18 22:56:34 -07004483
Jens Axboe561fb042019-10-24 07:25:42 -06004484 /* drop submission reference */
Jackie Liuec9c02a2019-11-08 23:50:36 +08004485 io_put_req(req);
Jens Axboe817869d2019-04-30 14:44:05 -06004486
Jens Axboe561fb042019-10-24 07:25:42 -06004487 if (ret) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004488 req_set_fail_links(req);
Jens Axboe78e19bb2019-11-06 15:21:34 -07004489 io_cqring_add_event(req, ret);
Jens Axboe817869d2019-04-30 14:44:05 -06004490 io_put_req(req);
Jens Axboeedafcce2019-01-09 09:16:05 -07004491 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07004492
Jens Axboe561fb042019-10-24 07:25:42 -06004493 /* if a dependent link is ready, pass it back */
Jens Axboe78912932020-01-14 22:09:06 -07004494 if (!ret && nxt)
4495 io_wq_assign_next(workptr, nxt);
Jens Axboe31b51512019-01-18 22:56:34 -07004496}
Jens Axboe2b188cc2019-01-07 10:46:33 -07004497
Jens Axboe15b71ab2019-12-11 11:20:36 -07004498static int io_req_needs_file(struct io_kiocb *req, int fd)
Jens Axboe9e3aa612019-12-11 15:55:43 -07004499{
Jens Axboed3656342019-12-18 09:50:26 -07004500 if (!io_op_defs[req->opcode].needs_file)
Jens Axboe9e3aa612019-12-11 15:55:43 -07004501 return 0;
Jens Axboed3656342019-12-18 09:50:26 -07004502 if (fd == -1 && io_op_defs[req->opcode].fd_non_neg)
4503 return 0;
4504 return 1;
Jens Axboe09bb8392019-03-13 12:39:28 -06004505}
4506
Jens Axboe65e19f52019-10-26 07:20:21 -06004507static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
4508 int index)
Jens Axboe09bb8392019-03-13 12:39:28 -06004509{
Jens Axboe65e19f52019-10-26 07:20:21 -06004510 struct fixed_file_table *table;
4511
Jens Axboe05f3fb32019-12-09 11:22:50 -07004512 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
4513	return table->files[index & IORING_FILE_TABLE_MASK];
Jens Axboe65e19f52019-10-26 07:20:21 -06004514}
4515
Jens Axboe3529d8c2019-12-19 18:24:38 -07004516static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
4517 const struct io_uring_sqe *sqe)
Jens Axboe09bb8392019-03-13 12:39:28 -06004518{
Jackie Liua197f662019-11-08 08:09:12 -07004519 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe09bb8392019-03-13 12:39:28 -06004520 unsigned flags;
Jens Axboed3656342019-12-18 09:50:26 -07004521 int fd;
Jens Axboe09bb8392019-03-13 12:39:28 -06004522
Jens Axboe3529d8c2019-12-19 18:24:38 -07004523 flags = READ_ONCE(sqe->flags);
4524 fd = READ_ONCE(sqe->fd);
Jens Axboe09bb8392019-03-13 12:39:28 -06004525
Jens Axboed3656342019-12-18 09:50:26 -07004526 if (!io_req_needs_file(req, fd))
4527 return 0;
Jens Axboe09bb8392019-03-13 12:39:28 -06004528
4529 if (flags & IOSQE_FIXED_FILE) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07004530 if (unlikely(!ctx->file_data ||
Jens Axboe09bb8392019-03-13 12:39:28 -06004531 (unsigned) fd >= ctx->nr_user_files))
4532 return -EBADF;
Jens Axboeb7620122019-10-26 07:22:55 -06004533 fd = array_index_nospec(fd, ctx->nr_user_files);
Jens Axboe65e19f52019-10-26 07:20:21 -06004534 req->file = io_file_from_index(ctx, fd);
4535 if (!req->file)
Jens Axboe08a45172019-10-03 08:11:03 -06004536 return -EBADF;
Jens Axboe09bb8392019-03-13 12:39:28 -06004537 req->flags |= REQ_F_FIXED_FILE;
Jens Axboe05f3fb32019-12-09 11:22:50 -07004538 percpu_ref_get(&ctx->file_data->refs);
Jens Axboe09bb8392019-03-13 12:39:28 -06004539 } else {
Pavel Begunkovcf6fd4b2019-11-25 23:14:39 +03004540 if (req->needs_fixed_file)
Jens Axboe09bb8392019-03-13 12:39:28 -06004541 return -EBADF;
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02004542 trace_io_uring_file_get(ctx, fd);
Jens Axboe09bb8392019-03-13 12:39:28 -06004543 req->file = io_file_get(state, fd);
4544 if (unlikely(!req->file))
4545 return -EBADF;
4546 }
4547
4548 return 0;
4549}
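/*
 * Illustrative userspace-side note (sketch, not kernel code): when
 * IOSQE_FIXED_FILE is set, sqe->fd is an index into the table registered
 * via IORING_REGISTER_FILES rather than a regular file descriptor, which
 * is why the lookup above goes through io_file_from_index().
 *
 *	sqe->flags |= IOSQE_FIXED_FILE;
 *	sqe->fd = 3;	// slot 3 of the registered file table, not fd 3
 */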
4550
Jackie Liua197f662019-11-08 08:09:12 -07004551static int io_grab_files(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07004552{
Jens Axboefcb323c2019-10-24 12:39:47 -06004553 int ret = -EBADF;
Jackie Liua197f662019-11-08 08:09:12 -07004554 struct io_ring_ctx *ctx = req->ctx;
Jens Axboefcb323c2019-10-24 12:39:47 -06004555
Jens Axboef86cd202020-01-29 13:46:44 -07004556 if (req->work.files)
4557 return 0;
Pavel Begunkovb14cca02020-01-17 04:45:59 +03004558 if (!ctx->ring_file)
Jens Axboeb5dba592019-12-11 14:02:38 -07004559 return -EBADF;
4560
Jens Axboefcb323c2019-10-24 12:39:47 -06004561 rcu_read_lock();
4562 spin_lock_irq(&ctx->inflight_lock);
4563 /*
4564 * We use the f_ops->flush() handler to ensure that we can flush
4565 * out work accessing these files if the fd is closed. Check if
4566 * the fd has changed since we started down this path, and disallow
4567 * this operation if it has.
4568 */
Pavel Begunkovb14cca02020-01-17 04:45:59 +03004569 if (fcheck(ctx->ring_fd) == ctx->ring_file) {
Jens Axboefcb323c2019-10-24 12:39:47 -06004570 list_add(&req->inflight_entry, &ctx->inflight_list);
4571 req->flags |= REQ_F_INFLIGHT;
4572 req->work.files = current->files;
4573 ret = 0;
4574 }
4575 spin_unlock_irq(&ctx->inflight_lock);
4576 rcu_read_unlock();
4577
4578 return ret;
4579}
4580
Jens Axboe2665abf2019-11-05 12:40:47 -07004581static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
4582{
Jens Axboead8a48a2019-11-15 08:49:11 -07004583 struct io_timeout_data *data = container_of(timer,
4584 struct io_timeout_data, timer);
4585 struct io_kiocb *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07004586 struct io_ring_ctx *ctx = req->ctx;
4587 struct io_kiocb *prev = NULL;
4588 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07004589
4590 spin_lock_irqsave(&ctx->completion_lock, flags);
4591
4592 /*
4593	 * We don't expect the list to be empty; that will only happen if we
4594 * race with the completion of the linked work.
4595 */
Pavel Begunkov44932332019-12-05 16:16:35 +03004596 if (!list_empty(&req->link_list)) {
4597 prev = list_entry(req->link_list.prev, struct io_kiocb,
4598 link_list);
Jens Axboe5d960722019-11-19 15:31:28 -07004599 if (refcount_inc_not_zero(&prev->refs)) {
Pavel Begunkov44932332019-12-05 16:16:35 +03004600 list_del_init(&req->link_list);
Jens Axboe5d960722019-11-19 15:31:28 -07004601 prev->flags &= ~REQ_F_LINK_TIMEOUT;
4602 } else
Jens Axboe76a46e02019-11-10 23:34:16 -07004603 prev = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07004604 }
4605
4606 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4607
4608 if (prev) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004609 req_set_fail_links(prev);
Jens Axboeb0dd8a42019-11-18 12:14:54 -07004610 io_async_find_and_cancel(ctx, req, prev->user_data, NULL,
4611 -ETIME);
Jens Axboe76a46e02019-11-10 23:34:16 -07004612 io_put_req(prev);
Jens Axboe47f46762019-11-09 17:43:02 -07004613 } else {
4614 io_cqring_add_event(req, -ETIME);
4615 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07004616 }
Jens Axboe2665abf2019-11-05 12:40:47 -07004617 return HRTIMER_NORESTART;
4618}
4619
Jens Axboead8a48a2019-11-15 08:49:11 -07004620static void io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07004621{
Jens Axboe76a46e02019-11-10 23:34:16 -07004622 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07004623
Jens Axboe76a46e02019-11-10 23:34:16 -07004624 /*
4625 * If the list is now empty, then our linked request finished before
4626	 * we got a chance to set up the timer.
4627 */
4628 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov44932332019-12-05 16:16:35 +03004629 if (!list_empty(&req->link_list)) {
Jens Axboe2d283902019-12-04 11:08:05 -07004630 struct io_timeout_data *data = &req->io->timeout;
Jens Axboe94ae5e72019-11-14 19:39:52 -07004631
Jens Axboead8a48a2019-11-15 08:49:11 -07004632 data->timer.function = io_link_timeout_fn;
4633 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
4634 data->mode);
Jens Axboe2665abf2019-11-05 12:40:47 -07004635 }
Jens Axboe76a46e02019-11-10 23:34:16 -07004636 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07004637
Jens Axboe2665abf2019-11-05 12:40:47 -07004638 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07004639 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07004640}
4641
Jens Axboead8a48a2019-11-15 08:49:11 -07004642static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07004643{
4644 struct io_kiocb *nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004645
Jens Axboe2665abf2019-11-05 12:40:47 -07004646 if (!(req->flags & REQ_F_LINK))
4647 return NULL;
4648
Pavel Begunkov44932332019-12-05 16:16:35 +03004649 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
4650 link_list);
Jens Axboed625c6e2019-12-17 19:53:05 -07004651 if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
Jens Axboe76a46e02019-11-10 23:34:16 -07004652 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07004653
Jens Axboe76a46e02019-11-10 23:34:16 -07004654 req->flags |= REQ_F_LINK_TIMEOUT;
Jens Axboe76a46e02019-11-10 23:34:16 -07004655 return nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07004656}
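/*
 * Illustrative userspace-side sketch (assumed SQE layout, not kernel
 * code): a linked timeout is armed by flagging a request with
 * IOSQE_IO_LINK and making the very next SQE an IORING_OP_LINK_TIMEOUT,
 * which is the opcode io_prep_linked_timeout() looks for on the link
 * list. In the timeout SQE, addr is assumed to point at a
 * struct __kernel_timespec with len set to 1.
 *
 *	op_sqe->flags |= IOSQE_IO_LINK;
 *	ts_sqe->opcode = IORING_OP_LINK_TIMEOUT;
 *	ts_sqe->addr = (unsigned long) &ts;	// struct __kernel_timespec
 *	ts_sqe->len = 1;
 */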
4657
Jens Axboe3529d8c2019-12-19 18:24:38 -07004658static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07004659{
Jens Axboe4a0a7a12019-12-09 20:01:01 -07004660 struct io_kiocb *linked_timeout;
Pavel Begunkovf9bd67f2019-11-21 23:21:03 +03004661 struct io_kiocb *nxt = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004662 int ret;
4663
Jens Axboe4a0a7a12019-12-09 20:01:01 -07004664again:
4665 linked_timeout = io_prep_linked_timeout(req);
4666
Jens Axboe3529d8c2019-12-19 18:24:38 -07004667 ret = io_issue_sqe(req, sqe, &nxt, true);
Jens Axboe491381ce2019-10-17 09:20:46 -06004668
4669 /*
4670 * We async punt it if the file wasn't marked NOWAIT, or if the file
4671 * doesn't support non-blocking read/write attempts
4672 */
4673 if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
4674 (req->flags & REQ_F_MUST_PUNT))) {
Pavel Begunkov86a761f2020-01-22 23:09:36 +03004675punt:
Jens Axboef86cd202020-01-29 13:46:44 -07004676 if (io_op_defs[req->opcode].file_table) {
Pavel Begunkovbbad27b2019-11-19 23:32:47 +03004677 ret = io_grab_files(req);
4678 if (ret)
4679 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004680 }
Pavel Begunkovbbad27b2019-11-19 23:32:47 +03004681
4682 /*
4683	 * Queued up for async execution; the worker will release the
4684	 * submit reference when the iocb is actually submitted.
4685 */
4686 io_queue_async_work(req);
Jens Axboe4a0a7a12019-12-09 20:01:01 -07004687 goto done_req;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004688 }
Jens Axboee65ef562019-03-12 10:16:44 -06004689
Jens Axboefcb323c2019-10-24 12:39:47 -06004690err:
Jens Axboee65ef562019-03-12 10:16:44 -06004691 /* drop submission reference */
4692 io_put_req(req);
4693
Pavel Begunkovf9bd67f2019-11-21 23:21:03 +03004694 if (linked_timeout) {
Jens Axboe76a46e02019-11-10 23:34:16 -07004695 if (!ret)
Pavel Begunkovf9bd67f2019-11-21 23:21:03 +03004696 io_queue_linked_timeout(linked_timeout);
Jens Axboe76a46e02019-11-10 23:34:16 -07004697 else
Pavel Begunkovf9bd67f2019-11-21 23:21:03 +03004698 io_put_req(linked_timeout);
Jens Axboe76a46e02019-11-10 23:34:16 -07004699 }
4700
Jens Axboee65ef562019-03-12 10:16:44 -06004701 /* and drop final reference, if we failed */
Jens Axboe9e645e112019-05-10 16:07:28 -06004702 if (ret) {
Jens Axboe78e19bb2019-11-06 15:21:34 -07004703 io_cqring_add_event(req, ret);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004704 req_set_fail_links(req);
Jens Axboee65ef562019-03-12 10:16:44 -06004705 io_put_req(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06004706 }
Jens Axboe4a0a7a12019-12-09 20:01:01 -07004707done_req:
4708 if (nxt) {
4709 req = nxt;
4710 nxt = NULL;
Pavel Begunkov86a761f2020-01-22 23:09:36 +03004711
4712 if (req->flags & REQ_F_FORCE_ASYNC)
4713 goto punt;
Jens Axboe4a0a7a12019-12-09 20:01:01 -07004714 goto again;
4715 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07004716}
4717
Jens Axboe3529d8c2019-12-19 18:24:38 -07004718static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jackie Liu4fe2c962019-09-09 20:50:40 +08004719{
4720 int ret;
4721
Jens Axboe3529d8c2019-12-19 18:24:38 -07004722 ret = io_req_defer(req, sqe);
Jackie Liu4fe2c962019-09-09 20:50:40 +08004723 if (ret) {
4724 if (ret != -EIOCBQUEUED) {
Pavel Begunkov11185912020-01-22 23:09:35 +03004725fail_req:
Jens Axboe78e19bb2019-11-06 15:21:34 -07004726 io_cqring_add_event(req, ret);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004727 req_set_fail_links(req);
Jens Axboe78e19bb2019-11-06 15:21:34 -07004728 io_double_put_req(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08004729 }
Pavel Begunkov25508782019-12-30 21:24:47 +03004730 } else if (req->flags & REQ_F_FORCE_ASYNC) {
Pavel Begunkov11185912020-01-22 23:09:35 +03004731 ret = io_req_defer_prep(req, sqe);
4732 if (unlikely(ret < 0))
4733 goto fail_req;
Jens Axboece35a472019-12-17 08:04:44 -07004734 /*
4735	 * Never try inline submit if IOSQE_ASYNC is set, go straight
4736 * to async execution.
4737 */
4738 req->work.flags |= IO_WQ_WORK_CONCURRENT;
4739 io_queue_async_work(req);
4740 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004741 __io_queue_sqe(req, sqe);
Jens Axboece35a472019-12-17 08:04:44 -07004742 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08004743}
4744
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03004745static inline void io_queue_link_head(struct io_kiocb *req)
Jackie Liu4fe2c962019-09-09 20:50:40 +08004746{
Jens Axboe94ae5e72019-11-14 19:39:52 -07004747 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03004748 io_cqring_add_event(req, -ECANCELED);
4749 io_double_put_req(req);
4750 } else
Jens Axboe3529d8c2019-12-19 18:24:38 -07004751 io_queue_sqe(req, NULL);
Jackie Liu4fe2c962019-09-09 20:50:40 +08004752}
4753
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004754#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
Jens Axboece35a472019-12-17 08:04:44 -07004755 IOSQE_IO_HARDLINK | IOSQE_ASYNC)
Jens Axboe9e645e112019-05-10 16:07:28 -06004756
Jens Axboe3529d8c2019-12-19 18:24:38 -07004757static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
4758 struct io_submit_state *state, struct io_kiocb **link)
Jens Axboe9e645e112019-05-10 16:07:28 -06004759{
Jens Axboe75c6a032020-01-28 10:15:23 -07004760 const struct cred *old_creds = NULL;
Jackie Liua197f662019-11-08 08:09:12 -07004761 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkov32fe5252019-12-17 22:26:58 +03004762 unsigned int sqe_flags;
Jens Axboe75c6a032020-01-28 10:15:23 -07004763 int ret, id;
Jens Axboe9e645e112019-05-10 16:07:28 -06004764
Pavel Begunkov32fe5252019-12-17 22:26:58 +03004765 sqe_flags = READ_ONCE(sqe->flags);
Jens Axboe9e645e112019-05-10 16:07:28 -06004766
4767 /* enforce forwards compatibility on users */
Pavel Begunkov32fe5252019-12-17 22:26:58 +03004768 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
Jens Axboe9e645e112019-05-10 16:07:28 -06004769 ret = -EINVAL;
Pavel Begunkov196be952019-11-07 01:41:06 +03004770 goto err_req;
Jens Axboe9e645e112019-05-10 16:07:28 -06004771 }
4772
Jens Axboe75c6a032020-01-28 10:15:23 -07004773 id = READ_ONCE(sqe->personality);
4774 if (id) {
4775 const struct cred *personality_creds;
4776
4777 personality_creds = idr_find(&ctx->personality_idr, id);
4778 if (unlikely(!personality_creds)) {
4779 ret = -EINVAL;
4780 goto err_req;
4781 }
4782 old_creds = override_creds(personality_creds);
4783 }
4784
Pavel Begunkov6b47ee62020-01-18 20:22:41 +03004785	/* same numerical values as the corresponding REQ_F_*, safe to copy */
4786 req->flags |= sqe_flags & (IOSQE_IO_DRAIN|IOSQE_IO_HARDLINK|
4787 IOSQE_ASYNC);
Jens Axboe9e645e112019-05-10 16:07:28 -06004788
Jens Axboe3529d8c2019-12-19 18:24:38 -07004789 ret = io_req_set_file(state, req, sqe);
Jens Axboe9e645e112019-05-10 16:07:28 -06004790 if (unlikely(ret)) {
4791err_req:
Jens Axboe78e19bb2019-11-06 15:21:34 -07004792 io_cqring_add_event(req, ret);
4793 io_double_put_req(req);
Jens Axboe75c6a032020-01-28 10:15:23 -07004794 if (old_creds)
4795 revert_creds(old_creds);
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03004796 return false;
Jens Axboe9e645e112019-05-10 16:07:28 -06004797 }
4798
Jens Axboe9e645e112019-05-10 16:07:28 -06004799 /*
4800 * If we already have a head request, queue this one for async
4801 * submittal once the head completes. If we don't have a head but
4802 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
4803 * submitted sync once the chain is complete. If none of those
4804 * conditions are true (normal request), then just queue it.
4805 */
4806 if (*link) {
Pavel Begunkov9d763772019-12-17 02:22:07 +03004807 struct io_kiocb *head = *link;
Jens Axboe9e645e112019-05-10 16:07:28 -06004808
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03004809 /*
4810	 * Given the sequential execution of a link, draining both sides
4811	 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
4812 * requests in the link. So, it drains the head and the
4813 * next after the link request. The last one is done via
4814 * drain_next flag to persist the effect across calls.
4815 */
Pavel Begunkov711be032020-01-17 03:57:59 +03004816 if (sqe_flags & IOSQE_IO_DRAIN) {
4817 head->flags |= REQ_F_IO_DRAIN;
4818 ctx->drain_next = 1;
4819 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004820 if (io_alloc_async_ctx(req)) {
Jens Axboe9e645e112019-05-10 16:07:28 -06004821 ret = -EAGAIN;
4822 goto err_req;
4823 }
4824
Jens Axboe3529d8c2019-12-19 18:24:38 -07004825 ret = io_req_defer_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07004826 if (ret) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004827 /* fail even hard links since we don't submit */
Pavel Begunkov9d763772019-12-17 02:22:07 +03004828 head->flags |= REQ_F_FAIL_LINK;
Jens Axboef67676d2019-12-02 11:03:47 -07004829 goto err_req;
Jens Axboe2d283902019-12-04 11:08:05 -07004830 }
Pavel Begunkov9d763772019-12-17 02:22:07 +03004831 trace_io_uring_link(ctx, req, head);
4832 list_add_tail(&req->link_list, &head->link_list);
Jens Axboe9e645e112019-05-10 16:07:28 -06004833
Pavel Begunkov32fe5252019-12-17 22:26:58 +03004834 /* last request of a link, enqueue the link */
4835 if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) {
4836 io_queue_link_head(head);
4837 *link = NULL;
4838 }
Jens Axboe9e645e112019-05-10 16:07:28 -06004839 } else {
Pavel Begunkov711be032020-01-17 03:57:59 +03004840 if (unlikely(ctx->drain_next)) {
4841 req->flags |= REQ_F_IO_DRAIN;
4842 req->ctx->drain_next = 0;
4843 }
4844 if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
4845 req->flags |= REQ_F_LINK;
Pavel Begunkov711be032020-01-17 03:57:59 +03004846 INIT_LIST_HEAD(&req->link_list);
4847 ret = io_req_defer_prep(req, sqe);
4848 if (ret)
4849 req->flags |= REQ_F_FAIL_LINK;
4850 *link = req;
4851 } else {
4852 io_queue_sqe(req, sqe);
4853 }
Jens Axboe9e645e112019-05-10 16:07:28 -06004854 }
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03004855
Jens Axboe75c6a032020-01-28 10:15:23 -07004856 if (old_creds)
4857 revert_creds(old_creds);
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03004858 return true;
Jens Axboe9e645e112019-05-10 16:07:28 -06004859}
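/*
 * Illustrative userspace-side sketch (not kernel code): the chaining
 * handled above is driven purely by SQE flags. For example, with three
 * consecutive SQEs, the first two carrying IOSQE_IO_LINK execute in
 * order, and the chain is queued as one unit once the last, unflagged
 * SQE is seen.
 *
 *	sqe0->flags |= IOSQE_IO_LINK;	// runs first
 *	sqe1->flags |= IOSQE_IO_LINK;	// runs after sqe0 succeeds
 *	// sqe2 carries no link flag and terminates the chain
 */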
4860
Jens Axboe9a56a232019-01-09 09:06:50 -07004861/*
4862 * Batched submission is done; ensure local IO is flushed out.
4863 */
4864static void io_submit_state_end(struct io_submit_state *state)
4865{
4866 blk_finish_plug(&state->plug);
Jens Axboe3d6770f2019-04-13 11:50:54 -06004867 io_file_put(state);
Jens Axboe2579f912019-01-09 09:10:43 -07004868 if (state->free_reqs)
Pavel Begunkov6c8a3132020-02-01 03:58:00 +03004869 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
Jens Axboe9a56a232019-01-09 09:06:50 -07004870}
4871
4872/*
4873 * Start submission side cache.
4874 */
4875static void io_submit_state_start(struct io_submit_state *state,
Jackie Liu22efde52019-12-02 17:14:52 +08004876 unsigned int max_ios)
Jens Axboe9a56a232019-01-09 09:06:50 -07004877{
4878 blk_start_plug(&state->plug);
Jens Axboe2579f912019-01-09 09:10:43 -07004879 state->free_reqs = 0;
Jens Axboe9a56a232019-01-09 09:06:50 -07004880 state->file = NULL;
4881 state->ios_left = max_ios;
4882}
4883
Jens Axboe2b188cc2019-01-07 10:46:33 -07004884static void io_commit_sqring(struct io_ring_ctx *ctx)
4885{
Hristo Venev75b28af2019-08-26 17:23:46 +00004886 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004887
Pavel Begunkovcaf582c2019-12-30 21:24:46 +03004888 /*
4889 * Ensure any loads from the SQEs are done at this point,
4890 * since once we write the new head, the application could
4891 * write new data to them.
4892 */
4893 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004894}
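/*
 * Illustrative userspace counterpart (sketch; sq_head_ptr, local_sq_tail
 * and sq_ring_entries are placeholder names for the mmap'ed ring fields):
 * the release store above pairs with an acquire load of sq.head on the
 * application side before SQE slots are reused.
 *
 *	unsigned head = __atomic_load_n(sq_head_ptr, __ATOMIC_ACQUIRE);
 *	unsigned space = sq_ring_entries - (local_sq_tail - head);
 */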
4895
4896/*
Jens Axboe3529d8c2019-12-19 18:24:38 -07004897 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
Jens Axboe2b188cc2019-01-07 10:46:33 -07004898 * that is mapped by userspace. This means that care needs to be taken to
4899 * ensure that reads are stable, as we cannot rely on userspace always
4900 * being a good citizen. If members of the sqe are validated and then later
4901 * used, it's important that those reads are done through READ_ONCE() to
4902 * prevent a re-load down the line.
4903 */
Jens Axboe3529d8c2019-12-19 18:24:38 -07004904static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
4905 const struct io_uring_sqe **sqe_ptr)
Jens Axboe2b188cc2019-01-07 10:46:33 -07004906{
Hristo Venev75b28af2019-08-26 17:23:46 +00004907 u32 *sq_array = ctx->sq_array;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004908 unsigned head;
4909
4910 /*
4911 * The cached sq head (or cq tail) serves two purposes:
4912 *
4913	 * 1) allows us to batch the cost of updating the user visible
4914	 *    head.
4915 * 2) allows the kernel side to track the head on its own, even
4916 * though the application is the one updating it.
4917 */
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03004918 head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
Pavel Begunkov9835d6f2019-11-21 21:24:56 +03004919 if (likely(head < ctx->sq_entries)) {
Pavel Begunkovcf6fd4b2019-11-25 23:14:39 +03004920 /*
4921	 * All IO needs to record the previous position; with LINK or DRAIN,
4922	 * it can be used to mark the position of the first IO in the
4923	 * link list.
4924 */
4925 req->sequence = ctx->cached_sq_head;
Jens Axboe3529d8c2019-12-19 18:24:38 -07004926 *sqe_ptr = &ctx->sq_sqes[head];
4927 req->opcode = READ_ONCE((*sqe_ptr)->opcode);
4928 req->user_data = READ_ONCE((*sqe_ptr)->user_data);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004929 ctx->cached_sq_head++;
4930 return true;
4931 }
4932
4933 /* drop invalid entries */
4934 ctx->cached_sq_head++;
Jens Axboe498ccd92019-10-25 10:04:25 -06004935 ctx->cached_sq_dropped++;
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03004936 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004937 return false;
4938}
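/*
 * Illustrative userspace publishing sequence (sketch; the pointer names
 * are placeholders for the mmap'ed ring fields read above): the
 * application fills an SQE, writes its index into sq_array[tail & mask],
 * and only then publishes the new tail with a release store so the
 * read here observes a fully written entry.
 *
 *	unsigned tail = local_sq_tail;
 *	sq_array[tail & sq_ring_mask] = sqe_index;
 *	__atomic_store_n(sq_tail_ptr, tail + 1, __ATOMIC_RELEASE);
 *	local_sq_tail = tail + 1;
 */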
4939
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03004940static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
Pavel Begunkovae9428c2019-11-06 00:22:14 +03004941 struct file *ring_file, int ring_fd,
4942 struct mm_struct **mm, bool async)
Jens Axboe6c271ce2019-01-10 11:22:30 -07004943{
4944 struct io_submit_state state, *statep = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06004945 struct io_kiocb *link = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06004946 int i, submitted = 0;
Pavel Begunkov95a1b3ff2019-10-27 23:15:41 +03004947 bool mm_fault = false;
Jens Axboe6c271ce2019-01-10 11:22:30 -07004948
Jens Axboec4a2ed72019-11-21 21:01:26 -07004949 /* if we have a backlog and couldn't flush it all, return BUSY */
Jens Axboead3eb2c2019-12-18 17:12:20 -07004950 if (test_bit(0, &ctx->sq_check_overflow)) {
4951 if (!list_empty(&ctx->cq_overflow_list) &&
4952 !io_cqring_overflow_flush(ctx, false))
4953 return -EBUSY;
4954 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07004955
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03004956 /* make sure SQ entry isn't read before tail */
4957 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov9ef4f122019-12-30 21:24:44 +03004958
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03004959 if (!percpu_ref_tryget_many(&ctx->refs, nr))
4960 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07004961
4962 if (nr > IO_PLUG_THRESHOLD) {
Jackie Liu22efde52019-12-02 17:14:52 +08004963 io_submit_state_start(&state, nr);
Jens Axboe6c271ce2019-01-10 11:22:30 -07004964 statep = &state;
4965 }
4966
Pavel Begunkovb14cca02020-01-17 04:45:59 +03004967 ctx->ring_fd = ring_fd;
4968 ctx->ring_file = ring_file;
4969
Jens Axboe6c271ce2019-01-10 11:22:30 -07004970 for (i = 0; i < nr; i++) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07004971 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03004972 struct io_kiocb *req;
Pavel Begunkov1cb1edb2020-02-06 21:16:09 +03004973 int err;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03004974
Pavel Begunkov196be952019-11-07 01:41:06 +03004975 req = io_get_req(ctx, statep);
4976 if (unlikely(!req)) {
4977 if (!submitted)
4978 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03004979 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06004980 }
Jens Axboe3529d8c2019-12-19 18:24:38 -07004981 if (!io_get_sqring(ctx, req, &sqe)) {
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03004982 __io_req_do_free(req);
Pavel Begunkov196be952019-11-07 01:41:06 +03004983 break;
4984 }
Jens Axboe9e645e112019-05-10 16:07:28 -06004985
Jens Axboed3656342019-12-18 09:50:26 -07004986 /* will complete beyond this point, count as submitted */
4987 submitted++;
4988
4989 if (unlikely(req->opcode >= IORING_OP_LAST)) {
Pavel Begunkov1cb1edb2020-02-06 21:16:09 +03004990 err = -EINVAL;
4991fail_req:
4992 io_cqring_add_event(req, err);
Jens Axboed3656342019-12-18 09:50:26 -07004993 io_double_put_req(req);
4994 break;
4995 }
4996
4997 if (io_op_defs[req->opcode].needs_mm && !*mm) {
Pavel Begunkov95a1b3ff2019-10-27 23:15:41 +03004998 mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
Pavel Begunkov1cb1edb2020-02-06 21:16:09 +03004999 if (unlikely(mm_fault)) {
5000 err = -EFAULT;
5001 goto fail_req;
Pavel Begunkov95a1b3ff2019-10-27 23:15:41 +03005002 }
Pavel Begunkov1cb1edb2020-02-06 21:16:09 +03005003 use_mm(ctx->sqo_mm);
5004 *mm = ctx->sqo_mm;
Pavel Begunkov95a1b3ff2019-10-27 23:15:41 +03005005 }
5006
Pavel Begunkovcf6fd4b2019-11-25 23:14:39 +03005007 req->in_async = async;
5008 req->needs_fixed_file = async;
Jens Axboe354420f2020-01-08 18:55:15 -07005009 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
5010 true, async);
Jens Axboe3529d8c2019-12-19 18:24:38 -07005011 if (!io_submit_sqe(req, sqe, statep, &link))
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03005012 break;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005013 }
5014
Pavel Begunkov9466f432020-01-25 22:34:01 +03005015 if (unlikely(submitted != nr)) {
5016 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
5017
5018 percpu_ref_put_many(&ctx->refs, nr - ref_used);
5019 }
Jens Axboe9e645e112019-05-10 16:07:28 -06005020 if (link)
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03005021 io_queue_link_head(link);
Jens Axboe6c271ce2019-01-10 11:22:30 -07005022 if (statep)
5023 io_submit_state_end(&state);
5024
Pavel Begunkovae9428c2019-11-06 00:22:14 +03005025 /* Commit SQ ring head once we've consumed and submitted all SQEs */
5026 io_commit_sqring(ctx);
5027
Jens Axboe6c271ce2019-01-10 11:22:30 -07005028 return submitted;
5029}
5030
5031static int io_sq_thread(void *data)
5032{
Jens Axboe6c271ce2019-01-10 11:22:30 -07005033 struct io_ring_ctx *ctx = data;
5034 struct mm_struct *cur_mm = NULL;
Jens Axboe181e4482019-11-25 08:52:30 -07005035 const struct cred *old_cred;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005036 mm_segment_t old_fs;
5037 DEFINE_WAIT(wait);
5038 unsigned inflight;
5039 unsigned long timeout;
Jens Axboec1edbf52019-11-10 16:56:04 -07005040 int ret;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005041
Jens Axboe206aefd2019-11-07 18:27:42 -07005042 complete(&ctx->completions[1]);
Jackie Liua4c0b3d2019-07-08 13:41:12 +08005043
Jens Axboe6c271ce2019-01-10 11:22:30 -07005044 old_fs = get_fs();
5045 set_fs(USER_DS);
Jens Axboe181e4482019-11-25 08:52:30 -07005046 old_cred = override_creds(ctx->creds);
Jens Axboe6c271ce2019-01-10 11:22:30 -07005047
Jens Axboec1edbf52019-11-10 16:56:04 -07005048 ret = timeout = inflight = 0;
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02005049 while (!kthread_should_park()) {
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03005050 unsigned int to_submit;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005051
5052 if (inflight) {
5053 unsigned nr_events = 0;
5054
5055 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboe2b2ed972019-10-25 10:06:15 -06005056 /*
5057 * inflight is the count of the maximum possible
5058 * entries we submitted, but it can be smaller
5059 * if we dropped some of them. If we don't have
5060 * poll entries available, then we know that we
5061 * have nothing left to poll for. Reset the
5062 * inflight count to zero in that case.
5063 */
5064 mutex_lock(&ctx->uring_lock);
5065 if (!list_empty(&ctx->poll_list))
5066 __io_iopoll_check(ctx, &nr_events, 0);
5067 else
5068 inflight = 0;
5069 mutex_unlock(&ctx->uring_lock);
Jens Axboe6c271ce2019-01-10 11:22:30 -07005070 } else {
5071 /*
5072 * Normal IO, just pretend everything completed.
5073 * We don't have to poll completions for that.
5074 */
5075 nr_events = inflight;
5076 }
5077
5078 inflight -= nr_events;
5079 if (!inflight)
5080 timeout = jiffies + ctx->sq_thread_idle;
5081 }
5082
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03005083 to_submit = io_sqring_entries(ctx);
Jens Axboec1edbf52019-11-10 16:56:04 -07005084
5085 /*
5086 * If submit got -EBUSY, flag us as needing the application
5087 * to enter the kernel to reap and flush events.
5088 */
5089 if (!to_submit || ret == -EBUSY) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07005090 /*
5091 * We're polling. If we're within the defined idle
5092 * period, then let us spin without work before going
Jens Axboec1edbf52019-11-10 16:56:04 -07005093 * to sleep. The exception is if we got EBUSY doing
5094			 * more IO; in that case, wait for the application to
5095			 * reap events and wake us up.
Jens Axboe6c271ce2019-01-10 11:22:30 -07005096 */
Jens Axboec1edbf52019-11-10 16:56:04 -07005097 if (inflight ||
Jens Axboedf069d82020-02-04 16:48:34 -07005098 (!time_after(jiffies, timeout) && ret != -EBUSY &&
5099 !percpu_ref_is_dying(&ctx->refs))) {
Jens Axboe9831a902019-09-19 09:48:55 -06005100 cond_resched();
Jens Axboe6c271ce2019-01-10 11:22:30 -07005101 continue;
5102 }
5103
5104 /*
5105			 * Drop cur_mm before scheduling; we can't hold it for
5106 * long periods (or over schedule()). Do this before
5107 * adding ourselves to the waitqueue, as the unuse/drop
5108 * may sleep.
5109 */
5110 if (cur_mm) {
5111 unuse_mm(cur_mm);
5112 mmput(cur_mm);
5113 cur_mm = NULL;
5114 }
5115
5116 prepare_to_wait(&ctx->sqo_wait, &wait,
5117 TASK_INTERRUPTIBLE);
5118
5119 /* Tell userspace we may need a wakeup call */
Hristo Venev75b28af2019-08-26 17:23:46 +00005120 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
Stefan Bühler0d7bae62019-04-19 11:57:45 +02005121 /* make sure to read SQ tail after writing flags */
5122 smp_mb();
Jens Axboe6c271ce2019-01-10 11:22:30 -07005123
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03005124 to_submit = io_sqring_entries(ctx);
Jens Axboec1edbf52019-11-10 16:56:04 -07005125 if (!to_submit || ret == -EBUSY) {
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02005126 if (kthread_should_park()) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07005127 finish_wait(&ctx->sqo_wait, &wait);
5128 break;
5129 }
5130 if (signal_pending(current))
5131 flush_signals(current);
5132 schedule();
5133 finish_wait(&ctx->sqo_wait, &wait);
5134
Hristo Venev75b28af2019-08-26 17:23:46 +00005135 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005136 continue;
5137 }
5138 finish_wait(&ctx->sqo_wait, &wait);
5139
Hristo Venev75b28af2019-08-26 17:23:46 +00005140 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005141 }
5142
Jens Axboe8a4955f2019-12-09 14:52:35 -07005143 mutex_lock(&ctx->uring_lock);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07005144 ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
Jens Axboe8a4955f2019-12-09 14:52:35 -07005145 mutex_unlock(&ctx->uring_lock);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07005146 if (ret > 0)
5147 inflight += ret;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005148 }
5149
5150 set_fs(old_fs);
5151 if (cur_mm) {
5152 unuse_mm(cur_mm);
5153 mmput(cur_mm);
5154 }
Jens Axboe181e4482019-11-25 08:52:30 -07005155 revert_creds(old_cred);
Jens Axboe06058632019-04-13 09:26:03 -06005156
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02005157 kthread_parkme();
Jens Axboe06058632019-04-13 09:26:03 -06005158
Jens Axboe6c271ce2019-01-10 11:22:30 -07005159 return 0;
5160}
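/*
 * Illustrative userspace-side sketch (io_uring_enter() here stands for a
 * raw syscall(__NR_io_uring_enter, ...) wrapper; sq_flags_ptr is a
 * placeholder for the mmap'ed flags field): with IORING_SETUP_SQPOLL the
 * application only needs to enter the kernel once this thread has gone
 * to sleep and advertised it via IORING_SQ_NEED_WAKEUP, as set and
 * cleared above.
 *
 *	if (__atomic_load_n(sq_flags_ptr, __ATOMIC_RELAXED) &
 *	    IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP,
 *			       NULL, 0);
 */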
5161
Jens Axboebda52162019-09-24 13:47:15 -06005162struct io_wait_queue {
5163 struct wait_queue_entry wq;
5164 struct io_ring_ctx *ctx;
5165 unsigned to_wait;
5166 unsigned nr_timeouts;
5167};
5168
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07005169static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
Jens Axboebda52162019-09-24 13:47:15 -06005170{
5171 struct io_ring_ctx *ctx = iowq->ctx;
5172
5173 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08005174 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06005175 * started waiting. For timeouts, we always want to return to userspace,
5176 * regardless of event count.
5177 */
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07005178 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
Jens Axboebda52162019-09-24 13:47:15 -06005179 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
5180}
5181
5182static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
5183 int wake_flags, void *key)
5184{
5185 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
5186 wq);
5187
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07005188 /* use noflush == true, as we can't safely rely on locking context */
5189 if (!io_should_wake(iowq, true))
Jens Axboebda52162019-09-24 13:47:15 -06005190 return -1;
5191
5192 return autoremove_wake_function(curr, mode, wake_flags, key);
5193}
5194
Jens Axboe2b188cc2019-01-07 10:46:33 -07005195/*
5196 * Wait until events become available, if we don't already have some. The
5197 * application must reap them itself, as they reside on the shared cq ring.
5198 */
5199static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
5200 const sigset_t __user *sig, size_t sigsz)
5201{
Jens Axboebda52162019-09-24 13:47:15 -06005202 struct io_wait_queue iowq = {
5203 .wq = {
5204 .private = current,
5205 .func = io_wake_function,
5206 .entry = LIST_HEAD_INIT(iowq.wq.entry),
5207 },
5208 .ctx = ctx,
5209 .to_wait = min_events,
5210 };
Hristo Venev75b28af2019-08-26 17:23:46 +00005211 struct io_rings *rings = ctx->rings;
Jackie Liue9ffa5c2019-10-29 11:16:42 +08005212 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005213
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07005214 if (io_cqring_events(ctx, false) >= min_events)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005215 return 0;
5216
5217 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01005218#ifdef CONFIG_COMPAT
5219 if (in_compat_syscall())
5220 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07005221 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01005222 else
5223#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07005224 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01005225
Jens Axboe2b188cc2019-01-07 10:46:33 -07005226 if (ret)
5227 return ret;
5228 }
5229
Jens Axboebda52162019-09-24 13:47:15 -06005230 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02005231 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06005232 do {
5233 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
5234 TASK_INTERRUPTIBLE);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07005235 if (io_should_wake(&iowq, false))
Jens Axboebda52162019-09-24 13:47:15 -06005236 break;
5237 schedule();
5238 if (signal_pending(current)) {
Jackie Liue9ffa5c2019-10-29 11:16:42 +08005239 ret = -EINTR;
Jens Axboebda52162019-09-24 13:47:15 -06005240 break;
5241 }
5242 } while (1);
5243 finish_wait(&ctx->wait, &iowq.wq);
5244
Jackie Liue9ffa5c2019-10-29 11:16:42 +08005245 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005246
Hristo Venev75b28af2019-08-26 17:23:46 +00005247 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005248}
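/*
 * Illustrative userspace-side sketch (io_uring_enter() again stands for
 * a raw syscall wrapper): this wait path is reached when the application
 * asks to block for completions, e.g.
 *
 *	// submit nothing, block until at least one CQE is available
 *	io_uring_enter(ring_fd, 0, 1, IORING_ENTER_GETEVENTS, NULL, 0);
 */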
5249
Jens Axboe6b063142019-01-10 22:13:58 -07005250static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
5251{
5252#if defined(CONFIG_UNIX)
5253 if (ctx->ring_sock) {
5254 struct sock *sock = ctx->ring_sock->sk;
5255 struct sk_buff *skb;
5256
5257 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
5258 kfree_skb(skb);
5259 }
5260#else
5261 int i;
5262
Jens Axboe65e19f52019-10-26 07:20:21 -06005263 for (i = 0; i < ctx->nr_user_files; i++) {
5264 struct file *file;
5265
5266 file = io_file_from_index(ctx, i);
5267 if (file)
5268 fput(file);
5269 }
Jens Axboe6b063142019-01-10 22:13:58 -07005270#endif
5271}
5272
Jens Axboe05f3fb32019-12-09 11:22:50 -07005273static void io_file_ref_kill(struct percpu_ref *ref)
5274{
5275 struct fixed_file_data *data;
5276
5277 data = container_of(ref, struct fixed_file_data, refs);
5278 complete(&data->done);
5279}
5280
Jens Axboe6b063142019-01-10 22:13:58 -07005281static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
5282{
Jens Axboe05f3fb32019-12-09 11:22:50 -07005283 struct fixed_file_data *data = ctx->file_data;
Jens Axboe65e19f52019-10-26 07:20:21 -06005284 unsigned nr_tables, i;
5285
Jens Axboe05f3fb32019-12-09 11:22:50 -07005286 if (!data)
Jens Axboe6b063142019-01-10 22:13:58 -07005287 return -ENXIO;
5288
Jens Axboe05f3fb32019-12-09 11:22:50 -07005289 percpu_ref_kill_and_confirm(&data->refs, io_file_ref_kill);
Jens Axboee46a7952020-01-17 11:15:34 -07005290 flush_work(&data->ref_work);
Jens Axboe2faf8522020-02-04 19:54:55 -07005291 wait_for_completion(&data->done);
5292 io_ring_file_ref_flush(data);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005293 percpu_ref_exit(&data->refs);
5294
Jens Axboe6b063142019-01-10 22:13:58 -07005295 __io_sqe_files_unregister(ctx);
Jens Axboe65e19f52019-10-26 07:20:21 -06005296 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
5297 for (i = 0; i < nr_tables; i++)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005298 kfree(data->table[i].files);
5299 kfree(data->table);
5300 kfree(data);
5301 ctx->file_data = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07005302 ctx->nr_user_files = 0;
5303 return 0;
5304}
5305
Jens Axboe6c271ce2019-01-10 11:22:30 -07005306static void io_sq_thread_stop(struct io_ring_ctx *ctx)
5307{
5308 if (ctx->sqo_thread) {
Jens Axboe206aefd2019-11-07 18:27:42 -07005309 wait_for_completion(&ctx->completions[1]);
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02005310 /*
5311 * The park is a bit of a work-around; without it we get
5312 * warning spews on shutdown with SQPOLL set and affinity
5313 * set to a single CPU.
5314 */
Jens Axboe06058632019-04-13 09:26:03 -06005315 kthread_park(ctx->sqo_thread);
Jens Axboe6c271ce2019-01-10 11:22:30 -07005316 kthread_stop(ctx->sqo_thread);
5317 ctx->sqo_thread = NULL;
5318 }
5319}
5320
Jens Axboe6b063142019-01-10 22:13:58 -07005321static void io_finish_async(struct io_ring_ctx *ctx)
5322{
Jens Axboe6c271ce2019-01-10 11:22:30 -07005323 io_sq_thread_stop(ctx);
5324
Jens Axboe561fb042019-10-24 07:25:42 -06005325 if (ctx->io_wq) {
5326 io_wq_destroy(ctx->io_wq);
5327 ctx->io_wq = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07005328 }
5329}
5330
5331#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07005332/*
5333 * Ensure the UNIX gc is aware of our file set, so we are certain that
5334 * the io_uring can be safely unregistered on process exit, even if we have
5335 * loops in the file referencing.
5336 */
5337static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
5338{
5339 struct sock *sk = ctx->ring_sock->sk;
5340 struct scm_fp_list *fpl;
5341 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06005342 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07005343
5344 if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
5345 unsigned long inflight = ctx->user->unix_inflight + nr;
5346
5347 if (inflight > task_rlimit(current, RLIMIT_NOFILE))
5348 return -EMFILE;
5349 }
5350
5351 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
5352 if (!fpl)
5353 return -ENOMEM;
5354
5355 skb = alloc_skb(0, GFP_KERNEL);
5356 if (!skb) {
5357 kfree(fpl);
5358 return -ENOMEM;
5359 }
5360
5361 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07005362
Jens Axboe08a45172019-10-03 08:11:03 -06005363 nr_files = 0;
Jens Axboe6b063142019-01-10 22:13:58 -07005364 fpl->user = get_uid(ctx->user);
5365 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06005366 struct file *file = io_file_from_index(ctx, i + offset);
5367
5368 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06005369 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06005370 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06005371 unix_inflight(fpl->user, fpl->fp[nr_files]);
5372 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07005373 }
5374
Jens Axboe08a45172019-10-03 08:11:03 -06005375 if (nr_files) {
5376 fpl->max = SCM_MAX_FD;
5377 fpl->count = nr_files;
5378 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005379 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06005380 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
5381 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07005382
Jens Axboe08a45172019-10-03 08:11:03 -06005383 for (i = 0; i < nr_files; i++)
5384 fput(fpl->fp[i]);
5385 } else {
5386 kfree_skb(skb);
5387 kfree(fpl);
5388 }
Jens Axboe6b063142019-01-10 22:13:58 -07005389
5390 return 0;
5391}
5392
5393/*
5394 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
5395 * causes regular reference counting to break down. We rely on the UNIX
5396 * garbage collection to take care of this problem for us.
5397 */
5398static int io_sqe_files_scm(struct io_ring_ctx *ctx)
5399{
5400 unsigned left, total;
5401 int ret = 0;
5402
5403 total = 0;
5404 left = ctx->nr_user_files;
5405 while (left) {
5406 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07005407
5408 ret = __io_sqe_files_scm(ctx, this_files, total);
5409 if (ret)
5410 break;
5411 left -= this_files;
5412 total += this_files;
5413 }
5414
5415 if (!ret)
5416 return 0;
5417
5418 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06005419 struct file *file = io_file_from_index(ctx, total);
5420
5421 if (file)
5422 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07005423 total++;
5424 }
5425
5426 return ret;
5427}
5428#else
5429static int io_sqe_files_scm(struct io_ring_ctx *ctx)
5430{
5431 return 0;
5432}
5433#endif
5434
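/*
 * The fixed file set is sharded across tables of up to IORING_MAX_FILES_TABLE
 * entries each; slot i lives at
 * table[i >> IORING_FILE_TABLE_SHIFT].files[i & IORING_FILE_TABLE_MASK].
 */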
Jens Axboe65e19f52019-10-26 07:20:21 -06005435static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
5436 unsigned nr_files)
5437{
5438 int i;
5439
5440 for (i = 0; i < nr_tables; i++) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07005441 struct fixed_file_table *table = &ctx->file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06005442 unsigned this_files;
5443
5444 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
5445 table->files = kcalloc(this_files, sizeof(struct file *),
5446 GFP_KERNEL);
5447 if (!table->files)
5448 break;
5449 nr_files -= this_files;
5450 }
5451
5452 if (i == nr_tables)
5453 return 0;
5454
5455 for (i = 0; i < nr_tables; i++) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07005456 struct fixed_file_table *table = &ctx->file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06005457 kfree(table->files);
5458 }
5459 return 1;
5460}
5461
Jens Axboe05f3fb32019-12-09 11:22:50 -07005462static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
Jens Axboec3a31e62019-10-03 13:59:56 -06005463{
5464#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06005465 struct sock *sock = ctx->ring_sock->sk;
5466 struct sk_buff_head list, *head = &sock->sk_receive_queue;
5467 struct sk_buff *skb;
5468 int i;
5469
5470 __skb_queue_head_init(&list);
5471
5472 /*
5473 * Find the skb that holds this file in its SCM_RIGHTS. When found,
5474 * remove this entry and rearrange the file array.
5475 */
5476 skb = skb_dequeue(head);
5477 while (skb) {
5478 struct scm_fp_list *fp;
5479
5480 fp = UNIXCB(skb).fp;
5481 for (i = 0; i < fp->count; i++) {
5482 int left;
5483
5484 if (fp->fp[i] != file)
5485 continue;
5486
5487 unix_notinflight(fp->user, fp->fp[i]);
5488 left = fp->count - 1 - i;
5489 if (left) {
5490 memmove(&fp->fp[i], &fp->fp[i + 1],
5491 left * sizeof(struct file *));
5492 }
5493 fp->count--;
5494 if (!fp->count) {
5495 kfree_skb(skb);
5496 skb = NULL;
5497 } else {
5498 __skb_queue_tail(&list, skb);
5499 }
5500 fput(file);
5501 file = NULL;
5502 break;
5503 }
5504
5505 if (!file)
5506 break;
5507
5508 __skb_queue_tail(&list, skb);
5509
5510 skb = skb_dequeue(head);
5511 }
5512
5513 if (skb_peek(&list)) {
5514 spin_lock_irq(&head->lock);
5515 while ((skb = __skb_dequeue(&list)) != NULL)
5516 __skb_queue_tail(head, skb);
5517 spin_unlock_irq(&head->lock);
5518 }
5519#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07005520 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06005521#endif
5522}
5523
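/*
 * Removal of a registered file is deferred: io_queue_file_removal() queues an
 * io_file_put entry on data->put_llist, and io_ring_file_ref_flush() drains
 * the list (from the ref-switch work or the unregister path), dropping each
 * file and completing or freeing the entry.
 */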
Jens Axboe05f3fb32019-12-09 11:22:50 -07005524struct io_file_put {
5525 struct llist_node llist;
5526 struct file *file;
5527 struct completion *done;
5528};
5529
Jens Axboe2faf8522020-02-04 19:54:55 -07005530static void io_ring_file_ref_flush(struct fixed_file_data *data)
Jens Axboe05f3fb32019-12-09 11:22:50 -07005531{
5532 struct io_file_put *pfile, *tmp;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005533 struct llist_node *node;
5534
Jens Axboe05f3fb32019-12-09 11:22:50 -07005535 while ((node = llist_del_all(&data->put_llist)) != NULL) {
5536 llist_for_each_entry_safe(pfile, tmp, node, llist) {
5537 io_ring_file_put(data->ctx, pfile->file);
5538 if (pfile->done)
5539 complete(pfile->done);
5540 else
5541 kfree(pfile);
5542 }
5543 }
Jens Axboe2faf8522020-02-04 19:54:55 -07005544}
Jens Axboe05f3fb32019-12-09 11:22:50 -07005545
Jens Axboe2faf8522020-02-04 19:54:55 -07005546static void io_ring_file_ref_switch(struct work_struct *work)
5547{
5548 struct fixed_file_data *data;
5549
5550 data = container_of(work, struct fixed_file_data, ref_work);
5551 io_ring_file_ref_flush(data);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005552 percpu_ref_get(&data->refs);
5553 percpu_ref_switch_to_percpu(&data->refs);
5554}
5555
5556static void io_file_data_ref_zero(struct percpu_ref *ref)
5557{
5558 struct fixed_file_data *data;
5559
5560 data = container_of(ref, struct fixed_file_data, refs);
5561
Jens Axboe2faf8522020-02-04 19:54:55 -07005562 /*
5563 * We can't safely switch from inside this context, so punt to the wq. If
5564 * the table ref is going away, the table is being unregistered.
5565 * Don't queue up the async work for that case, the caller will
5566 * handle it.
5567 */
5568 if (!percpu_ref_is_dying(&data->refs))
5569 queue_work(system_wq, &data->ref_work);
Jens Axboe05f3fb32019-12-09 11:22:50 -07005570}
5571
5572static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
5573 unsigned nr_args)
5574{
5575 __s32 __user *fds = (__s32 __user *) arg;
5576 unsigned nr_tables;
5577 struct file *file;
5578 int fd, ret = 0;
5579 unsigned i;
5580
5581 if (ctx->file_data)
5582 return -EBUSY;
5583 if (!nr_args)
5584 return -EINVAL;
5585 if (nr_args > IORING_MAX_FIXED_FILES)
5586 return -EMFILE;
5587
5588 ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
5589 if (!ctx->file_data)
5590 return -ENOMEM;
5591 ctx->file_data->ctx = ctx;
5592 init_completion(&ctx->file_data->done);
5593
5594 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
5595 ctx->file_data->table = kcalloc(nr_tables,
5596 sizeof(struct fixed_file_table),
5597 GFP_KERNEL);
5598 if (!ctx->file_data->table) {
5599 kfree(ctx->file_data);
5600 ctx->file_data = NULL;
5601 return -ENOMEM;
5602 }
5603
5604 if (percpu_ref_init(&ctx->file_data->refs, io_file_data_ref_zero,
5605 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
5606 kfree(ctx->file_data->table);
5607 kfree(ctx->file_data);
5608 ctx->file_data = NULL;
5609 return -ENOMEM;
5610 }
5611 ctx->file_data->put_llist.first = NULL;
5612 INIT_WORK(&ctx->file_data->ref_work, io_ring_file_ref_switch);
5613
5614 if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
5615 percpu_ref_exit(&ctx->file_data->refs);
5616 kfree(ctx->file_data->table);
5617 kfree(ctx->file_data);
5618 ctx->file_data = NULL;
5619 return -ENOMEM;
5620 }
5621
5622 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
5623 struct fixed_file_table *table;
5624 unsigned index;
5625
5626 ret = -EFAULT;
5627 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
5628 break;
5629 /* allow sparse sets */
5630 if (fd == -1) {
5631 ret = 0;
5632 continue;
5633 }
5634
5635 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
5636 index = i & IORING_FILE_TABLE_MASK;
5637 file = fget(fd);
5638
5639 ret = -EBADF;
5640 if (!file)
5641 break;
5642
5643 /*
5644 * Don't allow io_uring instances to be registered. If UNIX
5645 * isn't enabled, then this causes a reference cycle and this
5646 * instance can never get freed. If UNIX is enabled we'll
5647 * handle it just fine, but there's still no point in allowing
5648 * a ring fd as it doesn't support regular read/write anyway.
5649 */
5650 if (file->f_op == &io_uring_fops) {
5651 fput(file);
5652 break;
5653 }
5654 ret = 0;
5655 table->files[index] = file;
5656 }
5657
5658 if (ret) {
5659 for (i = 0; i < ctx->nr_user_files; i++) {
5660 file = io_file_from_index(ctx, i);
5661 if (file)
5662 fput(file);
5663 }
5664 for (i = 0; i < nr_tables; i++)
5665 kfree(ctx->file_data->table[i].files);
5666
5667 kfree(ctx->file_data->table);
5668 kfree(ctx->file_data);
5669 ctx->file_data = NULL;
5670 ctx->nr_user_files = 0;
5671 return ret;
5672 }
5673
5674 ret = io_sqe_files_scm(ctx);
5675 if (ret)
5676 io_sqe_files_unregister(ctx);
5677
5678 return ret;
5679}
5680
Jens Axboec3a31e62019-10-03 13:59:56 -06005681static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
5682 int index)
5683{
5684#if defined(CONFIG_UNIX)
5685 struct sock *sock = ctx->ring_sock->sk;
5686 struct sk_buff_head *head = &sock->sk_receive_queue;
5687 struct sk_buff *skb;
5688
5689 /*
5690 * See if we can merge this file into an existing skb SCM_RIGHTS
5691 * file set. If there's no room, fall back to allocating a new skb
5692 * and filling it in.
5693 */
5694 spin_lock_irq(&head->lock);
5695 skb = skb_peek(head);
5696 if (skb) {
5697 struct scm_fp_list *fpl = UNIXCB(skb).fp;
5698
5699 if (fpl->count < SCM_MAX_FD) {
5700 __skb_unlink(skb, head);
5701 spin_unlock_irq(&head->lock);
5702 fpl->fp[fpl->count] = get_file(file);
5703 unix_inflight(fpl->user, fpl->fp[fpl->count]);
5704 fpl->count++;
5705 spin_lock_irq(&head->lock);
5706 __skb_queue_head(head, skb);
5707 } else {
5708 skb = NULL;
5709 }
5710 }
5711 spin_unlock_irq(&head->lock);
5712
5713 if (skb) {
5714 fput(file);
5715 return 0;
5716 }
5717
5718 return __io_sqe_files_scm(ctx, 1, index);
5719#else
5720 return 0;
5721#endif
5722}
5723
Jens Axboe05f3fb32019-12-09 11:22:50 -07005724static void io_atomic_switch(struct percpu_ref *ref)
Jens Axboec3a31e62019-10-03 13:59:56 -06005725{
Jens Axboe05f3fb32019-12-09 11:22:50 -07005726 struct fixed_file_data *data;
5727
5728 data = container_of(ref, struct fixed_file_data, refs);
5729 clear_bit(FFD_F_ATOMIC, &data->state);
5730}
5731
5732static bool io_queue_file_removal(struct fixed_file_data *data,
5733 struct file *file)
5734{
5735 struct io_file_put *pfile, pfile_stack;
5736 DECLARE_COMPLETION_ONSTACK(done);
5737
5738 /*
5739 * If we fail allocating the struct we need for doing async removal
5740 * of this file, just punt to sync and wait for it.
5741 */
5742 pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
5743 if (!pfile) {
5744 pfile = &pfile_stack;
5745 pfile->done = &done;
5746 }
5747
5748 pfile->file = file;
5749 llist_add(&pfile->llist, &data->put_llist);
5750
5751 if (pfile == &pfile_stack) {
5752 if (!test_and_set_bit(FFD_F_ATOMIC, &data->state)) {
5753 percpu_ref_put(&data->refs);
5754 percpu_ref_switch_to_atomic(&data->refs,
5755 io_atomic_switch);
5756 }
5757 wait_for_completion(&done);
5758 flush_work(&data->ref_work);
5759 return false;
5760 }
5761
5762 return true;
5763}
5764
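/*
 * Rough userspace sketch (not authoritative, see io_uring_register(2)) of how
 * this update path is driven, assuming an already-registered set and an fds[]
 * array of nr entries where -1 leaves a slot sparse:
 *
 *	struct io_uring_files_update up = {
 *		.offset	= first_slot,
 *		.fds	= (__u64)(unsigned long)fds,
 *	};
 *	io_uring_register(ring_fd, IORING_REGISTER_FILES_UPDATE, &up, nr);
 */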
5765static int __io_sqe_files_update(struct io_ring_ctx *ctx,
5766 struct io_uring_files_update *up,
5767 unsigned nr_args)
5768{
5769 struct fixed_file_data *data = ctx->file_data;
5770 bool ref_switch = false;
5771 struct file *file;
Jens Axboec3a31e62019-10-03 13:59:56 -06005772 __s32 __user *fds;
5773 int fd, i, err;
5774 __u32 done;
5775
Jens Axboe05f3fb32019-12-09 11:22:50 -07005776 if (check_add_overflow(up->offset, nr_args, &done))
Jens Axboec3a31e62019-10-03 13:59:56 -06005777 return -EOVERFLOW;
5778 if (done > ctx->nr_user_files)
5779 return -EINVAL;
5780
5781 done = 0;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005782 fds = u64_to_user_ptr(up->fds);
Jens Axboec3a31e62019-10-03 13:59:56 -06005783 while (nr_args) {
Jens Axboe65e19f52019-10-26 07:20:21 -06005784 struct fixed_file_table *table;
5785 unsigned index;
5786
Jens Axboec3a31e62019-10-03 13:59:56 -06005787 err = 0;
5788 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
5789 err = -EFAULT;
5790 break;
5791 }
Jens Axboe05f3fb32019-12-09 11:22:50 -07005792 i = array_index_nospec(up->offset, ctx->nr_user_files);
5793 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
Jens Axboe65e19f52019-10-26 07:20:21 -06005794 index = i & IORING_FILE_TABLE_MASK;
5795 if (table->files[index]) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07005796 file = io_file_from_index(ctx, index);
Jens Axboe65e19f52019-10-26 07:20:21 -06005797 table->files[index] = NULL;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005798 if (io_queue_file_removal(data, file))
5799 ref_switch = true;
Jens Axboec3a31e62019-10-03 13:59:56 -06005800 }
5801 if (fd != -1) {
Jens Axboec3a31e62019-10-03 13:59:56 -06005802 file = fget(fd);
5803 if (!file) {
5804 err = -EBADF;
5805 break;
5806 }
5807 /*
5808 * Don't allow io_uring instances to be registered. If
5809 * UNIX isn't enabled, then this causes a reference
5810 * cycle and this instance can never get freed. If UNIX
5811 * is enabled we'll handle it just fine, but there's
5812 * still no point in allowing a ring fd as it doesn't
5813 * support regular read/write anyway.
5814 */
5815 if (file->f_op == &io_uring_fops) {
5816 fput(file);
5817 err = -EBADF;
5818 break;
5819 }
Jens Axboe65e19f52019-10-26 07:20:21 -06005820 table->files[index] = file;
Jens Axboec3a31e62019-10-03 13:59:56 -06005821 err = io_sqe_file_register(ctx, file, i);
5822 if (err)
5823 break;
5824 }
5825 nr_args--;
5826 done++;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005827 up->offset++;
5828 }
5829
5830 if (ref_switch && !test_and_set_bit(FFD_F_ATOMIC, &data->state)) {
5831 percpu_ref_put(&data->refs);
5832 percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
Jens Axboec3a31e62019-10-03 13:59:56 -06005833 }
5834
5835 return done ? done : err;
5836}
Jens Axboe05f3fb32019-12-09 11:22:50 -07005837static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
5838 unsigned nr_args)
5839{
5840 struct io_uring_files_update up;
5841
5842 if (!ctx->file_data)
5843 return -ENXIO;
5844 if (!nr_args)
5845 return -EINVAL;
5846 if (copy_from_user(&up, arg, sizeof(up)))
5847 return -EFAULT;
5848 if (up.resv)
5849 return -EINVAL;
5850
5851 return __io_sqe_files_update(ctx, &up, nr_args);
5852}
Jens Axboec3a31e62019-10-03 13:59:56 -06005853
Jens Axboe7d723062019-11-12 22:31:31 -07005854static void io_put_work(struct io_wq_work *work)
5855{
5856 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
5857
5858 io_put_req(req);
5859}
5860
5861static void io_get_work(struct io_wq_work *work)
5862{
5863 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
5864
5865 refcount_inc(&req->refs);
5866}
5867
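/*
 * Set up the io-wq backend. Without IORING_SETUP_ATTACH_WQ a private io-wq is
 * created, sized to min(SQ entries, 4 * online CPUs); with it, we instead grab
 * a reference to the io-wq of the existing ring passed in p->wq_fd.
 */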
Pavel Begunkov24369c22020-01-28 03:15:48 +03005868static int io_init_wq_offload(struct io_ring_ctx *ctx,
5869 struct io_uring_params *p)
5870{
5871 struct io_wq_data data;
5872 struct fd f;
5873 struct io_ring_ctx *ctx_attach;
5874 unsigned int concurrency;
5875 int ret = 0;
5876
5877 data.user = ctx->user;
5878 data.get_work = io_get_work;
5879 data.put_work = io_put_work;
5880
5881 if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
5882 /* Do QD, or 4 * CPUS, whichever is smaller */
5883 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
5884
5885 ctx->io_wq = io_wq_create(concurrency, &data);
5886 if (IS_ERR(ctx->io_wq)) {
5887 ret = PTR_ERR(ctx->io_wq);
5888 ctx->io_wq = NULL;
5889 }
5890 return ret;
5891 }
5892
5893 f = fdget(p->wq_fd);
5894 if (!f.file)
5895 return -EBADF;
5896
5897 if (f.file->f_op != &io_uring_fops) {
5898 ret = -EINVAL;
5899 goto out_fput;
5900 }
5901
5902 ctx_attach = f.file->private_data;
5903 /* @io_wq is protected by holding the fd */
5904 if (!io_wq_get(ctx_attach->io_wq, &data)) {
5905 ret = -EINVAL;
5906 goto out_fput;
5907 }
5908
5909 ctx->io_wq = ctx_attach->io_wq;
5910out_fput:
5911 fdput(f);
5912 return ret;
5913}
5914
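/*
 * Start SQ offload: grab the submitter's mm and, for IORING_SETUP_SQPOLL
 * (CAP_SYS_ADMIN only), spawn the io_uring-sq kthread, optionally pinned to
 * p->sq_thread_cpu when IORING_SETUP_SQ_AFF is set, then set up io-wq.
 */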
Jens Axboe6c271ce2019-01-10 11:22:30 -07005915static int io_sq_offload_start(struct io_ring_ctx *ctx,
5916 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005917{
5918 int ret;
5919
Jens Axboe6c271ce2019-01-10 11:22:30 -07005920 init_waitqueue_head(&ctx->sqo_wait);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005921 mmgrab(current->mm);
5922 ctx->sqo_mm = current->mm;
5923
Jens Axboe6c271ce2019-01-10 11:22:30 -07005924 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe3ec482d2019-04-08 10:51:01 -06005925 ret = -EPERM;
5926 if (!capable(CAP_SYS_ADMIN))
5927 goto err;
5928
Jens Axboe917257d2019-04-13 09:28:55 -06005929 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
5930 if (!ctx->sq_thread_idle)
5931 ctx->sq_thread_idle = HZ;
5932
Jens Axboe6c271ce2019-01-10 11:22:30 -07005933 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06005934 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005935
Jens Axboe917257d2019-04-13 09:28:55 -06005936 ret = -EINVAL;
Jens Axboe44a9bd12019-05-14 20:00:30 -06005937 if (cpu >= nr_cpu_ids)
5938 goto err;
Shenghui Wang7889f442019-05-07 16:03:19 +08005939 if (!cpu_online(cpu))
Jens Axboe917257d2019-04-13 09:28:55 -06005940 goto err;
5941
Jens Axboe6c271ce2019-01-10 11:22:30 -07005942 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
5943 ctx, cpu,
5944 "io_uring-sq");
5945 } else {
5946 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
5947 "io_uring-sq");
5948 }
5949 if (IS_ERR(ctx->sqo_thread)) {
5950 ret = PTR_ERR(ctx->sqo_thread);
5951 ctx->sqo_thread = NULL;
5952 goto err;
5953 }
5954 wake_up_process(ctx->sqo_thread);
5955 } else if (p->flags & IORING_SETUP_SQ_AFF) {
5956 /* Can't have SQ_AFF without SQPOLL */
5957 ret = -EINVAL;
5958 goto err;
5959 }
5960
Pavel Begunkov24369c22020-01-28 03:15:48 +03005961 ret = io_init_wq_offload(ctx, p);
5962 if (ret)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005963 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005964
5965 return 0;
5966err:
Jens Axboe54a91f32019-09-10 09:15:04 -06005967 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005968 mmdrop(ctx->sqo_mm);
5969 ctx->sqo_mm = NULL;
5970 return ret;
5971}
5972
5973static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
5974{
5975 atomic_long_sub(nr_pages, &user->locked_vm);
5976}
5977
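/*
 * Charge nr_pages against the user's RLIMIT_MEMLOCK budget via a cmpxchg loop
 * on user->locked_vm, failing with -ENOMEM if the limit would be exceeded.
 */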
5978static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
5979{
5980 unsigned long page_limit, cur_pages, new_pages;
5981
5982 /* Don't allow more pages than we can safely lock */
5983 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
5984
5985 do {
5986 cur_pages = atomic_long_read(&user->locked_vm);
5987 new_pages = cur_pages + nr_pages;
5988 if (new_pages > page_limit)
5989 return -ENOMEM;
5990 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
5991 new_pages) != cur_pages);
5992
5993 return 0;
5994}
5995
5996static void io_mem_free(void *ptr)
5997{
Mark Rutland52e04ef2019-04-30 17:30:21 +01005998 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005999
Mark Rutland52e04ef2019-04-30 17:30:21 +01006000 if (!ptr)
6001 return;
6002
6003 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006004 if (put_page_testzero(page))
6005 free_compound_page(page);
6006}
6007
6008static void *io_mem_alloc(size_t size)
6009{
6010 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
6011 __GFP_NORETRY;
6012
6013 return (void *) __get_free_pages(gfp_flags, get_order(size));
6014}
6015
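/*
 * Size of the shared rings area: the io_rings header with the CQE array
 * appended (struct_size), cacheline-aligned, followed by the u32 SQ index
 * array. The SQE array itself is a separate allocation (see
 * io_allocate_scq_urings()).
 */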
Hristo Venev75b28af2019-08-26 17:23:46 +00006016static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
6017 size_t *sq_offset)
6018{
6019 struct io_rings *rings;
6020 size_t off, sq_array_size;
6021
6022 off = struct_size(rings, cqes, cq_entries);
6023 if (off == SIZE_MAX)
6024 return SIZE_MAX;
6025
6026#ifdef CONFIG_SMP
6027 off = ALIGN(off, SMP_CACHE_BYTES);
6028 if (off == 0)
6029 return SIZE_MAX;
6030#endif
6031
6032 sq_array_size = array_size(sizeof(u32), sq_entries);
6033 if (sq_array_size == SIZE_MAX)
6034 return SIZE_MAX;
6035
6036 if (check_add_overflow(off, sq_array_size, &off))
6037 return SIZE_MAX;
6038
6039 if (sq_offset)
6040 *sq_offset = off;
6041
6042 return off;
6043}
6044
Jens Axboe2b188cc2019-01-07 10:46:33 -07006045static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
6046{
Hristo Venev75b28af2019-08-26 17:23:46 +00006047 size_t pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006048
Hristo Venev75b28af2019-08-26 17:23:46 +00006049 pages = (size_t)1 << get_order(
6050 rings_size(sq_entries, cq_entries, NULL));
6051 pages += (size_t)1 << get_order(
6052 array_size(sizeof(struct io_uring_sqe), sq_entries));
Jens Axboe2b188cc2019-01-07 10:46:33 -07006053
Hristo Venev75b28af2019-08-26 17:23:46 +00006054 return pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006055}
6056
Jens Axboeedafcce2019-01-09 09:16:05 -07006057static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
6058{
6059 int i, j;
6060
6061 if (!ctx->user_bufs)
6062 return -ENXIO;
6063
6064 for (i = 0; i < ctx->nr_user_bufs; i++) {
6065 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
6066
6067 for (j = 0; j < imu->nr_bvecs; j++)
John Hubbardf1f6a7d2020-01-30 22:13:35 -08006068 unpin_user_page(imu->bvec[j].bv_page);
Jens Axboeedafcce2019-01-09 09:16:05 -07006069
6070 if (ctx->account_mem)
6071 io_unaccount_mem(ctx->user, imu->nr_bvecs);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01006072 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07006073 imu->nr_bvecs = 0;
6074 }
6075
6076 kfree(ctx->user_bufs);
6077 ctx->user_bufs = NULL;
6078 ctx->nr_user_bufs = 0;
6079 return 0;
6080}
6081
6082static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
6083 void __user *arg, unsigned index)
6084{
6085 struct iovec __user *src;
6086
6087#ifdef CONFIG_COMPAT
6088 if (ctx->compat) {
6089 struct compat_iovec __user *ciovs;
6090 struct compat_iovec ciov;
6091
6092 ciovs = (struct compat_iovec __user *) arg;
6093 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
6094 return -EFAULT;
6095
Jens Axboed55e5f52019-12-11 16:12:15 -07006096 dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
Jens Axboeedafcce2019-01-09 09:16:05 -07006097 dst->iov_len = ciov.iov_len;
6098 return 0;
6099 }
6100#endif
6101 src = (struct iovec __user *) arg;
6102 if (copy_from_user(dst, &src[index], sizeof(*dst)))
6103 return -EFAULT;
6104 return 0;
6105}
6106
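/*
 * Register fixed buffers: each iovec is pinned with pin_user_pages()
 * (FOLL_WRITE | FOLL_LONGTERM), rejected if it maps file-backed (non-hugetlb)
 * VMAs, accounted against RLIMIT_MEMLOCK, and described by a per-page bvec
 * array for later fixed-buffer IO.
 */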
6107static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
6108 unsigned nr_args)
6109{
6110 struct vm_area_struct **vmas = NULL;
6111 struct page **pages = NULL;
6112 int i, j, got_pages = 0;
6113 int ret = -EINVAL;
6114
6115 if (ctx->user_bufs)
6116 return -EBUSY;
6117 if (!nr_args || nr_args > UIO_MAXIOV)
6118 return -EINVAL;
6119
6120 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
6121 GFP_KERNEL);
6122 if (!ctx->user_bufs)
6123 return -ENOMEM;
6124
6125 for (i = 0; i < nr_args; i++) {
6126 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
6127 unsigned long off, start, end, ubuf;
6128 int pret, nr_pages;
6129 struct iovec iov;
6130 size_t size;
6131
6132 ret = io_copy_iov(ctx, &iov, arg, i);
6133 if (ret)
Pavel Begunkova2786822019-05-26 12:35:47 +03006134 goto err;
Jens Axboeedafcce2019-01-09 09:16:05 -07006135
6136 /*
6137 * Don't impose further limits on the size and buffer
6138 * constraints here; we'll -EINVAL later when IO is
6139 * submitted if they are wrong.
6140 */
6141 ret = -EFAULT;
6142 if (!iov.iov_base || !iov.iov_len)
6143 goto err;
6144
6145 /* arbitrary limit, but we need something */
6146 if (iov.iov_len > SZ_1G)
6147 goto err;
6148
6149 ubuf = (unsigned long) iov.iov_base;
6150 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
6151 start = ubuf >> PAGE_SHIFT;
6152 nr_pages = end - start;
6153
6154 if (ctx->account_mem) {
6155 ret = io_account_mem(ctx->user, nr_pages);
6156 if (ret)
6157 goto err;
6158 }
6159
6160 ret = 0;
6161 if (!pages || nr_pages > got_pages) {
6162 kfree(vmas);
6163 kfree(pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01006164 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
Jens Axboeedafcce2019-01-09 09:16:05 -07006165 GFP_KERNEL);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01006166 vmas = kvmalloc_array(nr_pages,
Jens Axboeedafcce2019-01-09 09:16:05 -07006167 sizeof(struct vm_area_struct *),
6168 GFP_KERNEL);
6169 if (!pages || !vmas) {
6170 ret = -ENOMEM;
6171 if (ctx->account_mem)
6172 io_unaccount_mem(ctx->user, nr_pages);
6173 goto err;
6174 }
6175 got_pages = nr_pages;
6176 }
6177
Mark Rutlandd4ef6472019-05-01 16:59:16 +01006178 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
Jens Axboeedafcce2019-01-09 09:16:05 -07006179 GFP_KERNEL);
6180 ret = -ENOMEM;
6181 if (!imu->bvec) {
6182 if (ctx->account_mem)
6183 io_unaccount_mem(ctx->user, nr_pages);
6184 goto err;
6185 }
6186
6187 ret = 0;
6188 down_read(&current->mm->mmap_sem);
John Hubbard2113b052020-01-30 22:13:13 -08006189 pret = pin_user_pages(ubuf, nr_pages,
Ira Weiny932f4a62019-05-13 17:17:03 -07006190 FOLL_WRITE | FOLL_LONGTERM,
6191 pages, vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07006192 if (pret == nr_pages) {
6193 /* don't support file backed memory */
6194 for (j = 0; j < nr_pages; j++) {
6195 struct vm_area_struct *vma = vmas[j];
6196
6197 if (vma->vm_file &&
6198 !is_file_hugepages(vma->vm_file)) {
6199 ret = -EOPNOTSUPP;
6200 break;
6201 }
6202 }
6203 } else {
6204 ret = pret < 0 ? pret : -EFAULT;
6205 }
6206 up_read(&current->mm->mmap_sem);
6207 if (ret) {
6208 /*
6209 * If we did a partial map, or found file-backed vmas,
6210 * release any pages we did get
6211 */
John Hubbard27c4d3a2019-08-04 19:32:06 -07006212 if (pret > 0)
John Hubbardf1f6a7d2020-01-30 22:13:35 -08006213 unpin_user_pages(pages, pret);
Jens Axboeedafcce2019-01-09 09:16:05 -07006214 if (ctx->account_mem)
6215 io_unaccount_mem(ctx->user, nr_pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01006216 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07006217 goto err;
6218 }
6219
6220 off = ubuf & ~PAGE_MASK;
6221 size = iov.iov_len;
6222 for (j = 0; j < nr_pages; j++) {
6223 size_t vec_len;
6224
6225 vec_len = min_t(size_t, size, PAGE_SIZE - off);
6226 imu->bvec[j].bv_page = pages[j];
6227 imu->bvec[j].bv_len = vec_len;
6228 imu->bvec[j].bv_offset = off;
6229 off = 0;
6230 size -= vec_len;
6231 }
6232 /* store original address for later verification */
6233 imu->ubuf = ubuf;
6234 imu->len = iov.iov_len;
6235 imu->nr_bvecs = nr_pages;
6236
6237 ctx->nr_user_bufs++;
6238 }
Mark Rutlandd4ef6472019-05-01 16:59:16 +01006239 kvfree(pages);
6240 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07006241 return 0;
6242err:
Mark Rutlandd4ef6472019-05-01 16:59:16 +01006243 kvfree(pages);
6244 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07006245 io_sqe_buffer_unregister(ctx);
6246 return ret;
6247}
6248
Jens Axboe9b402842019-04-11 11:45:41 -06006249static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
6250{
6251 __s32 __user *fds = arg;
6252 int fd;
6253
6254 if (ctx->cq_ev_fd)
6255 return -EBUSY;
6256
6257 if (copy_from_user(&fd, fds, sizeof(*fds)))
6258 return -EFAULT;
6259
6260 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
6261 if (IS_ERR(ctx->cq_ev_fd)) {
6262 int ret = PTR_ERR(ctx->cq_ev_fd);
6263 ctx->cq_ev_fd = NULL;
6264 return ret;
6265 }
6266
6267 return 0;
6268}
6269
6270static int io_eventfd_unregister(struct io_ring_ctx *ctx)
6271{
6272 if (ctx->cq_ev_fd) {
6273 eventfd_ctx_put(ctx->cq_ev_fd);
6274 ctx->cq_ev_fd = NULL;
6275 return 0;
6276 }
6277
6278 return -ENXIO;
6279}
6280
Jens Axboe2b188cc2019-01-07 10:46:33 -07006281static void io_ring_ctx_free(struct io_ring_ctx *ctx)
6282{
Jens Axboe6b063142019-01-10 22:13:58 -07006283 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006284 if (ctx->sqo_mm)
6285 mmdrop(ctx->sqo_mm);
Jens Axboedef596e2019-01-09 08:59:42 -07006286
6287 io_iopoll_reap_events(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -07006288 io_sqe_buffer_unregister(ctx);
Jens Axboe6b063142019-01-10 22:13:58 -07006289 io_sqe_files_unregister(ctx);
Jens Axboe9b402842019-04-11 11:45:41 -06006290 io_eventfd_unregister(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -07006291
Jens Axboe2b188cc2019-01-07 10:46:33 -07006292#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07006293 if (ctx->ring_sock) {
6294 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07006295 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07006296 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07006297#endif
6298
Hristo Venev75b28af2019-08-26 17:23:46 +00006299 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006300 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006301
6302 percpu_ref_exit(&ctx->refs);
6303 if (ctx->account_mem)
6304 io_unaccount_mem(ctx->user,
6305 ring_pages(ctx->sq_entries, ctx->cq_entries));
6306 free_uid(ctx->user);
Jens Axboe181e4482019-11-25 08:52:30 -07006307 put_cred(ctx->creds);
Jens Axboe206aefd2019-11-07 18:27:42 -07006308 kfree(ctx->completions);
Jens Axboe78076bb2019-12-04 19:56:40 -07006309 kfree(ctx->cancel_hash);
Jens Axboe0ddf92e2019-11-08 08:52:53 -07006310 kmem_cache_free(req_cachep, ctx->fallback_req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006311 kfree(ctx);
6312}
6313
6314static __poll_t io_uring_poll(struct file *file, poll_table *wait)
6315{
6316 struct io_ring_ctx *ctx = file->private_data;
6317 __poll_t mask = 0;
6318
6319 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02006320 /*
6321 * synchronizes with barrier from wq_has_sleeper call in
6322 * io_commit_cqring
6323 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07006324 smp_rmb();
Hristo Venev75b28af2019-08-26 17:23:46 +00006325 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
6326 ctx->rings->sq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006327 mask |= EPOLLOUT | EPOLLWRNORM;
Stefano Garzarella63e5d812020-02-07 13:18:28 +01006328 if (io_cqring_events(ctx, false))
Jens Axboe2b188cc2019-01-07 10:46:33 -07006329 mask |= EPOLLIN | EPOLLRDNORM;
6330
6331 return mask;
6332}
6333
6334static int io_uring_fasync(int fd, struct file *file, int on)
6335{
6336 struct io_ring_ctx *ctx = file->private_data;
6337
6338 return fasync_helper(fd, file, on, &ctx->cq_fasync);
6339}
6340
Jens Axboe071698e2020-01-28 10:04:42 -07006341static int io_remove_personalities(int id, void *p, void *data)
6342{
6343 struct io_ring_ctx *ctx = data;
6344 const struct cred *cred;
6345
6346 cred = idr_remove(&ctx->personality_idr, id);
6347 if (cred)
6348 put_cred(cred);
6349 return 0;
6350}
6351
Jens Axboe2b188cc2019-01-07 10:46:33 -07006352static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
6353{
6354 mutex_lock(&ctx->uring_lock);
6355 percpu_ref_kill(&ctx->refs);
6356 mutex_unlock(&ctx->uring_lock);
6357
Jens Axboedf069d82020-02-04 16:48:34 -07006358 /*
6359 * Wait for sq thread to idle, if we have one. It won't spin on new
6360 * work after we've killed the ctx ref above. This is important to do
6361 * before we cancel existing commands, as the thread could otherwise
6362 * be queueing new work after that. If that's work we need to cancel,
6363 * it could cause shutdown to hang.
6364 */
6365 while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
6366 cpu_relax();
6367
Jens Axboe5262f562019-09-17 12:26:57 -06006368 io_kill_timeouts(ctx);
Jens Axboe221c5eb2019-01-17 09:41:58 -07006369 io_poll_remove_all(ctx);
Jens Axboe561fb042019-10-24 07:25:42 -06006370
6371 if (ctx->io_wq)
6372 io_wq_cancel_all(ctx->io_wq);
6373
Jens Axboedef596e2019-01-09 08:59:42 -07006374 io_iopoll_reap_events(ctx);
Jens Axboe15dff282019-11-13 09:09:23 -07006375 /* if we failed setting up the ctx, we might not have any rings */
6376 if (ctx->rings)
6377 io_cqring_overflow_flush(ctx, true);
Jens Axboe071698e2020-01-28 10:04:42 -07006378 idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
Jens Axboe206aefd2019-11-07 18:27:42 -07006379 wait_for_completion(&ctx->completions[0]);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006380 io_ring_ctx_free(ctx);
6381}
6382
6383static int io_uring_release(struct inode *inode, struct file *file)
6384{
6385 struct io_ring_ctx *ctx = file->private_data;
6386
6387 file->private_data = NULL;
6388 io_ring_ctx_wait_and_kill(ctx);
6389 return 0;
6390}
6391
Jens Axboefcb323c2019-10-24 12:39:47 -06006392static void io_uring_cancel_files(struct io_ring_ctx *ctx,
6393 struct files_struct *files)
6394{
6395 struct io_kiocb *req;
6396 DEFINE_WAIT(wait);
6397
6398 while (!list_empty_careful(&ctx->inflight_list)) {
Jens Axboe768134d2019-11-10 20:30:53 -07006399 struct io_kiocb *cancel_req = NULL;
Jens Axboefcb323c2019-10-24 12:39:47 -06006400
6401 spin_lock_irq(&ctx->inflight_lock);
6402 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
Jens Axboe768134d2019-11-10 20:30:53 -07006403 if (req->work.files != files)
6404 continue;
6405 /* req is being completed, ignore */
6406 if (!refcount_inc_not_zero(&req->refs))
6407 continue;
6408 cancel_req = req;
6409 break;
Jens Axboefcb323c2019-10-24 12:39:47 -06006410 }
Jens Axboe768134d2019-11-10 20:30:53 -07006411 if (cancel_req)
Jens Axboefcb323c2019-10-24 12:39:47 -06006412 prepare_to_wait(&ctx->inflight_wait, &wait,
Jens Axboe768134d2019-11-10 20:30:53 -07006413 TASK_UNINTERRUPTIBLE);
Jens Axboefcb323c2019-10-24 12:39:47 -06006414 spin_unlock_irq(&ctx->inflight_lock);
6415
Jens Axboe768134d2019-11-10 20:30:53 -07006416 /* We need to keep going until we don't find a matching req */
6417 if (!cancel_req)
Jens Axboefcb323c2019-10-24 12:39:47 -06006418 break;
Bob Liu2f6d9b92019-11-13 18:06:24 +08006419
6420 io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
6421 io_put_req(cancel_req);
Jens Axboefcb323c2019-10-24 12:39:47 -06006422 schedule();
6423 }
Jens Axboe768134d2019-11-10 20:30:53 -07006424 finish_wait(&ctx->inflight_wait, &wait);
Jens Axboefcb323c2019-10-24 12:39:47 -06006425}
6426
6427static int io_uring_flush(struct file *file, void *data)
6428{
6429 struct io_ring_ctx *ctx = file->private_data;
6430
6431 io_uring_cancel_files(ctx, data);
Jens Axboefcb323c2019-10-24 12:39:47 -06006432 return 0;
6433}
6434
Roman Penyaev6c5c2402019-11-28 12:53:22 +01006435static void *io_uring_validate_mmap_request(struct file *file,
6436 loff_t pgoff, size_t sz)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006437{
Jens Axboe2b188cc2019-01-07 10:46:33 -07006438 struct io_ring_ctx *ctx = file->private_data;
Roman Penyaev6c5c2402019-11-28 12:53:22 +01006439 loff_t offset = pgoff << PAGE_SHIFT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006440 struct page *page;
6441 void *ptr;
6442
6443 switch (offset) {
6444 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00006445 case IORING_OFF_CQ_RING:
6446 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006447 break;
6448 case IORING_OFF_SQES:
6449 ptr = ctx->sq_sqes;
6450 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006451 default:
Roman Penyaev6c5c2402019-11-28 12:53:22 +01006452 return ERR_PTR(-EINVAL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006453 }
6454
6455 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07006456 if (sz > page_size(page))
Roman Penyaev6c5c2402019-11-28 12:53:22 +01006457 return ERR_PTR(-EINVAL);
6458
6459 return ptr;
6460}
6461
6462#ifdef CONFIG_MMU
6463
6464static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
6465{
6466 size_t sz = vma->vm_end - vma->vm_start;
6467 unsigned long pfn;
6468 void *ptr;
6469
6470 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
6471 if (IS_ERR(ptr))
6472 return PTR_ERR(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006473
6474 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
6475 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
6476}
6477
Roman Penyaev6c5c2402019-11-28 12:53:22 +01006478#else /* !CONFIG_MMU */
6479
6480static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
6481{
6482 return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
6483}
6484
6485static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
6486{
6487 return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
6488}
6489
6490static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
6491 unsigned long addr, unsigned long len,
6492 unsigned long pgoff, unsigned long flags)
6493{
6494 void *ptr;
6495
6496 ptr = io_uring_validate_mmap_request(file, pgoff, len);
6497 if (IS_ERR(ptr))
6498 return PTR_ERR(ptr);
6499
6500 return (unsigned long) ptr;
6501}
6502
6503#endif /* !CONFIG_MMU */
6504
Jens Axboe2b188cc2019-01-07 10:46:33 -07006505SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
6506 u32, min_complete, u32, flags, const sigset_t __user *, sig,
6507 size_t, sigsz)
6508{
6509 struct io_ring_ctx *ctx;
6510 long ret = -EBADF;
6511 int submitted = 0;
6512 struct fd f;
6513
Jens Axboe6c271ce2019-01-10 11:22:30 -07006514 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
Jens Axboe2b188cc2019-01-07 10:46:33 -07006515 return -EINVAL;
6516
6517 f = fdget(fd);
6518 if (!f.file)
6519 return -EBADF;
6520
6521 ret = -EOPNOTSUPP;
6522 if (f.file->f_op != &io_uring_fops)
6523 goto out_fput;
6524
6525 ret = -ENXIO;
6526 ctx = f.file->private_data;
6527 if (!percpu_ref_tryget(&ctx->refs))
6528 goto out_fput;
6529
Jens Axboe6c271ce2019-01-10 11:22:30 -07006530 /*
6531 * For SQ polling, the thread will do all submissions and completions.
6532 * Just return the requested submit count, and wake the thread if
6533 * we were asked to.
6534 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06006535 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006536 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboec1edbf52019-11-10 16:56:04 -07006537 if (!list_empty_careful(&ctx->cq_overflow_list))
6538 io_cqring_overflow_flush(ctx, false);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006539 if (flags & IORING_ENTER_SQ_WAKEUP)
6540 wake_up(&ctx->sqo_wait);
6541 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06006542 } else if (to_submit) {
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006543 struct mm_struct *cur_mm;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006544
6545 mutex_lock(&ctx->uring_lock);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03006546 /* already have mm, so io_submit_sqes() won't try to grab it */
6547 cur_mm = ctx->sqo_mm;
6548 submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
6549 &cur_mm, false);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006550 mutex_unlock(&ctx->uring_lock);
Pavel Begunkov7c504e652019-12-18 19:53:45 +03006551
6552 if (submitted != to_submit)
6553 goto out;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006554 }
6555 if (flags & IORING_ENTER_GETEVENTS) {
Jens Axboedef596e2019-01-09 08:59:42 -07006556 unsigned nr_events = 0;
6557
Jens Axboe2b188cc2019-01-07 10:46:33 -07006558 min_complete = min(min_complete, ctx->cq_entries);
6559
Jens Axboedef596e2019-01-09 08:59:42 -07006560 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07006561 ret = io_iopoll_check(ctx, &nr_events, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07006562 } else {
6563 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
6564 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07006565 }
6566
Pavel Begunkov7c504e652019-12-18 19:53:45 +03006567out:
Pavel Begunkov6805b322019-10-08 02:18:42 +03006568 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006569out_fput:
6570 fdput(f);
6571 return submitted ? submitted : ret;
6572}
6573
Jens Axboe87ce9552020-01-30 08:25:34 -07006574static int io_uring_show_cred(int id, void *p, void *data)
6575{
6576 const struct cred *cred = p;
6577 struct seq_file *m = data;
6578 struct user_namespace *uns = seq_user_ns(m);
6579 struct group_info *gi;
6580 kernel_cap_t cap;
6581 unsigned __capi;
6582 int g;
6583
6584 seq_printf(m, "%5d\n", id);
6585 seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
6586 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
6587 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
6588 seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
6589 seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
6590 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
6591 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
6592 seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
6593 seq_puts(m, "\n\tGroups:\t");
6594 gi = cred->group_info;
6595 for (g = 0; g < gi->ngroups; g++) {
6596 seq_put_decimal_ull(m, g ? " " : "",
6597 from_kgid_munged(uns, gi->gid[g]));
6598 }
6599 seq_puts(m, "\n\tCapEff:\t");
6600 cap = cred->cap_effective;
6601 CAP_FOR_EACH_U32(__capi)
6602 seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
6603 seq_putc(m, '\n');
6604 return 0;
6605}
6606
6607static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
6608{
6609 int i;
6610
6611 mutex_lock(&ctx->uring_lock);
6612 seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
6613 for (i = 0; i < ctx->nr_user_files; i++) {
6614 struct fixed_file_table *table;
6615 struct file *f;
6616
6617 table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
6618 f = table->files[i & IORING_FILE_TABLE_MASK];
6619 if (f)
6620 seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
6621 else
6622 seq_printf(m, "%5u: <none>\n", i);
6623 }
6624 seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
6625 for (i = 0; i < ctx->nr_user_bufs; i++) {
6626 struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
6627
6628 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
6629 (unsigned int) buf->len);
6630 }
6631 if (!idr_is_empty(&ctx->personality_idr)) {
6632 seq_printf(m, "Personalities:\n");
6633 idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
6634 }
6635 mutex_unlock(&ctx->uring_lock);
6636}
6637
6638static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
6639{
6640 struct io_ring_ctx *ctx = f->private_data;
6641
6642 if (percpu_ref_tryget(&ctx->refs)) {
6643 __io_uring_show_fdinfo(ctx, m);
6644 percpu_ref_put(&ctx->refs);
6645 }
6646}
6647
Jens Axboe2b188cc2019-01-07 10:46:33 -07006648static const struct file_operations io_uring_fops = {
6649 .release = io_uring_release,
Jens Axboefcb323c2019-10-24 12:39:47 -06006650 .flush = io_uring_flush,
Jens Axboe2b188cc2019-01-07 10:46:33 -07006651 .mmap = io_uring_mmap,
Roman Penyaev6c5c2402019-11-28 12:53:22 +01006652#ifndef CONFIG_MMU
6653 .get_unmapped_area = io_uring_nommu_get_unmapped_area,
6654 .mmap_capabilities = io_uring_nommu_mmap_capabilities,
6655#endif
Jens Axboe2b188cc2019-01-07 10:46:33 -07006656 .poll = io_uring_poll,
6657 .fasync = io_uring_fasync,
Jens Axboe87ce9552020-01-30 08:25:34 -07006658 .show_fdinfo = io_uring_show_fdinfo,
Jens Axboe2b188cc2019-01-07 10:46:33 -07006659};
6660
6661static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
6662 struct io_uring_params *p)
6663{
Hristo Venev75b28af2019-08-26 17:23:46 +00006664 struct io_rings *rings;
6665 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006666
Hristo Venev75b28af2019-08-26 17:23:46 +00006667 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
6668 if (size == SIZE_MAX)
6669 return -EOVERFLOW;
6670
6671 rings = io_mem_alloc(size);
6672 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006673 return -ENOMEM;
6674
Hristo Venev75b28af2019-08-26 17:23:46 +00006675 ctx->rings = rings;
6676 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
6677 rings->sq_ring_mask = p->sq_entries - 1;
6678 rings->cq_ring_mask = p->cq_entries - 1;
6679 rings->sq_ring_entries = p->sq_entries;
6680 rings->cq_ring_entries = p->cq_entries;
6681 ctx->sq_mask = rings->sq_ring_mask;
6682 ctx->cq_mask = rings->cq_ring_mask;
6683 ctx->sq_entries = rings->sq_ring_entries;
6684 ctx->cq_entries = rings->cq_ring_entries;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006685
6686 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
Jens Axboeeb065d32019-11-20 09:26:29 -07006687 if (size == SIZE_MAX) {
6688 io_mem_free(ctx->rings);
6689 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006690 return -EOVERFLOW;
Jens Axboeeb065d32019-11-20 09:26:29 -07006691 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07006692
6693 ctx->sq_sqes = io_mem_alloc(size);
Jens Axboeeb065d32019-11-20 09:26:29 -07006694 if (!ctx->sq_sqes) {
6695 io_mem_free(ctx->rings);
6696 ctx->rings = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006697 return -ENOMEM;
Jens Axboeeb065d32019-11-20 09:26:29 -07006698 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07006699
Jens Axboe2b188cc2019-01-07 10:46:33 -07006700 return 0;
6701}
6702
6703/*
6704 * Allocate an anonymous fd; this is what constitutes the application-
6705 * visible backing of an io_uring instance. The application mmaps this
6706 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
6707 * we have to tie this fd to a socket for file garbage collection purposes.
6708 */
6709static int io_uring_get_fd(struct io_ring_ctx *ctx)
6710{
6711 struct file *file;
6712 int ret;
6713
6714#if defined(CONFIG_UNIX)
6715 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
6716 &ctx->ring_sock);
6717 if (ret)
6718 return ret;
6719#endif
6720
6721 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
6722 if (ret < 0)
6723 goto err;
6724
6725 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
6726 O_RDWR | O_CLOEXEC);
6727 if (IS_ERR(file)) {
6728 put_unused_fd(ret);
6729 ret = PTR_ERR(file);
6730 goto err;
6731 }
6732
6733#if defined(CONFIG_UNIX)
6734 ctx->ring_sock->file = file;
6735#endif
6736 fd_install(ret, file);
6737 return ret;
6738err:
6739#if defined(CONFIG_UNIX)
6740 sock_release(ctx->ring_sock);
6741 ctx->ring_sock = NULL;
6742#endif
6743 return ret;
6744}
6745
6746static int io_uring_create(unsigned entries, struct io_uring_params *p)
6747{
6748 struct user_struct *user = NULL;
6749 struct io_ring_ctx *ctx;
6750 bool account_mem;
6751 int ret;
6752
Jens Axboe8110c1a2019-12-28 15:39:54 -07006753 if (!entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07006754 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07006755 if (entries > IORING_MAX_ENTRIES) {
6756 if (!(p->flags & IORING_SETUP_CLAMP))
6757 return -EINVAL;
6758 entries = IORING_MAX_ENTRIES;
6759 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07006760
6761 /*
6762 * Use twice as many entries for the CQ ring. It's possible for the
6763 * application to drive a higher depth than the size of the SQ ring,
6764 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06006765 * some flexibility in overcommitting a bit. If the application has
6766 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
6767 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07006768 */
6769 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06006770 if (p->flags & IORING_SETUP_CQSIZE) {
6771 /*
6772 * If IORING_SETUP_CQSIZE is set, we do the same roundup
6773 * to a power-of-two, if it isn't already. We do NOT impose
6774 * any cq vs sq ring sizing.
6775 */
Jens Axboe8110c1a2019-12-28 15:39:54 -07006776 if (p->cq_entries < p->sq_entries)
Jens Axboe33a107f2019-10-04 12:10:03 -06006777 return -EINVAL;
Jens Axboe8110c1a2019-12-28 15:39:54 -07006778 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
6779 if (!(p->flags & IORING_SETUP_CLAMP))
6780 return -EINVAL;
6781 p->cq_entries = IORING_MAX_CQ_ENTRIES;
6782 }
Jens Axboe33a107f2019-10-04 12:10:03 -06006783 p->cq_entries = roundup_pow_of_two(p->cq_entries);
6784 } else {
6785 p->cq_entries = 2 * p->sq_entries;
6786 }
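	/*
	 * Worked example: entries == 100 gives sq_entries == 128 and, without
	 * IORING_SETUP_CQSIZE, cq_entries == 256.
	 */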
Jens Axboe2b188cc2019-01-07 10:46:33 -07006787
6788 user = get_uid(current_user());
6789 account_mem = !capable(CAP_IPC_LOCK);
6790
6791 if (account_mem) {
6792 ret = io_account_mem(user,
6793 ring_pages(p->sq_entries, p->cq_entries));
6794 if (ret) {
6795 free_uid(user);
6796 return ret;
6797 }
6798 }
6799
6800 ctx = io_ring_ctx_alloc(p);
6801 if (!ctx) {
6802 if (account_mem)
6803 io_unaccount_mem(user, ring_pages(p->sq_entries,
6804 p->cq_entries));
6805 free_uid(user);
6806 return -ENOMEM;
6807 }
6808 ctx->compat = in_compat_syscall();
6809 ctx->account_mem = account_mem;
6810 ctx->user = user;
Jens Axboe0b8c0ec2019-12-02 08:50:00 -07006811 ctx->creds = get_current_cred();
Jens Axboe2b188cc2019-01-07 10:46:33 -07006812
6813 ret = io_allocate_scq_urings(ctx, p);
6814 if (ret)
6815 goto err;
6816
Jens Axboe6c271ce2019-01-10 11:22:30 -07006817 ret = io_sq_offload_start(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006818 if (ret)
6819 goto err;
6820
Jens Axboe2b188cc2019-01-07 10:46:33 -07006821 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00006822 p->sq_off.head = offsetof(struct io_rings, sq.head);
6823 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
6824 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
6825 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
6826 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
6827 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
6828 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006829
6830 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00006831 p->cq_off.head = offsetof(struct io_rings, cq.head);
6832 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
6833 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
6834 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
6835 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
6836 p->cq_off.cqes = offsetof(struct io_rings, cqes);
Jens Axboeac90f242019-09-06 10:26:21 -06006837
Jens Axboe044c1ab2019-10-28 09:15:33 -06006838 /*
6839 * Install ring fd as the very last thing, so we don't risk someone
6840 * having closed it before we finish setup
6841 */
6842 ret = io_uring_get_fd(ctx);
6843 if (ret < 0)
6844 goto err;
6845
Jens Axboeda8c9692019-12-02 18:51:26 -07006846 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
Jens Axboecccf0ee2020-01-27 16:34:48 -07006847 IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
6848 IORING_FEAT_CUR_PERSONALITY;
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02006849 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006850 return ret;
6851err:
6852 io_ring_ctx_wait_and_kill(ctx);
6853 return ret;
6854}
6855
6856/*
6857 * Sets up an io_uring context and returns the fd. The application asks for a
6858 * ring size; we return the actual sq/cq ring sizes (among other things) in the
6859 * params structure passed in.
6860 */
6861static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
6862{
6863 struct io_uring_params p;
6864 long ret;
6865 int i;
6866
6867 if (copy_from_user(&p, params, sizeof(p)))
6868 return -EFAULT;
6869 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
6870 if (p.resv[i])
6871 return -EINVAL;
6872 }
6873
Jens Axboe6c271ce2019-01-10 11:22:30 -07006874 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe8110c1a2019-12-28 15:39:54 -07006875 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
Pavel Begunkov24369c22020-01-28 03:15:48 +03006876 IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
Jens Axboe2b188cc2019-01-07 10:46:33 -07006877 return -EINVAL;
6878
6879 ret = io_uring_create(entries, &p);
6880 if (ret < 0)
6881 return ret;
6882
6883 if (copy_to_user(params, &p, sizeof(p)))
6884 return -EFAULT;
6885
6886 return ret;
6887}
6888
6889SYSCALL_DEFINE2(io_uring_setup, u32, entries,
6890 struct io_uring_params __user *, params)
6891{
6892 return io_uring_setup(entries, params);
6893}
6894
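/*
 * IORING_REGISTER_PROBE: report the highest opcode (last_op) and, for each of
 * the first nr_args opcodes, whether it is supported (IO_URING_OP_SUPPORTED),
 * so applications can feature-detect at runtime.
 */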
Jens Axboe66f4af92020-01-16 15:36:52 -07006895static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
6896{
6897 struct io_uring_probe *p;
6898 size_t size;
6899 int i, ret;
6900
6901 size = struct_size(p, ops, nr_args);
6902 if (size == SIZE_MAX)
6903 return -EOVERFLOW;
6904 p = kzalloc(size, GFP_KERNEL);
6905 if (!p)
6906 return -ENOMEM;
6907
6908 ret = -EFAULT;
6909 if (copy_from_user(p, arg, size))
6910 goto out;
6911 ret = -EINVAL;
6912 if (memchr_inv(p, 0, size))
6913 goto out;
6914
6915 p->last_op = IORING_OP_LAST - 1;
6916 if (nr_args > IORING_OP_LAST)
6917 nr_args = IORING_OP_LAST;
6918
6919 for (i = 0; i < nr_args; i++) {
6920 p->ops[i].op = i;
6921 if (!io_op_defs[i].not_supported)
6922 p->ops[i].flags = IO_URING_OP_SUPPORTED;
6923 }
6924 p->ops_len = i;
6925
6926 ret = 0;
6927 if (copy_to_user(arg, p, size))
6928 ret = -EFAULT;
6929out:
6930 kfree(p);
6931 return ret;
6932}
6933
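/*
 * IORING_REGISTER_PERSONALITY: stash a reference to the caller's current
 * credentials in the personality idr and hand back the id, which can later be
 * dropped with IORING_UNREGISTER_PERSONALITY (and, per the uapi, referenced
 * from SQEs via sqe->personality).
 */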
Jens Axboe071698e2020-01-28 10:04:42 -07006934static int io_register_personality(struct io_ring_ctx *ctx)
6935{
6936 const struct cred *creds = get_current_cred();
6937 int id;
6938
6939 id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
6940 USHRT_MAX, GFP_KERNEL);
6941 if (id < 0)
6942 put_cred(creds);
6943 return id;
6944}
6945
6946static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
6947{
6948 const struct cred *old_creds;
6949
6950 old_creds = idr_remove(&ctx->personality_idr, id);
6951 if (old_creds) {
6952 put_cred(old_creds);
6953 return 0;
6954 }
6955
6956 return -EINVAL;
6957}
6958
6959static bool io_register_op_must_quiesce(int op)
6960{
6961 switch (op) {
6962 case IORING_UNREGISTER_FILES:
6963 case IORING_REGISTER_FILES_UPDATE:
6964 case IORING_REGISTER_PROBE:
6965 case IORING_REGISTER_PERSONALITY:
6966 case IORING_UNREGISTER_PERSONALITY:
6967 return false;
6968 default:
6969 return true;
6970 }
6971}
6972
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex; if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (io_register_op_must_quiesce(opcode)) {
		percpu_ref_kill(&ctx->refs);

		/*
		 * Drop uring mutex before waiting for references to exit. If
		 * another thread is currently inside io_uring_enter() it might
		 * need to grab the uring_lock to make progress. If we hold it
		 * here across the drain wait, then we can deadlock. It's safe
		 * to drop the mutex here, since no new references will come in
		 * after we've killed the percpu ref.
		 */
		mutex_unlock(&ctx->uring_lock);
		ret = wait_for_completion_interruptible(&ctx->completions[0]);
		mutex_lock(&ctx->uring_lock);
		if (ret) {
			percpu_ref_resurrect(&ctx->refs);
			ret = -EINTR;
			goto out;
		}
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffer_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffer_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_sqe_files_update(ctx, arg, nr_args);
		break;
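	/*
	 * EVENTFD and EVENTFD_ASYNC share the same registration path; the
	 * async variant additionally sets eventfd_async, which restricts
	 * eventfd notifications to completions posted from async (io-wq)
	 * context.
	 */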
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

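	/*
	 * Note that the out: label below sits inside this block, so the
	 * -EINTR path above also reaches reinit_completion() and leaves
	 * completions[0] ready for the next quiescing register call.
	 */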
	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
out:
		reinit_completion(&ctx->completions[0]);
	}
	return ret;
}

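/*
 * io_uring_register(2): look up the fd, verify it really is an io_uring
 * file, then dispatch to __io_uring_register() under uring_lock.
 *
 * Illustrative userspace sketch, not part of this file: registering a fixed
 * buffer might look roughly like
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_BUFFERS,
 *		&iov, 1);
 */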
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files,
				ctx->nr_user_bufs, ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}

static int __init io_uring_init(void)
{
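	/*
	 * The checks below pin down the userspace-visible SQE layout: the
	 * structure must stay 64 bytes and each field must keep its offset
	 * and size, since the submission ring is shared directly with
	 * applications.
	 */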
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0, __u8, opcode);
	BUILD_BUG_SQE_ELEM(1, __u8, flags);
	BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
	BUILD_BUG_SQE_ELEM(4, __s32, fd);
	BUILD_BUG_SQE_ELEM(8, __u64, off);
	BUILD_BUG_SQE_ELEM(8, __u64, addr2);
	BUILD_BUG_SQE_ELEM(16, __u64, addr);
	BUILD_BUG_SQE_ELEM(24, __u32, len);
	BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
	BUILD_BUG_SQE_ELEM(28, __u16, poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
	BUILD_BUG_SQE_ELEM(32, __u64, user_data);
	BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
	BUILD_BUG_SQE_ELEM(42, __u16, personality);

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
	return 0;
}
__initcall(io_uring_init);