// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
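 *
 * As a minimal userspace sketch of the above protocol (illustrative
 * only; the khead/ktail/kring_mask/kring_entries names are
 * liburing-style assumptions, not defined by this file):
 *
 *	sq_head = smp_load_acquire(sq->khead);	// order SQ head load first
 *	if (sq_tail - sq_head < *sq->kring_entries) {
 *		fill_sqe(&sqes[sq_tail & *sq->kring_mask]);
 *		smp_store_release(sq->ktail, ++sq_tail); // publish SQE stores
 *	}
 *
 *	cq_tail = smp_load_acquire(cq->ktail);	// pairs with kernel tail store
 *	while (cq_head != cq_tail)
 *		consume(&cq->cqes[cq_head++ & *cq->kring_mask]);
 *	smp_store_release(cq->khead, cq_head);	// order CQE loads vs head store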
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
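
/*
 * A fixed-file index is resolved through a two-level table using the
 * shift and mask above; a sketch of the lookup these constants imply
 * (illustrative, not a helper defined at this point in the file):
 *
 *	struct fixed_file_table *table;
 *
 *	table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
 *	file = table->files[i & IORING_FILE_TABLE_MASK];
 */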

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32 sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32 sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32 sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32 sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32 cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32 cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
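
/*
 * A sketch of how an application typically maps these rings (userspace
 * code, assuming 'p' is the io_uring_params filled in by
 * io_uring_setup() and with error handling omitted):
 *
 *	size_t sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	void *sq_ptr = mmap(0, sz, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED | MAP_POPULATE, ring_fd,
 *			    IORING_OFF_SQ_RING);
 *	unsigned *sq_tail = sq_ptr + p.sq_off.tail;
 *
 *	sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *	void *cq_ptr = mmap(0, sz, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED | MAP_POPULATE, ring_fd,
 *			    IORING_OFF_CQ_RING);
 */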

struct io_mapped_ubuf {
	u64 ubuf;
	size_t len;
	struct bio_vec *bvec;
	unsigned int nr_bvecs;
};

struct fixed_file_table {
	struct file **files;
};

struct fixed_file_ref_node {
	struct percpu_ref refs;
	struct list_head node;
	struct list_head file_list;
	struct fixed_file_data *file_data;
	struct llist_node llist;
};

struct fixed_file_data {
	struct fixed_file_table *table;
	struct io_ring_ctx *ctx;

	struct percpu_ref *cur_refs;
	struct percpu_ref refs;
	struct completion done;
	struct list_head ref_list;
	spinlock_t lock;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int flags;
		unsigned int compat: 1;
		unsigned int account_mem: 1;
		unsigned int cq_overflow_flushed: 1;
		unsigned int drain_next: 1;
		unsigned int eventfd_async: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
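		 *
		 * E.g. a sketch of how the application queues sqe slot 'n'
		 * (illustrative only, not an API defined here):
		 *
		 *	sq_array[tail & sq_ring_mask] = n;
		 *	smp_store_release(&sq_ring->tail, tail + 1);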
		 */
		u32 *sq_array;
		unsigned cached_sq_head;
		unsigned sq_entries;
		unsigned sq_mask;
		unsigned sq_thread_idle;
		unsigned cached_sq_dropped;
		atomic_t cached_cq_overflow;
		unsigned long sq_check_overflow;

		struct list_head defer_list;
		struct list_head timeout_list;
		struct list_head cq_overflow_list;

		wait_queue_head_t inflight_wait;
		struct io_uring_sqe *sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct io_rings *rings;

	/* IO offload */
	struct io_wq *io_wq;
	struct task_struct *sqo_thread; /* if using sq thread polling */
	struct mm_struct *sqo_mm;
	wait_queue_head_t sqo_wait;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_data *file_data;
	unsigned nr_user_files;
	int ring_fd;
	struct file *ring_file;

	/* if used, fixed mapped user buffers */
	unsigned nr_user_bufs;
	struct io_mapped_ubuf *user_bufs;

	struct user_struct *user;

	const struct cred *creds;

	struct completion ref_comp;
	struct completion sq_thread_comp;

	/* if all else fails... */
	struct io_kiocb *fallback_req;

#if defined(CONFIG_UNIX)
	struct socket *ring_sock;
#endif

	struct idr io_buffer_idr;

	struct idr personality_idr;

	struct {
		unsigned cached_cq_tail;
		unsigned cq_entries;
		unsigned cq_mask;
		atomic_t cq_timeouts;
		unsigned long cq_check_overflow;
		struct wait_queue_head cq_wait;
		struct fasync_struct *cq_fasync;
		struct eventfd_ctx *cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex uring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t completion_lock;

		/*
		 * ->poll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head poll_list;
		struct hlist_head *cancel_hash;
		unsigned cancel_hash_bits;
		bool poll_multi_file;

		spinlock_t inflight_lock;
		struct list_head inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work file_put_work;
	struct llist_head file_put_llist;

	struct work_struct exit_work;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file *file;
	union {
		struct wait_queue_head *head;
		u64 addr;
	};
	__poll_t events;
	bool done;
	bool canceled;
	struct wait_queue_entry wait;
};

struct io_close {
	struct file *file;
	struct file *put_file;
	int fd;
};

struct io_timeout_data {
	struct io_kiocb *req;
	struct hrtimer timer;
	struct timespec64 ts;
	enum hrtimer_mode mode;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	unsigned long nofile;
};

struct io_sync {
	struct file *file;
	loff_t len;
	loff_t off;
	int flags;
	int mode;
};

struct io_cancel {
	struct file *file;
	u64 addr;
};

struct io_timeout {
	struct file *file;
	u64 addr;
	int flags;
	u32 off;
	u32 target_seq;
};

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb kiocb;
	u64 addr;
	u64 len;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct user_msghdr __user *msg;
		void __user *buf;
	};
	int msg_flags;
	int bgid;
	size_t len;
	struct io_buffer *kbuf;
};

struct io_open {
	struct file *file;
	int dfd;
	struct filename *filename;
	struct open_how how;
	unsigned long nofile;
};

struct io_files_update {
	struct file *file;
	u64 arg;
	u32 nr_args;
	u32 offset;
};

struct io_fadvise {
	struct file *file;
	u64 offset;
	u32 len;
	u32 advice;
};

struct io_madvise {
	struct file *file;
	u64 addr;
	u32 len;
	u32 advice;
};

struct io_epoll {
	struct file *file;
	int epfd;
	int op;
	int fd;
	struct epoll_event event;
};

struct io_splice {
	struct file *file_out;
	struct file *file_in;
	loff_t off_out;
	loff_t off_in;
	u64 len;
	unsigned int flags;
};

struct io_provide_buf {
	struct file *file;
	__u64 addr;
	__s32 len;
	__u32 bgid;
	__u16 nbufs;
	__u16 bid;
};

struct io_statx {
	struct file *file;
	int dfd;
	unsigned int mask;
	unsigned int flags;
	const char __user *filename;
	struct statx __user *buffer;
};

struct io_async_connect {
	struct sockaddr_storage address;
};

struct io_async_msghdr {
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov;
	struct sockaddr __user *uaddr;
	struct msghdr msg;
	struct sockaddr_storage addr;
};

struct io_async_rw {
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov;
	ssize_t nr_segs;
	ssize_t size;
};

struct io_async_ctx {
	union {
		struct io_async_rw rw;
		struct io_async_msghdr msg;
		struct io_async_connect connect;
		struct io_timeout_data timeout;
	};
};

enum {
	REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,

	REQ_F_LINK_HEAD_BIT,
	REQ_F_LINK_NEXT_BIT,
	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_IOPOLL_COMPLETED_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_MUST_PUNT_BIT,
	REQ_F_TIMEOUT_NOSEQ_BIT,
	REQ_F_COMP_LOCKED_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_OVERFLOW_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_NO_FILE_TABLE_BIT,
	REQ_F_QUEUE_TIMEOUT_BIT,
	REQ_F_WORK_INITIALIZED_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK = BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),

	/* head of a link */
	REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT),
	/* already grabbed next link */
	REQ_F_LINK_NEXT = BIT(REQ_F_LINK_NEXT_BIT),
	/* fail rest of links */
	REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
	/* polled IO has completed */
	REQ_F_IOPOLL_COMPLETED = BIT(REQ_F_IOPOLL_COMPLETED_BIT),
	/* has linked timeout */
	REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* timeout request */
	REQ_F_TIMEOUT = BIT(REQ_F_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
	/* must be punted even for NONBLOCK */
	REQ_F_MUST_PUNT = BIT(REQ_F_MUST_PUNT_BIT),
	/* no timeout sequence */
	REQ_F_TIMEOUT_NOSEQ = BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
	/* completion under lock */
	REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
	/* in overflow list */
	REQ_F_OVERFLOW = BIT(REQ_F_OVERFLOW_BIT),
	/* already went through poll handler */
	REQ_F_POLLED = BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* doesn't need file table for this request */
	REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
	/* needs to queue linked timeout */
	REQ_F_QUEUE_TIMEOUT = BIT(REQ_F_QUEUE_TIMEOUT_BIT),
	/* io_wq_work is initialized */
	REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
};

struct async_poll {
	struct io_poll_iocb poll;
	struct io_wq_work work;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
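 *
 * E.g. req->file, req->rw.kiocb.ki_filp and req->poll.file all name
 * the same underlying pointer in the union below.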
617 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700618struct io_kiocb {
Jens Axboe221c5eb2019-01-17 09:41:58 -0700619 union {
Jens Axboe09bb8392019-03-13 12:39:28 -0600620 struct file *file;
Jens Axboe9adbd452019-12-20 08:45:55 -0700621 struct io_rw rw;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700622 struct io_poll_iocb poll;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -0700623 struct io_accept accept;
624 struct io_sync sync;
Jens Axboefbf23842019-12-17 18:45:56 -0700625 struct io_cancel cancel;
Jens Axboeb29472e2019-12-17 18:50:29 -0700626 struct io_timeout timeout;
Jens Axboe3fbb51c2019-12-20 08:51:52 -0700627 struct io_connect connect;
Jens Axboee47293f2019-12-20 08:58:21 -0700628 struct io_sr_msg sr_msg;
Jens Axboe15b71ab2019-12-11 11:20:36 -0700629 struct io_open open;
Jens Axboeb5dba592019-12-11 14:02:38 -0700630 struct io_close close;
Jens Axboe05f3fb32019-12-09 11:22:50 -0700631 struct io_files_update files_update;
Jens Axboe4840e412019-12-25 22:03:45 -0700632 struct io_fadvise fadvise;
Jens Axboec1ca7572019-12-25 22:18:28 -0700633 struct io_madvise madvise;
Jens Axboe3e4827b2020-01-08 15:18:09 -0700634 struct io_epoll epoll;
Pavel Begunkov7d67af22020-02-24 11:32:45 +0300635 struct io_splice splice;
Jens Axboeddf0322d2020-02-23 16:41:33 -0700636 struct io_provide_buf pbuf;
Bijan Mottahedeh1d9e1282020-05-22 21:31:16 -0700637 struct io_statx statx;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700638 };
Jens Axboe2b188cc2019-01-07 10:46:33 -0700639
Jens Axboe1a6b74f2019-12-02 10:33:15 -0700640 struct io_async_ctx *io;
Pavel Begunkovc398ecb2020-04-09 08:17:59 +0300641 int cflags;
Jens Axboed625c6e2019-12-17 19:53:05 -0700642 u8 opcode;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700643
Bijan Mottahedeh4f4eeba2020-05-19 14:52:49 -0700644 u16 buf_index;
645
Jens Axboe2b188cc2019-01-07 10:46:33 -0700646 struct io_ring_ctx *ctx;
Jens Axboed7718a92020-02-14 22:23:12 -0700647 struct list_head list;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700648 unsigned int flags;
Jens Axboec16361c2019-01-17 08:39:48 -0700649 refcount_t refs;
Jens Axboe3537b6a2020-04-03 11:19:06 -0600650 struct task_struct *task;
651 unsigned long fsize;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700652 u64 user_data;
Jens Axboe9e645e112019-05-10 16:07:28 -0600653 u32 result;
Jens Axboede0617e2019-04-06 21:51:27 -0600654 u32 sequence;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700655
Jens Axboed7718a92020-02-14 22:23:12 -0700656 struct list_head link_list;
657
Jens Axboefcb323c2019-10-24 12:39:47 -0600658 struct list_head inflight_entry;
659
Xiaoguang Wang05589552020-03-31 14:05:18 +0800660 struct percpu_ref *fixed_file_refs;
661
Jens Axboeb41e9852020-02-17 09:52:41 -0700662 union {
663 /*
664 * Only commands that never go async can use the below fields,
Jens Axboed7718a92020-02-14 22:23:12 -0700665 * obviously. Right now only IORING_OP_POLL_ADD uses them, and
666 * async armed poll handlers for regular commands. The latter
667 * restore the work, if needed.
Jens Axboeb41e9852020-02-17 09:52:41 -0700668 */
669 struct {
Jens Axboeb41e9852020-02-17 09:52:41 -0700670 struct callback_head task_work;
Jens Axboed7718a92020-02-14 22:23:12 -0700671 struct hlist_node hash_node;
672 struct async_poll *apoll;
Jens Axboeb41e9852020-02-17 09:52:41 -0700673 };
674 struct io_wq_work work;
675 };
Jens Axboe2b188cc2019-01-07 10:46:33 -0700676};
677
678#define IO_PLUG_THRESHOLD 2
Jens Axboedef596e2019-01-09 08:59:42 -0700679#define IO_IOPOLL_BATCH 8
Jens Axboe2b188cc2019-01-07 10:46:33 -0700680
Jens Axboe9a56a232019-01-09 09:06:50 -0700681struct io_submit_state {
682 struct blk_plug plug;
683
684 /*
Jens Axboe2579f912019-01-09 09:10:43 -0700685 * io_kiocb alloc cache
686 */
687 void *reqs[IO_IOPOLL_BATCH];
Pavel Begunkov6c8a3132020-02-01 03:58:00 +0300688 unsigned int free_reqs;
Jens Axboe2579f912019-01-09 09:10:43 -0700689
690 /*
Jens Axboe9a56a232019-01-09 09:06:50 -0700691 * File reference cache
692 */
693 struct file *file;
694 unsigned int fd;
695 unsigned int has_refs;
696 unsigned int used_refs;
697 unsigned int ios_left;
698};
699
Jens Axboed3656342019-12-18 09:50:26 -0700700struct io_op_def {
701 /* needs req->io allocated for deferral/async */
702 unsigned async_ctx : 1;
703 /* needs current->mm setup, does mm access */
704 unsigned needs_mm : 1;
705 /* needs req->file assigned */
706 unsigned needs_file : 1;
Jens Axboefd2206e2020-06-02 16:40:47 -0600707 /* don't fail if file grab fails */
708 unsigned needs_file_no_error : 1;
Jens Axboed3656342019-12-18 09:50:26 -0700709 /* hash wq insertion if file is a regular file */
710 unsigned hash_reg_file : 1;
711 /* unbound wq insertion if file is a non-regular file */
712 unsigned unbound_nonreg_file : 1;
Jens Axboe66f4af92020-01-16 15:36:52 -0700713 /* opcode is not supported by this kernel */
714 unsigned not_supported : 1;
Jens Axboef86cd202020-01-29 13:46:44 -0700715 /* needs file table */
716 unsigned file_table : 1;
Jens Axboeff002b32020-02-07 16:05:21 -0700717 /* needs ->fs */
718 unsigned needs_fs : 1;
Jens Axboe8a727582020-02-20 09:59:44 -0700719 /* set if opcode supports polled "wait" */
720 unsigned pollin : 1;
721 unsigned pollout : 1;
Jens Axboebcda7ba2020-02-23 16:42:51 -0700722 /* op supports buffer selection */
723 unsigned buffer_select : 1;
Jens Axboed3656342019-12-18 09:50:26 -0700724};
725
726static const struct io_op_def io_op_defs[] = {
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300727 [IORING_OP_NOP] = {},
728 [IORING_OP_READV] = {
Jens Axboed3656342019-12-18 09:50:26 -0700729 .async_ctx = 1,
730 .needs_mm = 1,
731 .needs_file = 1,
732 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700733 .pollin = 1,
Jens Axboe4d954c22020-02-27 07:31:19 -0700734 .buffer_select = 1,
Jens Axboed3656342019-12-18 09:50:26 -0700735 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300736 [IORING_OP_WRITEV] = {
Jens Axboed3656342019-12-18 09:50:26 -0700737 .async_ctx = 1,
738 .needs_mm = 1,
739 .needs_file = 1,
740 .hash_reg_file = 1,
741 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700742 .pollout = 1,
Jens Axboed3656342019-12-18 09:50:26 -0700743 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300744 [IORING_OP_FSYNC] = {
Jens Axboed3656342019-12-18 09:50:26 -0700745 .needs_file = 1,
746 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300747 [IORING_OP_READ_FIXED] = {
Jens Axboed3656342019-12-18 09:50:26 -0700748 .needs_file = 1,
749 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700750 .pollin = 1,
Jens Axboed3656342019-12-18 09:50:26 -0700751 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300752 [IORING_OP_WRITE_FIXED] = {
Jens Axboed3656342019-12-18 09:50:26 -0700753 .needs_file = 1,
754 .hash_reg_file = 1,
755 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700756 .pollout = 1,
Jens Axboed3656342019-12-18 09:50:26 -0700757 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300758 [IORING_OP_POLL_ADD] = {
Jens Axboed3656342019-12-18 09:50:26 -0700759 .needs_file = 1,
760 .unbound_nonreg_file = 1,
761 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300762 [IORING_OP_POLL_REMOVE] = {},
763 [IORING_OP_SYNC_FILE_RANGE] = {
Jens Axboed3656342019-12-18 09:50:26 -0700764 .needs_file = 1,
765 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300766 [IORING_OP_SENDMSG] = {
Jens Axboed3656342019-12-18 09:50:26 -0700767 .async_ctx = 1,
768 .needs_mm = 1,
769 .needs_file = 1,
770 .unbound_nonreg_file = 1,
Jens Axboeff002b32020-02-07 16:05:21 -0700771 .needs_fs = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700772 .pollout = 1,
Jens Axboed3656342019-12-18 09:50:26 -0700773 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300774 [IORING_OP_RECVMSG] = {
Jens Axboed3656342019-12-18 09:50:26 -0700775 .async_ctx = 1,
776 .needs_mm = 1,
777 .needs_file = 1,
778 .unbound_nonreg_file = 1,
Jens Axboeff002b32020-02-07 16:05:21 -0700779 .needs_fs = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700780 .pollin = 1,
Jens Axboe52de1fe2020-02-27 10:15:42 -0700781 .buffer_select = 1,
Jens Axboed3656342019-12-18 09:50:26 -0700782 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300783 [IORING_OP_TIMEOUT] = {
Jens Axboed3656342019-12-18 09:50:26 -0700784 .async_ctx = 1,
785 .needs_mm = 1,
786 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300787 [IORING_OP_TIMEOUT_REMOVE] = {},
788 [IORING_OP_ACCEPT] = {
Jens Axboed3656342019-12-18 09:50:26 -0700789 .needs_mm = 1,
790 .needs_file = 1,
791 .unbound_nonreg_file = 1,
Jens Axboef86cd202020-01-29 13:46:44 -0700792 .file_table = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700793 .pollin = 1,
Jens Axboed3656342019-12-18 09:50:26 -0700794 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300795 [IORING_OP_ASYNC_CANCEL] = {},
796 [IORING_OP_LINK_TIMEOUT] = {
Jens Axboed3656342019-12-18 09:50:26 -0700797 .async_ctx = 1,
798 .needs_mm = 1,
799 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300800 [IORING_OP_CONNECT] = {
Jens Axboed3656342019-12-18 09:50:26 -0700801 .async_ctx = 1,
802 .needs_mm = 1,
803 .needs_file = 1,
804 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700805 .pollout = 1,
Jens Axboed3656342019-12-18 09:50:26 -0700806 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300807 [IORING_OP_FALLOCATE] = {
Jens Axboed3656342019-12-18 09:50:26 -0700808 .needs_file = 1,
809 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300810 [IORING_OP_OPENAT] = {
Jens Axboef86cd202020-01-29 13:46:44 -0700811 .file_table = 1,
Jens Axboeff002b32020-02-07 16:05:21 -0700812 .needs_fs = 1,
Jens Axboed3656342019-12-18 09:50:26 -0700813 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300814 [IORING_OP_CLOSE] = {
Jens Axboefd2206e2020-06-02 16:40:47 -0600815 .needs_file = 1,
816 .needs_file_no_error = 1,
Jens Axboef86cd202020-01-29 13:46:44 -0700817 .file_table = 1,
Jens Axboed3656342019-12-18 09:50:26 -0700818 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300819 [IORING_OP_FILES_UPDATE] = {
Jens Axboed3656342019-12-18 09:50:26 -0700820 .needs_mm = 1,
Jens Axboef86cd202020-01-29 13:46:44 -0700821 .file_table = 1,
Jens Axboed3656342019-12-18 09:50:26 -0700822 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300823 [IORING_OP_STATX] = {
Jens Axboed3656342019-12-18 09:50:26 -0700824 .needs_mm = 1,
Jens Axboeff002b32020-02-07 16:05:21 -0700825 .needs_fs = 1,
Jens Axboe5b0bbee2020-04-27 10:41:22 -0600826 .file_table = 1,
Jens Axboed3656342019-12-18 09:50:26 -0700827 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300828 [IORING_OP_READ] = {
Jens Axboe3a6820f2019-12-22 15:19:35 -0700829 .needs_mm = 1,
830 .needs_file = 1,
831 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700832 .pollin = 1,
Jens Axboebcda7ba2020-02-23 16:42:51 -0700833 .buffer_select = 1,
Jens Axboe3a6820f2019-12-22 15:19:35 -0700834 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300835 [IORING_OP_WRITE] = {
Jens Axboe3a6820f2019-12-22 15:19:35 -0700836 .needs_mm = 1,
837 .needs_file = 1,
838 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700839 .pollout = 1,
Jens Axboe3a6820f2019-12-22 15:19:35 -0700840 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300841 [IORING_OP_FADVISE] = {
Jens Axboe4840e412019-12-25 22:03:45 -0700842 .needs_file = 1,
843 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300844 [IORING_OP_MADVISE] = {
Jens Axboec1ca7572019-12-25 22:18:28 -0700845 .needs_mm = 1,
846 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300847 [IORING_OP_SEND] = {
Jens Axboefddafac2020-01-04 20:19:44 -0700848 .needs_mm = 1,
849 .needs_file = 1,
850 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700851 .pollout = 1,
Jens Axboefddafac2020-01-04 20:19:44 -0700852 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300853 [IORING_OP_RECV] = {
Jens Axboefddafac2020-01-04 20:19:44 -0700854 .needs_mm = 1,
855 .needs_file = 1,
856 .unbound_nonreg_file = 1,
Jens Axboe8a727582020-02-20 09:59:44 -0700857 .pollin = 1,
Jens Axboebcda7ba2020-02-23 16:42:51 -0700858 .buffer_select = 1,
Jens Axboefddafac2020-01-04 20:19:44 -0700859 },
Pavel Begunkov0463b6c2020-01-18 21:35:38 +0300860 [IORING_OP_OPENAT2] = {
Jens Axboef86cd202020-01-29 13:46:44 -0700861 .file_table = 1,
Jens Axboeff002b32020-02-07 16:05:21 -0700862 .needs_fs = 1,
Jens Axboecebdb982020-01-08 17:59:24 -0700863 },
Jens Axboe3e4827b2020-01-08 15:18:09 -0700864 [IORING_OP_EPOLL_CTL] = {
865 .unbound_nonreg_file = 1,
866 .file_table = 1,
867 },
Pavel Begunkov7d67af22020-02-24 11:32:45 +0300868 [IORING_OP_SPLICE] = {
869 .needs_file = 1,
870 .hash_reg_file = 1,
871 .unbound_nonreg_file = 1,
Jens Axboeddf0322d2020-02-23 16:41:33 -0700872 },
873 [IORING_OP_PROVIDE_BUFFERS] = {},
Jens Axboe067524e2020-03-02 16:32:28 -0700874 [IORING_OP_REMOVE_BUFFERS] = {},
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +0300875 [IORING_OP_TEE] = {
876 .needs_file = 1,
877 .hash_reg_file = 1,
878 .unbound_nonreg_file = 1,
879 },
Jens Axboed3656342019-12-18 09:50:26 -0700880};
881
Jens Axboe561fb042019-10-24 07:25:42 -0600882static void io_wq_submit_work(struct io_wq_work **workptr);
Jens Axboe78e19bb2019-11-06 15:21:34 -0700883static void io_cqring_fill_event(struct io_kiocb *req, long res);
Jackie Liuec9c02a2019-11-08 23:50:36 +0800884static void io_put_req(struct io_kiocb *req);
Jens Axboe978db572019-11-14 22:39:04 -0700885static void __io_double_put_req(struct io_kiocb *req);
Jens Axboe94ae5e72019-11-14 19:39:52 -0700886static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
887static void io_queue_linked_timeout(struct io_kiocb *req);
Jens Axboe05f3fb32019-12-09 11:22:50 -0700888static int __io_sqe_files_update(struct io_ring_ctx *ctx,
889 struct io_uring_files_update *ip,
890 unsigned nr_args);
Jens Axboef86cd202020-01-29 13:46:44 -0700891static int io_grab_files(struct io_kiocb *req);
Pavel Begunkov99bc4c32020-02-07 22:04:45 +0300892static void io_cleanup_req(struct io_kiocb *req);
Jens Axboeb41e9852020-02-17 09:52:41 -0700893static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
894 int fd, struct file **out_file, bool fixed);
895static void __io_queue_sqe(struct io_kiocb *req,
896 const struct io_uring_sqe *sqe);
Jens Axboede0617e2019-04-06 21:51:27 -0600897
Jens Axboe2b188cc2019-01-07 10:46:33 -0700898static struct kmem_cache *req_cachep;
899
900static const struct file_operations io_uring_fops;
901
902struct sock *io_uring_get_socket(struct file *file)
903{
904#if defined(CONFIG_UNIX)
905 if (file->f_op == &io_uring_fops) {
906 struct io_ring_ctx *ctx = file->private_data;
907
908 return ctx->ring_sock->sk;
909 }
910#endif
911 return NULL;
912}
913EXPORT_SYMBOL(io_uring_get_socket);
914
Jens Axboe4a38aed22020-05-14 17:21:15 -0600915static void io_file_put_work(struct work_struct *work);
916
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +0800917/*
918 * Note: must call io_req_init_async() for the first time you
919 * touch any members of io_wq_work.
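 *
 * E.g. a hypothetical caller that wants to flag its work item (the
 * helper is real, the flag assignment is just an illustration):
 *
 *	io_req_init_async(req);
 *	req->work.flags |= IO_WQ_WORK_CANCEL;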
 */
static inline void io_req_init_async(struct io_kiocb *req)
{
	if (req->flags & REQ_F_WORK_INITIALIZED)
		return;

	memset(&req->work, 0, sizeof(req->work));
	req->flags |= REQ_F_WORK_INITIALIZED;
}

static inline bool io_async_submit(struct io_ring_ctx *ctx)
{
	return ctx->flags & IORING_SETUP_SQPOLL;
}

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
	if (!ctx->fallback_req)
		goto err;

	/*
	 * Use 5 bits less than the max cq entries; that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
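	 *
	 * E.g. p->cq_entries == 4096: ilog2() gives 12, so hash_bits
	 * becomes 7, i.e. 128 buckets for up to 4096 in-flight requests.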
958 */
959 hash_bits = ilog2(p->cq_entries);
960 hash_bits -= 5;
961 if (hash_bits <= 0)
962 hash_bits = 1;
963 ctx->cancel_hash_bits = hash_bits;
964 ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
965 GFP_KERNEL);
966 if (!ctx->cancel_hash)
967 goto err;
968 __hash_init(ctx->cancel_hash, 1U << hash_bits);
969
Roman Gushchin21482892019-05-07 10:01:48 -0700970 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
Jens Axboe206aefd2019-11-07 18:27:42 -0700971 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
972 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700973
974 ctx->flags = p->flags;
Jens Axboe583863e2020-05-17 09:20:00 -0600975 init_waitqueue_head(&ctx->sqo_wait);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700976 init_waitqueue_head(&ctx->cq_wait);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -0700977 INIT_LIST_HEAD(&ctx->cq_overflow_list);
Jens Axboe0f158b42020-05-14 17:18:39 -0600978 init_completion(&ctx->ref_comp);
979 init_completion(&ctx->sq_thread_comp);
Jens Axboe5a2e7452020-02-23 16:23:11 -0700980 idr_init(&ctx->io_buffer_idr);
Jens Axboe071698e2020-01-28 10:04:42 -0700981 idr_init(&ctx->personality_idr);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700982 mutex_init(&ctx->uring_lock);
983 init_waitqueue_head(&ctx->wait);
984 spin_lock_init(&ctx->completion_lock);
Jens Axboedef596e2019-01-09 08:59:42 -0700985 INIT_LIST_HEAD(&ctx->poll_list);
Jens Axboede0617e2019-04-06 21:51:27 -0600986 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -0600987 INIT_LIST_HEAD(&ctx->timeout_list);
Jens Axboefcb323c2019-10-24 12:39:47 -0600988 init_waitqueue_head(&ctx->inflight_wait);
989 spin_lock_init(&ctx->inflight_lock);
990 INIT_LIST_HEAD(&ctx->inflight_list);
Jens Axboe4a38aed22020-05-14 17:21:15 -0600991 INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
992 init_llist_head(&ctx->file_put_llist);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700993 return ctx;
Jens Axboe206aefd2019-11-07 18:27:42 -0700994err:
Jens Axboe0ddf92e2019-11-08 08:52:53 -0700995 if (ctx->fallback_req)
996 kmem_cache_free(req_cachep, ctx->fallback_req);
Jens Axboe78076bb2019-12-04 19:56:40 -0700997 kfree(ctx->cancel_hash);
Jens Axboe206aefd2019-11-07 18:27:42 -0700998 kfree(ctx);
999 return NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001000}
1001
Bob Liu9d858b22019-11-13 18:06:25 +08001002static inline bool __req_need_defer(struct io_kiocb *req)
Jens Axboede0617e2019-04-06 21:51:27 -06001003{
Jackie Liua197f662019-11-08 08:09:12 -07001004 struct io_ring_ctx *ctx = req->ctx;
1005
Pavel Begunkov31af27c2020-04-15 00:39:50 +03001006 return req->sequence != ctx->cached_cq_tail
1007 + atomic_read(&ctx->cached_cq_overflow);
Jens Axboede0617e2019-04-06 21:51:27 -06001008}
1009
Bob Liu9d858b22019-11-13 18:06:25 +08001010static inline bool req_need_defer(struct io_kiocb *req)
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001011{
Pavel Begunkov87987892020-01-18 01:22:30 +03001012 if (unlikely(req->flags & REQ_F_IO_DRAIN))
Bob Liu9d858b22019-11-13 18:06:25 +08001013 return __req_need_defer(req);
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001014
Bob Liu9d858b22019-11-13 18:06:25 +08001015 return false;
Jens Axboe7adf4ea2019-10-10 21:42:58 -06001016}
1017
Jens Axboede0617e2019-04-06 21:51:27 -06001018static void __io_commit_cqring(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001019{
Hristo Venev75b28af2019-08-26 17:23:46 +00001020 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001021
Pavel Begunkov07910152020-01-17 03:52:46 +03001022 /* order cqe stores with ring update */
1023 smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001024
Pavel Begunkov07910152020-01-17 03:52:46 +03001025 if (wq_has_sleeper(&ctx->cq_wait)) {
1026 wake_up_interruptible(&ctx->cq_wait);
1027 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001028 }
1029}
1030
Jens Axboecccf0ee2020-01-27 16:34:48 -07001031static inline void io_req_work_grab_env(struct io_kiocb *req,
1032 const struct io_op_def *def)
Jens Axboe18d9be12019-09-10 09:13:05 -06001033{
Jens Axboecccf0ee2020-01-27 16:34:48 -07001034 if (!req->work.mm && def->needs_mm) {
1035 mmgrab(current->mm);
1036 req->work.mm = current->mm;
1037 }
1038 if (!req->work.creds)
1039 req->work.creds = get_current_cred();
Jens Axboeff002b32020-02-07 16:05:21 -07001040 if (!req->work.fs && def->needs_fs) {
1041 spin_lock(&current->fs->lock);
1042 if (!current->fs->in_exec) {
1043 req->work.fs = current->fs;
1044 req->work.fs->users++;
1045 } else {
1046 req->work.flags |= IO_WQ_WORK_CANCEL;
1047 }
1048 spin_unlock(&current->fs->lock);
1049 }
Jens Axboe6ab23142020-02-08 20:23:59 -07001050 if (!req->work.task_pid)
1051 req->work.task_pid = task_pid_vnr(current);
Jens Axboecccf0ee2020-01-27 16:34:48 -07001052}
1053
1054static inline void io_req_work_drop_env(struct io_kiocb *req)
1055{
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08001056 if (!(req->flags & REQ_F_WORK_INITIALIZED))
1057 return;
1058
Jens Axboecccf0ee2020-01-27 16:34:48 -07001059 if (req->work.mm) {
1060 mmdrop(req->work.mm);
1061 req->work.mm = NULL;
1062 }
1063 if (req->work.creds) {
1064 put_cred(req->work.creds);
1065 req->work.creds = NULL;
1066 }
Jens Axboeff002b32020-02-07 16:05:21 -07001067 if (req->work.fs) {
1068 struct fs_struct *fs = req->work.fs;
1069
1070 spin_lock(&req->work.fs->lock);
1071 if (--fs->users)
1072 fs = NULL;
1073 spin_unlock(&req->work.fs->lock);
1074 if (fs)
1075 free_fs_struct(fs);
1076 }
Jens Axboe561fb042019-10-24 07:25:42 -06001077}
1078
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001079static inline void io_prep_async_work(struct io_kiocb *req,
Jens Axboe94ae5e72019-11-14 19:39:52 -07001080 struct io_kiocb **link)
Jens Axboe561fb042019-10-24 07:25:42 -06001081{
Jens Axboed3656342019-12-18 09:50:26 -07001082 const struct io_op_def *def = &io_op_defs[req->opcode];
Jens Axboe54a91f32019-09-10 09:15:04 -06001083
Jens Axboed3656342019-12-18 09:50:26 -07001084 if (req->flags & REQ_F_ISREG) {
1085 if (def->hash_reg_file)
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001086 io_wq_hash_work(&req->work, file_inode(req->file));
Jens Axboed3656342019-12-18 09:50:26 -07001087 } else {
1088 if (def->unbound_nonreg_file)
Jens Axboe3529d8c2019-12-19 18:24:38 -07001089 req->work.flags |= IO_WQ_WORK_UNBOUND;
Jens Axboe54a91f32019-09-10 09:15:04 -06001090 }
Jens Axboecccf0ee2020-01-27 16:34:48 -07001091
1092 io_req_work_grab_env(req, def);
Jens Axboe54a91f32019-09-10 09:15:04 -06001093
Jens Axboe94ae5e72019-11-14 19:39:52 -07001094 *link = io_prep_linked_timeout(req);
Jens Axboe561fb042019-10-24 07:25:42 -06001095}
1096
Jackie Liua197f662019-11-08 08:09:12 -07001097static inline void io_queue_async_work(struct io_kiocb *req)
Jens Axboe561fb042019-10-24 07:25:42 -06001098{
Jackie Liua197f662019-11-08 08:09:12 -07001099 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe94ae5e72019-11-14 19:39:52 -07001100 struct io_kiocb *link;
Jens Axboe94ae5e72019-11-14 19:39:52 -07001101
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001102 io_prep_async_work(req, &link);
Jens Axboe561fb042019-10-24 07:25:42 -06001103
Pavel Begunkov8766dd52020-03-14 00:31:04 +03001104 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1105 &req->work, req->flags);
1106 io_wq_enqueue(ctx->io_wq, &req->work);
Jens Axboe94ae5e72019-11-14 19:39:52 -07001107
1108 if (link)
1109 io_queue_linked_timeout(link);
Jens Axboe18d9be12019-09-10 09:13:05 -06001110}
1111
Jens Axboe5262f562019-09-17 12:26:57 -06001112static void io_kill_timeout(struct io_kiocb *req)
1113{
1114 int ret;
1115
Jens Axboe2d283902019-12-04 11:08:05 -07001116 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
Jens Axboe5262f562019-09-17 12:26:57 -06001117 if (ret != -1) {
1118 atomic_inc(&req->ctx->cq_timeouts);
Jens Axboe842f9612019-10-29 12:34:10 -06001119 list_del_init(&req->list);
Pavel Begunkovf0e20b82020-03-07 01:15:22 +03001120 req->flags |= REQ_F_COMP_LOCKED;
Jens Axboe78e19bb2019-11-06 15:21:34 -07001121 io_cqring_fill_event(req, 0);
Jackie Liuec9c02a2019-11-08 23:50:36 +08001122 io_put_req(req);
Jens Axboe5262f562019-09-17 12:26:57 -06001123 }
1124}
1125
1126static void io_kill_timeouts(struct io_ring_ctx *ctx)
1127{
1128 struct io_kiocb *req, *tmp;
1129
1130 spin_lock_irq(&ctx->completion_lock);
1131 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
1132 io_kill_timeout(req);
1133 spin_unlock_irq(&ctx->completion_lock);
1134}
1135
Pavel Begunkov04518942020-05-26 20:34:05 +03001136static void __io_queue_deferred(struct io_ring_ctx *ctx)
1137{
1138 do {
1139 struct io_kiocb *req = list_first_entry(&ctx->defer_list,
1140 struct io_kiocb, list);
1141
1142 if (req_need_defer(req))
1143 break;
1144 list_del_init(&req->list);
1145 io_queue_async_work(req);
1146 } while (!list_empty(&ctx->defer_list));
1147}
1148
Pavel Begunkov360428f2020-05-30 14:54:17 +03001149static void io_flush_timeouts(struct io_ring_ctx *ctx)
1150{
1151 while (!list_empty(&ctx->timeout_list)) {
1152 struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
1153 struct io_kiocb, list);
1154
1155 if (req->flags & REQ_F_TIMEOUT_NOSEQ)
1156 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001157 if (req->timeout.target_seq != ctx->cached_cq_tail
1158 - atomic_read(&ctx->cq_timeouts))
Pavel Begunkov360428f2020-05-30 14:54:17 +03001159 break;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03001160
Pavel Begunkov360428f2020-05-30 14:54:17 +03001161 list_del_init(&req->list);
1162 io_kill_timeout(req);
1163 }
1164}
1165
Jens Axboede0617e2019-04-06 21:51:27 -06001166static void io_commit_cqring(struct io_ring_ctx *ctx)
1167{
Pavel Begunkov360428f2020-05-30 14:54:17 +03001168 io_flush_timeouts(ctx);
Jens Axboede0617e2019-04-06 21:51:27 -06001169 __io_commit_cqring(ctx);
1170
Pavel Begunkov04518942020-05-26 20:34:05 +03001171 if (unlikely(!list_empty(&ctx->defer_list)))
1172 __io_queue_deferred(ctx);
Jens Axboede0617e2019-04-06 21:51:27 -06001173}
1174
Jens Axboe2b188cc2019-01-07 10:46:33 -07001175static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
1176{
Hristo Venev75b28af2019-08-26 17:23:46 +00001177 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001178 unsigned tail;
1179
1180 tail = ctx->cached_cq_tail;
Stefan Bühler115e12e2019-04-24 23:54:18 +02001181 /*
1182 * writes to the cq entry need to come after reading head; the
1183 * control dependency is enough as we're using WRITE_ONCE to
1184 * fill the cq entry
1185 */
Hristo Venev75b28af2019-08-26 17:23:46 +00001186 if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001187 return NULL;
1188
1189 ctx->cached_cq_tail++;
Hristo Venev75b28af2019-08-26 17:23:46 +00001190 return &rings->cqes[tail & ctx->cq_mask];
Jens Axboe2b188cc2019-01-07 10:46:33 -07001191}
1192
Jens Axboef2842ab2020-01-08 11:04:00 -07001193static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1194{
Jens Axboef0b493e2020-02-01 21:30:11 -07001195 if (!ctx->cq_ev_fd)
1196 return false;
Stefano Garzarella7e55a192020-05-15 18:38:05 +02001197 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1198 return false;
Jens Axboef2842ab2020-01-08 11:04:00 -07001199 if (!ctx->eventfd_async)
1200 return true;
Jens Axboeb41e9852020-02-17 09:52:41 -07001201 return io_wq_current_is_worker();
Jens Axboef2842ab2020-01-08 11:04:00 -07001202}
1203
Jens Axboeb41e9852020-02-17 09:52:41 -07001204static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
Jens Axboe8c838782019-03-12 15:48:16 -06001205{
1206 if (waitqueue_active(&ctx->wait))
1207 wake_up(&ctx->wait);
1208 if (waitqueue_active(&ctx->sqo_wait))
1209 wake_up(&ctx->sqo_wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07001210 if (io_should_trigger_evfd(ctx))
Jens Axboe9b402842019-04-11 11:45:41 -06001211 eventfd_signal(ctx->cq_ev_fd, 1);
Jens Axboe8c838782019-03-12 15:48:16 -06001212}
1213
Jens Axboec4a2ed72019-11-21 21:01:26 -07001214/* Returns true if there are no backlogged entries after the flush */
1215static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001216{
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001217 struct io_rings *rings = ctx->rings;
1218 struct io_uring_cqe *cqe;
1219 struct io_kiocb *req;
1220 unsigned long flags;
1221 LIST_HEAD(list);
1222
1223 if (!force) {
1224 if (list_empty_careful(&ctx->cq_overflow_list))
Jens Axboec4a2ed72019-11-21 21:01:26 -07001225 return true;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001226 if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
1227 rings->cq_ring_entries))
Jens Axboec4a2ed72019-11-21 21:01:26 -07001228 return false;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001229 }
1230
1231 spin_lock_irqsave(&ctx->completion_lock, flags);
1232
1233 /* if force is set, the ring is going away. always drop after that */
1234 if (force)
Jens Axboe69b3e542020-01-08 11:01:46 -07001235 ctx->cq_overflow_flushed = 1;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001236
Jens Axboec4a2ed72019-11-21 21:01:26 -07001237 cqe = NULL;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001238 while (!list_empty(&ctx->cq_overflow_list)) {
1239 cqe = io_get_cqring(ctx);
1240 if (!cqe && !force)
1241 break;
1242
1243 req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
1244 list);
1245 list_move(&req->list, &list);
Jens Axboe2ca10252020-02-13 17:17:35 -07001246 req->flags &= ~REQ_F_OVERFLOW;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001247 if (cqe) {
1248 WRITE_ONCE(cqe->user_data, req->user_data);
1249 WRITE_ONCE(cqe->res, req->result);
Jens Axboebcda7ba2020-02-23 16:42:51 -07001250 WRITE_ONCE(cqe->flags, req->cflags);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001251 } else {
1252 WRITE_ONCE(ctx->rings->cq_overflow,
1253 atomic_inc_return(&ctx->cached_cq_overflow));
1254 }
1255 }
1256
1257 io_commit_cqring(ctx);
Jens Axboead3eb2c2019-12-18 17:12:20 -07001258 if (cqe) {
1259 clear_bit(0, &ctx->sq_check_overflow);
1260 clear_bit(0, &ctx->cq_check_overflow);
1261 }
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001262 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1263 io_cqring_ev_posted(ctx);
1264
1265 while (!list_empty(&list)) {
1266 req = list_first_entry(&list, struct io_kiocb, list);
1267 list_del(&req->list);
Jackie Liuec9c02a2019-11-08 23:50:36 +08001268 io_put_req(req);
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001269 }
Jens Axboec4a2ed72019-11-21 21:01:26 -07001270
1271 return cqe != NULL;
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07001272}
1273
static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, req->user_data, res);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (likely(cqe)) {
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
	} else if (ctx->cq_overflow_flushed) {
		WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
	} else {
		if (list_empty(&ctx->cq_overflow_list)) {
			set_bit(0, &ctx->sq_check_overflow);
			set_bit(0, &ctx->cq_check_overflow);
		}
		req->flags |= REQ_F_OVERFLOW;
		refcount_inc(&req->refs);
		req->result = res;
		req->cflags = cflags;
		list_add_tail(&req->list, &ctx->cq_overflow_list);
	}
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
	__io_cqring_fill_event(req, res, 0);
}

static void __io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	__io_cqring_fill_event(req, res, cflags);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
}

static void io_cqring_add_event(struct io_kiocb *req, long res)
{
	__io_cqring_add_event(req, res, 0);
}

static inline bool io_is_fallback_req(struct io_kiocb *req)
{
	return req == (struct io_kiocb *)
			((unsigned long) req->ctx->fallback_req & ~1UL);
}

static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = ctx->fallback_req;
	if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
		return req;

	return NULL;
}

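/*
 * Allocate a request, preferring the per-submission batch cache in
 * @state and falling back to the context's single reserved fallback
 * request if the slab allocator cannot satisfy us.
 */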
static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
				     struct io_submit_state *state)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct io_kiocb *req;

	if (!state) {
		req = kmem_cache_alloc(req_cachep, gfp);
		if (unlikely(!req))
			goto fallback;
	} else if (!state->free_reqs) {
		size_t sz;
		int ret;

		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				goto fallback;
			ret = 1;
		}
		state->free_reqs = ret - 1;
		req = state->reqs[ret - 1];
	} else {
		state->free_reqs--;
		req = state->reqs[state->free_reqs];
	}

	return req;
fallback:
	return io_get_fallback_req(ctx);
}

static inline void io_put_file(struct io_kiocb *req, struct file *file,
			       bool fixed)
{
	if (fixed)
		percpu_ref_put(req->fixed_file_refs);
	else
		fput(file);
}

static void __io_req_aux_free(struct io_kiocb *req)
{
	if (req->flags & REQ_F_NEED_CLEANUP)
		io_cleanup_req(req);

	kfree(req->io);
	if (req->file)
		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
	if (req->task)
		put_task_struct(req->task);

	io_req_work_drop_env(req);
}

static void __io_free_req(struct io_kiocb *req)
{
	__io_req_aux_free(req);

	if (req->flags & REQ_F_INFLIGHT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		list_del(&req->inflight_entry);
		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
	}

	percpu_ref_put(&req->ctx->refs);
	if (likely(!io_is_fallback_req(req)))
		kmem_cache_free(req_cachep, req);
	else
		clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req);
}

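/*
 * Batch container for freeing requests: completed requests are collected
 * here and released in bulk, amortizing the slab and percpu refcount
 * costs across up to IO_IOPOLL_BATCH requests.
 */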
struct req_batch {
	void *reqs[IO_IOPOLL_BATCH];
	int to_free;
	int need_iter;
};

static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
{
	if (!rb->to_free)
		return;
	if (rb->need_iter) {
		int i, inflight = 0;
		unsigned long flags;

		for (i = 0; i < rb->to_free; i++) {
			struct io_kiocb *req = rb->reqs[i];

			if (req->flags & REQ_F_INFLIGHT)
				inflight++;
			__io_req_aux_free(req);
		}
		if (!inflight)
			goto do_free;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		for (i = 0; i < rb->to_free; i++) {
			struct io_kiocb *req = rb->reqs[i];

			if (req->flags & REQ_F_INFLIGHT) {
				list_del(&req->inflight_entry);
				if (!--inflight)
					break;
			}
		}
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);

		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
	}
do_free:
	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
	percpu_ref_put_many(&ctx->refs, rb->to_free);
	rb->to_free = rb->need_iter = 0;
}

static bool io_link_cancel_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
	if (ret != -1) {
		io_cqring_fill_event(req, -ECANCELED);
		io_commit_cqring(ctx);
		req->flags &= ~REQ_F_LINK_HEAD;
		io_put_req(req);
		return true;
	}

	return false;
}

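/*
 * Hand the next request in @req's link chain back via @nxtptr, cancelling
 * any linked timeouts that sit between @req and that next request.
 */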
static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	struct io_ring_ctx *ctx = req->ctx;
	bool wake_ev = false;

	/* Already got next link */
	if (req->flags & REQ_F_LINK_NEXT)
		return;

	/*
	 * The list should never be empty when we are called here. But it
	 * could potentially happen if the chain is messed up, so check to
	 * be on the safe side.
	 */
	while (!list_empty(&req->link_list)) {
		struct io_kiocb *nxt = list_first_entry(&req->link_list,
						struct io_kiocb, link_list);

		if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) &&
			     (nxt->flags & REQ_F_TIMEOUT))) {
			list_del_init(&nxt->link_list);
			wake_ev |= io_link_cancel_timeout(nxt);
			req->flags &= ~REQ_F_LINK_TIMEOUT;
			continue;
		}

		list_del_init(&req->link_list);
		if (!list_empty(&nxt->link_list))
			nxt->flags |= REQ_F_LINK_HEAD;
		*nxtptr = nxt;
		break;
	}

	req->flags |= REQ_F_LINK_NEXT;
	if (wake_ev)
		io_cqring_ev_posted(ctx);
}

/*
 * Called if REQ_F_LINK_HEAD is set, and we fail the head request
 */
static void io_fail_links(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);

	while (!list_empty(&req->link_list)) {
		struct io_kiocb *link = list_first_entry(&req->link_list,
						struct io_kiocb, link_list);

		list_del_init(&link->link_list);
		trace_io_uring_fail_link(req, link);

		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
		    link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_link_cancel_timeout(link);
		} else {
			io_cqring_fill_event(link, -ECANCELED);
			__io_double_put_req(link);
		}
		req->flags &= ~REQ_F_LINK_TIMEOUT;
	}

	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);
}

static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{
	if (likely(!(req->flags & REQ_F_LINK_HEAD)))
		return;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (req->flags & REQ_F_FAIL_LINK) {
		io_fail_links(req);
	} else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
			REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		/*
		 * If this is a timeout link, we could be racing with the
		 * timeout timer. Grab the completion lock for this case to
		 * protect against that.
		 */
		spin_lock_irqsave(&ctx->completion_lock, flags);
		io_req_link_next(req, nxt);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
	} else {
		io_req_link_next(req, nxt);
	}
}

static void io_free_req(struct io_kiocb *req)
{
	struct io_kiocb *nxt = NULL;

	io_req_find_next(req, &nxt);
	__io_free_req(req);

	if (nxt)
		io_queue_async_work(nxt);
}

static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
{
	struct io_kiocb *link;
	const struct io_op_def *def = &io_op_defs[nxt->opcode];

	if ((nxt->flags & REQ_F_ISREG) && def->hash_reg_file)
		io_wq_hash_work(&nxt->work, file_inode(nxt->file));

	*workptr = &nxt->work;
	link = io_prep_linked_timeout(nxt);
	if (link)
		nxt->flags |= REQ_F_QUEUE_TIMEOUT;
}

/*
 * Drop reference to request, return next in chain (if there is one) if this
 * was the last reference to this request.
 */
__attribute__((nonnull))
static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	if (refcount_dec_and_test(&req->refs)) {
		io_req_find_next(req, nxtptr);
		__io_free_req(req);
	}
}

static void io_put_req(struct io_kiocb *req)
{
	if (refcount_dec_and_test(&req->refs))
		io_free_req(req);
}

static void io_steal_work(struct io_kiocb *req,
			  struct io_wq_work **workptr)
{
	/*
	 * It's in an io-wq worker, so there should always be at least
	 * one reference, which will be dropped in io_put_work() just
	 * after the current handler returns.
	 *
	 * It also means that if the counter dropped to 1, then there are
	 * no asynchronous users left, so it's safe to steal the next work.
	 */
	if (refcount_read(&req->refs) == 1) {
		struct io_kiocb *nxt = NULL;

		io_req_find_next(req, &nxt);
		if (nxt)
			io_wq_assign_next(workptr, nxt);
	}
}

/*
 * Must only be used if we don't need to care about links, usually from
 * within the completion handling itself.
 */
static void __io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		__io_free_req(req);
}

static void io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		io_free_req(req);
}

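/*
 * Return the number of events pending in the CQ ring. If completions have
 * overflowed, flush them into the ring first; with @noflush set (waitqueue
 * context) we instead return -1U so the task wakes up and flushes on the
 * next invocation.
 */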
static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
{
	struct io_rings *rings = ctx->rings;

	if (test_bit(0, &ctx->cq_check_overflow)) {
		/*
		 * noflush == true is from the waitqueue handler, just ensure
		 * we wake up the task, and the next invocation will flush the
		 * entries. We cannot safely do it from here.
		 */
		if (noflush && !list_empty(&ctx->cq_overflow_list))
			return -1U;

		io_cqring_overflow_flush(ctx, false);
	}

	/* See comment at the top of this file */
	smp_rmb();
	return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
{
	if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req))
		return false;

	if (req->file || req->io)
		rb->need_iter++;

	rb->reqs[rb->to_free++] = req;
	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
		io_free_req_many(req->ctx, rb);
	return true;
}

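/*
 * Release the provided buffer attached to @req and encode its buffer ID
 * into the CQE flags reported back to userspace.
 */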
static int io_put_kbuf(struct io_kiocb *req)
{
	struct io_buffer *kbuf;
	int cflags;

	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
	cflags |= IORING_CQE_F_BUFFER;
	req->rw.addr = 0;
	kfree(kbuf);
	return cflags;
}

/*
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       struct list_head *done)
{
	struct req_batch rb;
	struct io_kiocb *req;

	rb.to_free = rb.need_iter = 0;
	while (!list_empty(done)) {
		int cflags = 0;

		req = list_first_entry(done, struct io_kiocb, list);
		list_del(&req->list);

		if (req->flags & REQ_F_BUFFER_SELECTED)
			cflags = io_put_kbuf(req);

		__io_cqring_fill_event(req, req->result, cflags);
		(*nr_events)++;

		if (refcount_dec_and_test(&req->refs) &&
		    !io_req_multi_free(&rb, req))
			io_free_req(req);
	}

	io_commit_cqring(ctx);
	if (ctx->flags & IORING_SETUP_SQPOLL)
		io_cqring_ev_posted(ctx);
	io_free_req_many(ctx, &rb);
}

static void io_iopoll_queue(struct list_head *again)
{
	struct io_kiocb *req;

	do {
		req = list_first_entry(again, struct io_kiocb, list);
		list_del(&req->list);
		refcount_inc(&req->refs);
		io_queue_async_work(req);
	} while (!list_empty(again));
}

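/*
 * One pass of the IOPOLL loop: walk ->poll_list, reap completed requests
 * into a local done list, requeue -EAGAIN results for async retry, and
 * poll the driver for anything still outstanding.
 */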
static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
			long min)
{
	struct io_kiocb *req, *tmp;
	LIST_HEAD(done);
	LIST_HEAD(again);
	bool spin;
	int ret;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list, and we're under the requested amount.
	 */
	spin = !ctx->poll_multi_file && *nr_events < min;

	ret = 0;
	list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
		struct kiocb *kiocb = &req->rw.kiocb;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (req->flags & REQ_F_IOPOLL_COMPLETED) {
			list_move_tail(&req->list, &done);
			continue;
		}
		if (!list_empty(&done))
			break;

		if (req->result == -EAGAIN) {
			list_move_tail(&req->list, &again);
			continue;
		}
		if (!list_empty(&again))
			break;

		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
		if (ret < 0)
			break;

		if (ret && spin)
			spin = false;
		ret = 0;
	}

	if (!list_empty(&done))
		io_iopoll_complete(ctx, nr_events, &done);

	if (!list_empty(&again))
		io_iopoll_queue(&again);

	return ret;
}

/*
 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
 * non-spinning poll check - we'll still enter the driver poll loop, but only
 * as a non-spinning completion check.
 */
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
				long min)
{
	while (!list_empty(&ctx->poll_list) && !need_resched()) {
		int ret;

		ret = io_do_iopoll(ctx, nr_events, min);
		if (ret < 0)
			return ret;
		if (!min || *nr_events >= min)
			return 0;
	}

	return 1;
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->poll_list)) {
		unsigned int nr_events = 0;

		io_iopoll_getevents(ctx, &nr_events, 1);

		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 */
		cond_resched();
	}
	mutex_unlock(&ctx->uring_lock);
}

static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
			   long min)
{
	int iters = 0, ret = 0;

	/*
	 * We disallow the app entering submit/complete with polling, but we
	 * still need to lock the ring to prevent racing with polled issue
	 * that got punted to a workqueue.
	 */
	mutex_lock(&ctx->uring_lock);
	do {
		int tmin = 0;

		/*
		 * Don't enter poll loop if we already have events pending.
		 * If we do, we can potentially be spinning for commands that
		 * already triggered a CQE (eg in error).
		 */
		if (io_cqring_events(ctx, false))
			break;

		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (!(++iters & 7)) {
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&ctx->uring_lock);
		}

		if (*nr_events < min)
			tmin = min - *nr_events;

		ret = io_iopoll_getevents(ctx, nr_events, tmin);
		if (ret <= 0)
			break;
		ret = 0;
	} while (min && !*nr_events && !need_resched());

	mutex_unlock(&ctx->uring_lock);
	return ret;
}

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct inode *inode = file_inode(req->file);

		__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
	}
	file_end_write(req->file);
}

static inline void req_set_fail_links(struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
		req->flags |= REQ_F_FAIL_LINK;
}

static void io_complete_rw_common(struct kiocb *kiocb, long res)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
	int cflags = 0;

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if (res != req->result)
		req_set_fail_links(req);
	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_kbuf(req);
	__io_cqring_add_event(req, res, cflags);
}

static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	io_complete_rw_common(kiocb, res);
	io_put_req(req);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if (res != req->result)
		req_set_fail_links(req);
	req->result = res;
	if (res != -EAGAIN)
		req->flags |= REQ_F_IOPOLL_COMPLETED;
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_iopoll_getevents() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (list_empty(&ctx->poll_list)) {
		ctx->poll_multi_file = false;
	} else if (!ctx->poll_multi_file) {
		struct io_kiocb *list_req;

		list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
						list);
		if (list_req->file != req->file)
			ctx->poll_multi_file = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (req->flags & REQ_F_IOPOLL_COMPLETED)
		list_add(&req->list, &ctx->poll_list);
	else
		list_add_tail(&req->list, &ctx->poll_list);

	if ((ctx->flags & IORING_SETUP_SQPOLL) &&
	    wq_has_sleeper(&ctx->sqo_wait))
		wake_up(&ctx->sqo_wait);
}

static void __io_state_file_put(struct io_submit_state *state)
{
	int diff = state->has_refs - state->used_refs;

	if (diff)
		fput_many(state->file, diff);
	state->file = NULL;
}

static inline void io_state_file_put(struct io_submit_state *state)
{
	if (state->file)
		__io_state_file_put(state);
}

/*
 * Get as many references to a file as we have IOs left in this submission,
 * assuming most submissions are for one file, or at least that each file
 * has more than one submission.
 */
static struct file *__io_file_get(struct io_submit_state *state, int fd)
{
	if (!state)
		return fget(fd);

	if (state->file) {
		if (state->fd == fd) {
			state->used_refs++;
			state->ios_left--;
			return state->file;
		}
		__io_state_file_put(state);
	}
	state->file = fget_many(fd, state->ios_left);
	if (!state->file)
		return NULL;

	state->fd = fd;
	state->has_refs = state->ios_left;
	state->used_refs = 1;
	state->ios_left--;
	return state->file;
}

/*
 * If we tracked the file through the SCM inflight mechanism, we could support
 * any file. For now, just ensure that anything potentially problematic is done
 * inline.
 */
static bool io_file_supports_async(struct file *file, int rw)
{
	umode_t mode = file_inode(file)->i_mode;

	if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode))
		return true;
	if (S_ISREG(mode) && file->f_op != &io_uring_fops)
		return true;

	/* any ->read/write should understand O_NONBLOCK */
	if (file->f_flags & O_NONBLOCK)
		return true;

	if (!(file->f_mode & FMODE_NOWAIT))
		return false;

	if (rw == READ)
		return file->f_op->read_iter != NULL;

	return file->f_op->write_iter != NULL;
}

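/*
 * Common preparation for read/write requests: pull the file position,
 * rw flags, ioprio and buffer description out of the SQE, and wire up
 * the right completion callback for polled vs IRQ-driven IO.
 */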
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      bool force_nonblock)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw.kiocb;
	unsigned ioprio;
	int ret;

	if (S_ISREG(file_inode(req->file)->i_mode))
		req->flags |= REQ_F_ISREG;

	kiocb->ki_pos = READ_ONCE(sqe->off);
	if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		kiocb->ki_pos = req->file->f_pos;
	}
	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
	if (unlikely(ret))
		return ret;

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		kiocb->ki_ioprio = ioprio;
	} else
		kiocb->ki_ioprio = get_current_ioprio();

	/* don't allow async punt if RWF_NOWAIT was requested */
	if (kiocb->ki_flags & IOCB_NOWAIT)
		req->flags |= REQ_F_NOWAIT;

	if (force_nonblock)
		kiocb->ki_flags |= IOCB_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
		    !kiocb->ki_filp->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->result = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	req->rw.addr = READ_ONCE(sqe->addr);
	req->rw.len = READ_ONCE(sqe->len);
	req->buf_index = READ_ONCE(sqe->buf_index);
	return 0;
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		/* fall through */
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}

static void kiocb_done(struct kiocb *kiocb, ssize_t ret)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	if (req->flags & REQ_F_CUR_POS)
		req->file->f_pos = kiocb->ki_pos;
	if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
		io_complete_rw(kiocb, ret, 0);
	else
		io_rw_done(kiocb, ret);
}

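/*
 * Build a bvec iterator over a pre-registered (fixed) buffer, validating
 * that the request's address range lies entirely inside the mapped region.
 */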
static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
			       struct iov_iter *iter)
{
	struct io_ring_ctx *ctx = req->ctx;
	size_t len = req->rw.len;
	struct io_mapped_ubuf *imu;
	u16 index, buf_index;
	size_t offset;
	u64 buf_addr;

	/* attempt to use fixed buffers without having provided iovecs */
	if (unlikely(!ctx->user_bufs))
		return -EFAULT;

	buf_index = req->buf_index;
	if (unlikely(buf_index >= ctx->nr_user_bufs))
		return -EFAULT;

	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
	imu = &ctx->user_bufs[index];
	buf_addr = req->rw.addr;

	/* overflow */
	if (buf_addr + len < buf_addr)
		return -EFAULT;
	/* not inside the mapped region */
	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
		return -EFAULT;

	/*
	 * May not be the start of the buffer, so set the size appropriately
	 * and advance to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return len;
}

static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
{
	if (needs_lock)
		mutex_unlock(&ctx->uring_lock);
}

static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (needs_lock)
		mutex_lock(&ctx->uring_lock);
}

static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
					  int bgid, struct io_buffer *kbuf,
					  bool needs_lock)
{
	struct io_buffer *head;

	if (req->flags & REQ_F_BUFFER_SELECTED)
		return kbuf;

	io_ring_submit_lock(req->ctx, needs_lock);

	lockdep_assert_held(&req->ctx->uring_lock);

	head = idr_find(&req->ctx->io_buffer_idr, bgid);
	if (head) {
		if (!list_empty(&head->list)) {
			kbuf = list_last_entry(&head->list, struct io_buffer,
							list);
			list_del(&kbuf->list);
		} else {
			kbuf = head;
			idr_remove(&req->ctx->io_buffer_idr, bgid);
		}
		if (*len > kbuf->len)
			*len = kbuf->len;
	} else {
		kbuf = ERR_PTR(-ENOBUFS);
	}

	io_ring_submit_unlock(req->ctx, needs_lock);

	return kbuf;
}

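/*
 * Pick a provided buffer for a read/write that used IOSQE_BUFFER_SELECT:
 * ->rw.addr is repurposed to carry the chosen struct io_buffer, and the
 * user-visible address of that buffer is returned.
 */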
static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
					bool needs_lock)
{
	struct io_buffer *kbuf;
	u16 bgid;

	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	bgid = req->buf_index;
	kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
	if (IS_ERR(kbuf))
		return kbuf;
	req->rw.addr = (u64) (unsigned long) kbuf;
	req->flags |= REQ_F_BUFFER_SELECTED;
	return u64_to_user_ptr(kbuf->addr);
}

#ifdef CONFIG_COMPAT
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
				bool needs_lock)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;
	void __user *buf;
	ssize_t len;

	uiov = u64_to_user_ptr(req->rw.addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	len = clen;
	buf = io_rw_buffer_select(req, &len, needs_lock);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	iov[0].iov_base = buf;
	iov[0].iov_len = (compat_size_t) len;
	return 0;
}
#endif

static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				      bool needs_lock)
{
	struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
	void __user *buf;
	ssize_t len;

	if (copy_from_user(iov, uiov, sizeof(*uiov)))
		return -EFAULT;

	len = iov[0].iov_len;
	if (len < 0)
		return -EINVAL;
	buf = io_rw_buffer_select(req, &len, needs_lock);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	iov[0].iov_base = buf;
	iov[0].iov_len = len;
	return 0;
}

static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				    bool needs_lock)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		struct io_buffer *kbuf;

		kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
		iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
		iov[0].iov_len = kbuf->len;
		return 0;
	}
	if (!req->rw.len)
		return 0;
	else if (req->rw.len > 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_compat_import(req, iov, needs_lock);
#endif

	return __io_iov_buffer_select(req, iov, needs_lock);
}

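/*
 * Resolve the request's user memory into @iter, dispatching on opcode:
 * fixed buffers, provided buffers, a single plain buffer, or a full
 * iovec array (with compat handling).
 */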
static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
			       struct iovec **iovec, struct iov_iter *iter,
			       bool needs_lock)
{
	void __user *buf = u64_to_user_ptr(req->rw.addr);
	size_t sqe_len = req->rw.len;
	ssize_t ret;
	u8 opcode;

	opcode = req->opcode;
	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		*iovec = NULL;
		return io_import_fixed(req, rw, iter);
	}

	/* buffer index only valid with fixed read/write, or buffer select */
	if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
		if (req->flags & REQ_F_BUFFER_SELECT) {
			buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
			if (IS_ERR(buf)) {
				*iovec = NULL;
				return PTR_ERR(buf);
			}
			req->rw.len = sqe_len;
		}

		ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
		*iovec = NULL;
		return ret < 0 ? ret : sqe_len;
	}

	if (req->io) {
		struct io_async_rw *iorw = &req->io->rw;

		*iovec = iorw->iov;
		iov_iter_init(iter, rw, *iovec, iorw->nr_segs, iorw->size);
		if (iorw->iov == iorw->fast_iov)
			*iovec = NULL;
		return iorw->size;
	}

	if (req->flags & REQ_F_BUFFER_SELECT) {
		ret = io_iov_buffer_select(req, *iovec, needs_lock);
		if (!ret) {
			ret = (*iovec)->iov_len;
			iov_iter_init(iter, rw, *iovec, 1, ret);
		}
		*iovec = NULL;
		return ret;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
						iovec, iter);
#endif

	return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
			   struct iov_iter *iter)
{
	ssize_t ret = 0;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		return -EAGAIN;

	while (iov_iter_count(iter)) {
		struct iovec iovec;
		ssize_t nr;

		if (!iov_iter_is_bvec(iter)) {
			iovec = iov_iter_iovec(iter);
		} else {
			/* fixed buffers import bvec */
			iovec.iov_base = kmap(iter->bvec->bv_page)
						+ iter->iov_offset;
			iovec.iov_len = min(iter->count,
					iter->bvec->bv_len - iter->iov_offset);
		}

		if (rw == READ) {
			nr = file->f_op->read(file, iovec.iov_base,
					      iovec.iov_len, &kiocb->ki_pos);
		} else {
			nr = file->f_op->write(file, iovec.iov_base,
					       iovec.iov_len, &kiocb->ki_pos);
		}

		if (iov_iter_is_bvec(iter))
			kunmap(iter->bvec->bv_page);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != iovec.iov_len)
			break;
		iov_iter_advance(iter, nr);
	}

	return ret;
}

static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
			  struct iovec *iovec, struct iovec *fast_iov,
			  struct iov_iter *iter)
{
	req->io->rw.nr_segs = iter->nr_segs;
	req->io->rw.size = io_size;
	req->io->rw.iov = iovec;
	if (!req->io->rw.iov) {
		req->io->rw.iov = req->io->rw.fast_iov;
		if (req->io->rw.iov != fast_iov)
			memcpy(req->io->rw.iov, fast_iov,
			       sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

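/*
 * The async context holds a copy of the iovec (and other per-opcode
 * state) so a request can outlive the submission-time stack when it is
 * punted to async execution.
 */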
static inline int __io_alloc_async_ctx(struct io_kiocb *req)
{
	req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
	return req->io == NULL;
}

static int io_alloc_async_ctx(struct io_kiocb *req)
{
	if (!io_op_defs[req->opcode].async_ctx)
		return 0;

	return __io_alloc_async_ctx(req);
}

static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
			     struct iovec *iovec, struct iovec *fast_iov,
			     struct iov_iter *iter)
{
	if (!io_op_defs[req->opcode].async_ctx)
		return 0;
	if (!req->io) {
		if (__io_alloc_async_ctx(req))
			return -ENOMEM;

		io_req_map_rw(req, io_size, iovec, fast_iov, iter);
	}
	return 0;
}

static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			bool force_nonblock)
{
	struct io_async_ctx *io;
	struct iov_iter iter;
	ssize_t ret;

	ret = io_prep_rw(req, sqe, force_nonblock);
	if (ret)
		return ret;

	if (unlikely(!(req->file->f_mode & FMODE_READ)))
		return -EBADF;

	/* either don't need iovec imported or already have it */
	if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	io = req->io;
	io->rw.iov = io->rw.fast_iov;
	req->io = NULL;
	ret = io_import_iovec(READ, req, &io->rw.iov, &iter, !force_nonblock);
	req->io = io;
	if (ret < 0)
		return ret;

	io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
	return 0;
}

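/*
 * Issue a read. With force_nonblock set we try a non-blocking attempt
 * first; on -EAGAIN the iovec is copied into the async context and the
 * request is punted to a worker for a blocking retry.
 */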
static int io_read(struct io_kiocb *req, bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter iter;
	size_t iov_count;
	ssize_t io_size, ret;

	ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
	if (ret < 0)
		return ret;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;

	req->result = 0;
	io_size = ret;
	if (req->flags & REQ_F_LINK_HEAD)
		req->result = io_size;

	/*
	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
	 * we know to async punt it even if it was opened O_NONBLOCK
	 */
	if (force_nonblock && !io_file_supports_async(req->file, READ))
		goto copy_iov;

	iov_count = iov_iter_count(&iter);
	ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		ssize_t ret2;

		if (req->file->f_op->read_iter)
			ret2 = call_read_iter(req->file, kiocb, &iter);
		else
			ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);

		/* Catch -EAGAIN return for forced non-blocking submission */
		if (!force_nonblock || ret2 != -EAGAIN) {
			kiocb_done(kiocb, ret2);
		} else {
copy_iov:
			ret = io_setup_async_rw(req, io_size, iovec,
						inline_vecs, &iter);
			if (ret)
				goto out_free;
			/* any defer here is final, must blocking retry */
			if (!(req->flags & REQ_F_NOWAIT) &&
			    !file_can_poll(req->file))
				req->flags |= REQ_F_MUST_PUNT;
			return -EAGAIN;
		}
	}
out_free:
	kfree(iovec);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	return ret;
}

Jens Axboe3529d8c2019-12-19 18:24:38 -07002659static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2660 bool force_nonblock)
Jens Axboef67676d2019-12-02 11:03:47 -07002661{
Jens Axboe3529d8c2019-12-19 18:24:38 -07002662 struct io_async_ctx *io;
2663 struct iov_iter iter;
Jens Axboef67676d2019-12-02 11:03:47 -07002664 ssize_t ret;
2665
Jens Axboe3529d8c2019-12-19 18:24:38 -07002666 ret = io_prep_rw(req, sqe, force_nonblock);
2667 if (ret)
2668 return ret;
Jens Axboef67676d2019-12-02 11:03:47 -07002669
Jens Axboe3529d8c2019-12-19 18:24:38 -07002670 if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
2671 return -EBADF;
Jens Axboef67676d2019-12-02 11:03:47 -07002672
Jens Axboe4ed734b2020-03-20 11:23:41 -06002673 req->fsize = rlimit(RLIMIT_FSIZE);
2674
Pavel Begunkov5f798be2020-02-08 13:28:02 +03002675 /* either don't need iovec imported or already have it */
2676 if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
Jens Axboe3529d8c2019-12-19 18:24:38 -07002677 return 0;
2678
2679 io = req->io;
2680 io->rw.iov = io->rw.fast_iov;
2681 req->io = NULL;
Jens Axboebcda7ba2020-02-23 16:42:51 -07002682 ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter, !force_nonblock);
Jens Axboe3529d8c2019-12-19 18:24:38 -07002683 req->io = io;
2684 if (ret < 0)
2685 return ret;
2686
2687 io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
2688 return 0;
Jens Axboef67676d2019-12-02 11:03:47 -07002689}
2690
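/*
 * Same inline-first strategy as io_read(), with the extra handling that
 * writes need: freeze protection for regular files (released later in
 * io_complete_rw()) and applying the submitter's RLIMIT_FSIZE around a
 * blocking issue of the write.
 */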
static int io_write(struct io_kiocb *req, bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter iter;
	size_t iov_count;
	ssize_t ret, io_size;

	ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
	if (ret < 0)
		return ret;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;

	req->result = 0;
	io_size = ret;
	if (req->flags & REQ_F_LINK_HEAD)
		req->result = io_size;

	/*
	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
	 * we know to async punt it even if it was opened O_NONBLOCK
	 */
	if (force_nonblock && !io_file_supports_async(req->file, WRITE))
		goto copy_iov;

	/* file path doesn't support NOWAIT for non-direct_IO */
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
	    (req->flags & REQ_F_ISREG))
		goto copy_iov;

	iov_count = iov_iter_count(&iter);
	ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		ssize_t ret2;

		/*
		 * Open-code file_start_write here to grab freeze protection,
		 * which will be released by another thread in
		 * io_complete_rw(). Fool lockdep by telling it the lock got
		 * released so that it doesn't complain about the held lock when
		 * we return to userspace.
		 */
		if (req->flags & REQ_F_ISREG) {
			__sb_start_write(file_inode(req->file)->i_sb,
						SB_FREEZE_WRITE, true);
			__sb_writers_release(file_inode(req->file)->i_sb,
						SB_FREEZE_WRITE);
		}
		kiocb->ki_flags |= IOCB_WRITE;

		if (!force_nonblock)
			current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;

		if (req->file->f_op->write_iter)
			ret2 = call_write_iter(req->file, kiocb, &iter);
		else
			ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);

		if (!force_nonblock)
			current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

		/*
		 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
		 * retry them without IOCB_NOWAIT.
		 */
		if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
			ret2 = -EAGAIN;
		if (!force_nonblock || ret2 != -EAGAIN) {
			kiocb_done(kiocb, ret2);
		} else {
copy_iov:
			ret = io_setup_async_rw(req, io_size, iovec,
						inline_vecs, &iter);
			if (ret)
				goto out_free;
			/* any defer here is final, must blocking retry */
			if (!(req->flags & REQ_F_NOWAIT) &&
			    !file_can_poll(req->file))
				req->flags |= REQ_F_MUST_PUNT;
			return -EAGAIN;
		}
	}
out_free:
	req->flags &= ~REQ_F_NEED_CLEANUP;
	kfree(iovec);
	return ret;
}

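/*
 * Common prep for splice and tee: resolve the input file (optionally
 * from the fixed file table) and validate the flags. If the input file
 * isn't a regular file the operation may block for an unbounded time,
 * so it must run from an unbound io-wq worker.
 */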
static int __io_splice_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;
	unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
	int ret;

	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sp->file_in = NULL;
	sp->len = READ_ONCE(sqe->len);
	sp->flags = READ_ONCE(sqe->splice_flags);

	if (unlikely(sp->flags & ~valid_flags))
		return -EINVAL;

	ret = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), &sp->file_in,
			  (sp->flags & SPLICE_F_FD_IN_FIXED));
	if (ret)
		return ret;
	req->flags |= REQ_F_NEED_CLEANUP;

	if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
		/*
		 * The splice operation will be punted async, and we need to
		 * modify io_wq_work.flags here, so initialize io_wq_work
		 * first.
		 */
		io_req_init_async(req);
		req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	return 0;
}

static int io_tee_prep(struct io_kiocb *req,
		       const struct io_uring_sqe *sqe)
{
	if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
		return -EINVAL;
	return __io_splice_prep(req, sqe);
}

static int io_tee(struct io_kiocb *req, bool force_nonblock)
{
	struct io_splice *sp = &req->splice;
	struct file *in = sp->file_in;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	long ret = 0;

	if (force_nonblock)
		return -EAGAIN;
	if (sp->len)
		ret = do_tee(in, out, sp->len, flags);

	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
	req->flags &= ~REQ_F_NEED_CLEANUP;

	io_cqring_add_event(req, ret);
	if (ret != sp->len)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}

static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;

	sp->off_in = READ_ONCE(sqe->splice_off_in);
	sp->off_out = READ_ONCE(sqe->off);
	return __io_splice_prep(req, sqe);
}

static int io_splice(struct io_kiocb *req, bool force_nonblock)
{
	struct io_splice *sp = &req->splice;
	struct file *in = sp->file_in;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	loff_t *poff_in, *poff_out;
	long ret = 0;

	if (force_nonblock)
		return -EAGAIN;

	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;

	if (sp->len)
		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);

	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
	req->flags &= ~REQ_F_NEED_CLEANUP;

	io_cqring_add_event(req, ret);
	if (ret != sp->len)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}

/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	io_cqring_add_event(req, 0);
	io_put_req(req);
	return 0;
}

static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	return 0;
}

static int io_fsync(struct io_kiocb *req, bool force_nonblock)
{
	loff_t end = req->sync.off + req->sync.len;
	int ret;

	/* fsync always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;

	ret = vfs_fsync_range(req->file, req->sync.off,
				end > 0 ? end : LLONG_MAX,
				req->sync.flags & IORING_FSYNC_DATASYNC);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}

static int io_fallocate_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->addr);
	req->sync.mode = READ_ONCE(sqe->len);
	req->fsize = rlimit(RLIMIT_FSIZE);
	return 0;
}

static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
{
	int ret;

	/* fallocate always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;

	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
				req->sync.len);
	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}

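/*
 * Shared prep for IORING_OP_OPENAT/OPENAT2. getname() pins the filename
 * so the open can safely run later from async context; it is released
 * via putname() in io_openat2() or by the request cleanup path.
 */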
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	int ret;

	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (unlikely(sqe->ioprio || sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
		req->open.how.flags |= O_LARGEFILE;

	req->open.dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}
	req->open.nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	u64 flags, mode;

	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;
	mode = READ_ONCE(sqe->len);
	flags = READ_ONCE(sqe->open_flags);
	req->open.how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct open_how __user *how;
	size_t len;
	int ret;

	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;
	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
					len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

static int io_openat2(struct io_kiocb *req, bool force_nonblock)
{
	struct open_flags op;
	struct file *file;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	ret = build_open_flags(&req->open.how, &op);
	if (ret)
		goto err;

	ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
	if (ret < 0)
		goto err;

	file = do_filp_open(req->open.dfd, req->open.filename, &op);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
	} else {
		fsnotify_open(file);
		fd_install(ret, file);
	}
err:
	putname(req->open.filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}

static int io_openat(struct io_kiocb *req, bool force_nonblock)
{
	return io_openat2(req, force_nonblock);
}

static int io_remove_buffers_prep(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

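/*
 * Free up to 'nbufs' buffers from group 'bgid'. The head kbuf doubles as
 * the list anchor, so it is freed last, and the group is only removed
 * from the idr once every buffer in it is gone.
 */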
static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
			       int bgid, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	/* the head kbuf is the list itself */
	while (!list_empty(&buf->list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&buf->list, struct io_buffer, list);
		list_del(&nxt->list);
		kfree(nxt);
		if (++i == nbufs)
			return i;
	}
	i++;
	kfree(buf);
	idr_remove(&ctx->io_buffer_idr, bgid);

	return i;
}

static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head;
	int ret = 0;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	ret = -ENOENT;
	head = idr_find(&ctx->io_buffer_idr, p->bgid);
	if (head)
		ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);

	io_ring_submit_unlock(ctx, !force_nonblock);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}

static int io_provide_buffers_prep(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	p->bid = tmp;
	return 0;
}

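/*
 * Allocate and link up to pbuf->nbufs buffers into the group list,
 * assigning consecutive buffer IDs starting at pbuf->bid. Returns the
 * number of buffers actually added, or -ENOMEM if none could be.
 */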
static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf)
			break;

		buf->addr = addr;
		buf->len = pbuf->len;
		buf->bid = bid;
		addr += pbuf->len;
		bid++;
		if (!*head) {
			INIT_LIST_HEAD(&buf->list);
			*head = buf;
		} else {
			list_add_tail(&buf->list, &(*head)->list);
		}
	}

	return i ? i : -ENOMEM;
}

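/*
 * IORING_OP_PROVIDE_BUFFERS: register buffers with a group, creating the
 * group's idr entry on first use. The uring_lock is already held during
 * inline submission, so it is only taken here when invoked from io-wq.
 */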
static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head, *list;
	int ret = 0;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	list = head = idr_find(&ctx->io_buffer_idr, p->bgid);

	ret = io_add_buffers(p, &head);
	if (ret < 0)
		goto out;

	if (!list) {
		ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
					GFP_KERNEL);
		if (ret < 0) {
			__io_remove_buffers(ctx, head, p->bgid, -1U);
			goto out;
		}
	}
out:
	io_ring_submit_unlock(ctx, !force_nonblock);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}

static int io_epoll_ctl_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_EPOLL)
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->epoll.epfd = READ_ONCE(sqe->fd);
	req->epoll.op = READ_ONCE(sqe->len);
	req->epoll.fd = READ_ONCE(sqe->off);

	if (ep_op_has_event(req->epoll.op)) {
		struct epoll_event __user *ev;

		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
			return -EFAULT;
	}

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_EPOLL)
	struct io_epoll *ie = &req->epoll;
	int ret;

	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;

	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	if (sqe->ioprio || sqe->buf_index || sqe->off)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->madvise.addr = READ_ONCE(sqe->addr);
	req->madvise.len = READ_ONCE(sqe->len);
	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise(struct io_kiocb *req, bool force_nonblock)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	struct io_madvise *ma = &req->madvise;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	ret = do_madvise(ma->addr, ma->len, ma->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->addr)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->fadvise.offset = READ_ONCE(sqe->off);
	req->fadvise.len = READ_ONCE(sqe->len);
	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
}

static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
{
	struct io_fadvise *fa = &req->fadvise;
	int ret;

	if (force_nonblock) {
		switch (fa->advice) {
		case POSIX_FADV_NORMAL:
		case POSIX_FADV_RANDOM:
		case POSIX_FADV_SEQUENTIAL:
			break;
		default:
			return -EAGAIN;
		}
	}

	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}

static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	req->statx.dfd = READ_ONCE(sqe->fd);
	req->statx.mask = READ_ONCE(sqe->len);
	req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	req->statx.flags = READ_ONCE(sqe->statx_flags);

	return 0;
}

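/*
 * statx always needs a blocking context. When punting, the submitter's
 * file table is only required for a real dfd; for AT_FDCWD or -1 we flag
 * that grabbing it can be skipped.
 */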
static int io_statx(struct io_kiocb *req, bool force_nonblock)
{
	struct io_statx *ctx = &req->statx;
	int ret;

	if (force_nonblock) {
		/* only need file table for an actual valid fd */
		if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
			req->flags |= REQ_F_NO_FILE_TABLE;
		return -EAGAIN;
	}

	ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
		       ctx->buffer);

	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}

static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	/*
	 * If we queue this for async, it must not be cancellable. That would
	 * leave the 'file' in an indeterminate state, and we need to modify
	 * io_wq_work.flags here, so initialize io_wq_work first.
	 */
	io_req_init_async(req);
	req->work.flags |= IO_WQ_WORK_NO_CANCEL;

	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
	    sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	req->close.fd = READ_ONCE(sqe->fd);
	if ((req->file && req->file->f_op == &io_uring_fops) ||
	    req->close.fd == req->ctx->ring_fd)
		return -EBADF;

	req->close.put_file = NULL;
	return 0;
}

static int io_close(struct io_kiocb *req, bool force_nonblock)
{
	struct io_close *close = &req->close;
	int ret;

	/* might be already done during nonblock submission */
	if (!close->put_file) {
		ret = __close_fd_get_file(close->fd, &close->put_file);
		if (ret < 0)
			return (ret == -ENOENT) ? -EBADF : ret;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (close->put_file->f_op->flush && force_nonblock) {
		/* avoid grabbing files - we don't need the files */
		req->flags |= REQ_F_NO_FILE_TABLE | REQ_F_MUST_PUNT;
		return -EAGAIN;
	}

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(close->put_file, req->work.files);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	fput(close->put_file);
	close->put_file = NULL;
	io_put_req(req);
	return 0;
}

static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
	return 0;
}

static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
{
	int ret;

	/* sync_file_range always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;

	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
				req->sync.flags);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}

#if defined(CONFIG_NET)
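/*
 * Preserve sendmsg/recvmsg state in the request's async context so the
 * operation can be retried from io-wq; returning -EAGAIN tells the
 * caller to punt.
 */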
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg)
{
	if (req->io)
		return -EAGAIN;
	if (io_alloc_async_ctx(req)) {
		if (kmsg->iov != kmsg->fast_iov)
			kfree(kmsg->iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(&req->io->msg, kmsg, sizeof(*kmsg));
	return -EAGAIN;
}

static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_async_ctx *io = req->io;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif

	if (!io || req->opcode == IORING_OP_SEND)
		return 0;
	/* iovec is already imported */
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	io->msg.iov = io->msg.fast_iov;
	ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
					&io->msg.iov);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
{
	struct io_async_msghdr *kmsg = NULL;
	struct socket *sock;
	int ret;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct io_async_ctx io;
		unsigned flags;

		if (req->io) {
			kmsg = &req->io->msg;
			kmsg->msg.msg_name = &req->io->msg.addr;
			/* if iov is set, it's allocated already */
			if (!kmsg->iov)
				kmsg->iov = kmsg->fast_iov;
			kmsg->msg.msg_iter.iov = kmsg->iov;
		} else {
			struct io_sr_msg *sr = &req->sr_msg;

			kmsg = &io.msg;
			kmsg->msg.msg_name = &io.msg.addr;

			io.msg.iov = io.msg.fast_iov;
			ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
					sr->msg_flags, &io.msg.iov);
			if (ret)
				return ret;
		}

		flags = req->sr_msg.msg_flags;
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
		if (force_nonblock && ret == -EAGAIN)
			return io_setup_async_msg(req, kmsg);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	if (kmsg && kmsg->iov != kmsg->fast_iov)
		kfree(kmsg->iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_cqring_add_event(req, ret);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}

static int io_send(struct io_kiocb *req, bool force_nonblock)
{
	struct socket *sock;
	int ret;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct io_sr_msg *sr = &req->sr_msg;
		struct msghdr msg;
		struct iovec iov;
		unsigned flags;

		ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
						&msg.msg_iter);
		if (ret)
			return ret;

		msg.msg_name = NULL;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_namelen = 0;

		flags = req->sr_msg.msg_flags;
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		msg.msg_flags = flags;
		ret = sock_sendmsg(sock, &msg);
		if (force_nonblock && ret == -EAGAIN)
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	io_cqring_add_event(req, ret);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct iovec __user *uiov;
	size_t iov_len;
	int ret;

	ret = __copy_msghdr_from_user(&io->msg.msg, sr->msg, &io->msg.uaddr,
					&uiov, &iov_len);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (iov_len > 1)
			return -EINVAL;
		if (copy_from_user(io->msg.iov, uiov, sizeof(*uiov)))
			return -EFAULT;
		sr->len = io->msg.iov[0].iov_len;
		iov_iter_init(&io->msg.msg.msg_iter, READ, io->msg.iov, 1,
				sr->len);
		io->msg.iov = NULL;
	} else {
		ret = import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
					&io->msg.iov, &io->msg.msg.msg_iter);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_ctx *io)
{
	struct compat_msghdr __user *msg_compat;
	struct io_sr_msg *sr = &req->sr_msg;
	struct compat_iovec __user *uiov;
	compat_uptr_t ptr;
	compat_size_t len;
	int ret;

	msg_compat = (struct compat_msghdr __user *) sr->msg;
	ret = __get_compat_msghdr(&io->msg.msg, msg_compat, &io->msg.uaddr,
					&ptr, &len);
	if (ret)
		return ret;

	uiov = compat_ptr(ptr);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (len > 1)
			return -EINVAL;
		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;
		if (__get_user(clen, &uiov->iov_len))
			return -EFAULT;
		if (clen < 0)
			return -EINVAL;
		sr->len = clen;
		io->msg.iov = NULL;
	} else {
		ret = compat_import_iovec(READ, uiov, len, UIO_FASTIOV,
						&io->msg.iov,
						&io->msg.msg.msg_iter);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

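/*
 * Copy the user-space msghdr (native or compat layout) into the async
 * context, importing the iovec unless the request is using buffer
 * selection.
 */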
static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
{
	io->msg.iov = io->msg.fast_iov;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, io);
#endif

	return __io_recvmsg_copy_hdr(req, io);
}

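/*
 * For IOSQE_BUFFER_SELECT requests, pick a buffer from the requested
 * group and report its ID back through the CQE flags so the application
 * knows which buffer was consumed.
 */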
static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
					       int *cflags, bool needs_lock)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_buffer *kbuf;

	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return NULL;

	kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
	if (IS_ERR(kbuf))
		return kbuf;

	sr->kbuf = kbuf;
	req->flags |= REQ_F_BUFFER_SELECTED;

	*cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
	*cflags |= IORING_CQE_F_BUFFER;
	return kbuf;
}

static int io_recvmsg_prep(struct io_kiocb *req,
			   const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_async_ctx *io = req->io;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->bgid = READ_ONCE(sqe->buf_group);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif

	if (!io || req->opcode == IORING_OP_RECV)
		return 0;
	/* iovec is already imported */
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	ret = io_recvmsg_copy_hdr(req, io);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
{
	struct io_async_msghdr *kmsg = NULL;
	struct socket *sock;
	int ret, cflags = 0;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct io_buffer *kbuf;
		struct io_async_ctx io;
		unsigned flags;

		if (req->io) {
			kmsg = &req->io->msg;
			kmsg->msg.msg_name = &req->io->msg.addr;
			/* if iov is set, it's allocated already */
			if (!kmsg->iov)
				kmsg->iov = kmsg->fast_iov;
			kmsg->msg.msg_iter.iov = kmsg->iov;
		} else {
			kmsg = &io.msg;
			kmsg->msg.msg_name = &io.msg.addr;

			ret = io_recvmsg_copy_hdr(req, &io);
			if (ret)
				return ret;
		}

		kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
		if (IS_ERR(kbuf)) {
			return PTR_ERR(kbuf);
		} else if (kbuf) {
			kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
			iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
					1, req->sr_msg.len);
		}

		flags = req->sr_msg.msg_flags;
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
						kmsg->uaddr, flags);
		if (force_nonblock && ret == -EAGAIN)
			return io_setup_async_msg(req, kmsg);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	if (kmsg && kmsg->iov != kmsg->fast_iov)
		kfree(kmsg->iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	__io_cqring_add_event(req, ret, cflags);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}

static int io_recv(struct io_kiocb *req, bool force_nonblock)
{
	struct io_buffer *kbuf = NULL;
	struct socket *sock;
	int ret, cflags = 0;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct io_sr_msg *sr = &req->sr_msg;
		void __user *buf = sr->buf;
		struct msghdr msg;
		struct iovec iov;
		unsigned flags;

		kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);
		else if (kbuf)
			buf = u64_to_user_ptr(kbuf->addr);

		ret = import_single_range(READ, buf, sr->len, &iov,
						&msg.msg_iter);
		if (ret) {
			kfree(kbuf);
			return ret;
		}

		req->flags |= REQ_F_NEED_CLEANUP;
		msg.msg_name = NULL;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_namelen = 0;
		msg.msg_iocb = NULL;
		msg.msg_flags = 0;

		flags = req->sr_msg.msg_flags;
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

		ret = sock_recvmsg(sock, &msg, flags);
		if (force_nonblock && ret == -EAGAIN)
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	kfree(kbuf);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	__io_cqring_add_event(req, ret, cflags);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}

static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = &req->accept;

	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	return 0;
}

static int io_accept(struct io_kiocb *req, bool force_nonblock)
{
	struct io_accept *accept = &req->accept;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	int ret;

	if (req->file->f_flags & O_NONBLOCK)
		req->flags |= REQ_F_NOWAIT;

	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
					accept->addr_len, accept->flags,
					accept->nofile);
	if (ret == -EAGAIN && force_nonblock)
		return -EAGAIN;
	if (ret < 0) {
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail_links(req);
	}
	io_cqring_add_event(req, ret);
	io_put_req(req);
	return 0;
}

static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = &req->connect;
	struct io_async_ctx *io = req->io;

	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);

	if (!io)
		return 0;

	return move_addr_to_kernel(conn->addr, conn->addr_len,
					&io->connect.address);
}

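/*
 * Drive a (possibly non-blocking) connect. On -EAGAIN or -EINPROGRESS
 * under force_nonblock, the kernel copy of the address is stashed in the
 * async context and the request is retried from a blocking context.
 */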
Pavel Begunkov014db002020-03-03 21:33:12 +03003950static int io_connect(struct io_kiocb *req, bool force_nonblock)
Jens Axboef8e85cf2019-11-23 14:24:24 -07003951{
Jens Axboef499a022019-12-02 16:28:46 -07003952 struct io_async_ctx __io, *io;
Jens Axboef8e85cf2019-11-23 14:24:24 -07003953 unsigned file_flags;
Jens Axboe3fbb51c2019-12-20 08:51:52 -07003954 int ret;
Jens Axboef8e85cf2019-11-23 14:24:24 -07003955
Jens Axboef499a022019-12-02 16:28:46 -07003956 if (req->io) {
3957 io = req->io;
3958 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07003959 ret = move_addr_to_kernel(req->connect.addr,
3960 req->connect.addr_len,
3961 &__io.connect.address);
Jens Axboef499a022019-12-02 16:28:46 -07003962 if (ret)
3963 goto out;
3964 io = &__io;
3965 }
3966
Jens Axboe3fbb51c2019-12-20 08:51:52 -07003967 file_flags = force_nonblock ? O_NONBLOCK : 0;
3968
3969 ret = __sys_connect_file(req->file, &io->connect.address,
3970 req->connect.addr_len, file_flags);
Jens Axboe87f80d62019-12-03 11:23:54 -07003971 if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003972 if (req->io)
3973 return -EAGAIN;
3974 if (io_alloc_async_ctx(req)) {
Jens Axboef499a022019-12-02 16:28:46 -07003975 ret = -ENOMEM;
3976 goto out;
3977 }
Jens Axboeb7bb4f72019-12-15 22:13:43 -07003978 memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect));
Jens Axboef8e85cf2019-11-23 14:24:24 -07003979 return -EAGAIN;
Jens Axboef499a022019-12-02 16:28:46 -07003980 }
Jens Axboef8e85cf2019-11-23 14:24:24 -07003981 if (ret == -ERESTARTSYS)
3982 ret = -EINTR;
Jens Axboef499a022019-12-02 16:28:46 -07003983out:
Jens Axboe4e88d6e2019-12-07 20:59:47 -07003984 if (ret < 0)
3985 req_set_fail_links(req);
Jens Axboef8e85cf2019-11-23 14:24:24 -07003986 io_cqring_add_event(req, ret);
Pavel Begunkov014db002020-03-03 21:33:12 +03003987 io_put_req(req);
Jens Axboef8e85cf2019-11-23 14:24:24 -07003988 return 0;
Jens Axboef8e85cf2019-11-23 14:24:24 -07003989}
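
/*
 * Usage sketch (illustrative, not part of this file): a connect via
 * liburing; `ring`, `sock_fd` and a filled-in `struct sockaddr_in dest`
 * are assumptions. Note io_connect() above copies the address into the
 * async context on -EAGAIN/-EINPROGRESS, so retries never re-read
 * application memory.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_connect(sqe, sock_fd, (struct sockaddr *)&dest,
 *			      sizeof(dest));
 *	io_uring_submit(&ring);
 *	// CQE res is 0 on success or -errno (e.g. -ECONNREFUSED)
 */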
YueHaibing469956e2020-03-04 15:53:52 +08003990#else /* !CONFIG_NET */
3991static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3992{
Jens Axboef8e85cf2019-11-23 14:24:24 -07003993 return -EOPNOTSUPP;
Jens Axboef8e85cf2019-11-23 14:24:24 -07003994}
3995
YueHaibing469956e2020-03-04 15:53:52 +08003996static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
Jens Axboe221c5eb2019-01-17 09:41:58 -07003997{
YueHaibing469956e2020-03-04 15:53:52 +08003998 return -EOPNOTSUPP;
3999}
4000
4001static int io_send(struct io_kiocb *req, bool force_nonblock)
4002{
4003 return -EOPNOTSUPP;
4004}
4005
4006static int io_recvmsg_prep(struct io_kiocb *req,
4007 const struct io_uring_sqe *sqe)
4008{
4009 return -EOPNOTSUPP;
4010}
4011
4012static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
4013{
4014 return -EOPNOTSUPP;
4015}
4016
4017static int io_recv(struct io_kiocb *req, bool force_nonblock)
4018{
4019 return -EOPNOTSUPP;
4020}
4021
4022static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4023{
4024 return -EOPNOTSUPP;
4025}
4026
4027static int io_accept(struct io_kiocb *req, bool force_nonblock)
4028{
4029 return -EOPNOTSUPP;
4030}
4031
4032static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4033{
4034 return -EOPNOTSUPP;
4035}
4036
4037static int io_connect(struct io_kiocb *req, bool force_nonblock)
4038{
4039 return -EOPNOTSUPP;
4040}
4041#endif /* CONFIG_NET */
Jens Axboe2b188cc2019-01-07 10:46:33 -07004042
Jens Axboed7718a92020-02-14 22:23:12 -07004043struct io_poll_table {
4044 struct poll_table_struct pt;
4045 struct io_kiocb *req;
4046 int error;
4047};
4048
Jens Axboed7718a92020-02-14 22:23:12 -07004049static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4050 __poll_t mask, task_work_func_t func)
4051{
4052 struct task_struct *tsk;
Jens Axboeaa96bf82020-04-03 11:26:26 -06004053 int ret;
Jens Axboed7718a92020-02-14 22:23:12 -07004054
4055	/* for instances that support it, check for an event match first */
4056 if (mask && !(mask & poll->events))
4057 return 0;
4058
4059 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4060
4061 list_del_init(&poll->wait.entry);
4062
4063 tsk = req->task;
4064 req->result = mask;
4065 init_task_work(&req->task_work, func);
4066 /*
Jens Axboee3aabf92020-05-18 11:04:17 -06004067 * If this fails, then the task is exiting. When a task exits, the
4068 * work gets canceled, so just cancel this request as well instead
4069 * of executing it. We can't safely execute it anyway, as we may not
4070	 * have the state needed for it.
Jens Axboed7718a92020-02-14 22:23:12 -07004071 */
Jens Axboeaa96bf82020-04-03 11:26:26 -06004072 ret = task_work_add(tsk, &req->task_work, true);
4073 if (unlikely(ret)) {
Jens Axboee3aabf92020-05-18 11:04:17 -06004074 WRITE_ONCE(poll->canceled, true);
Jens Axboeaa96bf82020-04-03 11:26:26 -06004075 tsk = io_wq_get_task(req->ctx->io_wq);
4076 task_work_add(tsk, &req->task_work, true);
4077 }
Jens Axboed7718a92020-02-14 22:23:12 -07004078 wake_up_process(tsk);
4079 return 1;
4080}
4081
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004082static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4083 __acquires(&req->ctx->completion_lock)
4084{
4085 struct io_ring_ctx *ctx = req->ctx;
4086
4087 if (!req->result && !READ_ONCE(poll->canceled)) {
4088 struct poll_table_struct pt = { ._key = poll->events };
4089
4090 req->result = vfs_poll(req->file, &pt) & poll->events;
4091 }
4092
4093 spin_lock_irq(&ctx->completion_lock);
4094 if (!req->result && !READ_ONCE(poll->canceled)) {
4095 add_wait_queue(poll->head, &poll->wait);
4096 return true;
4097 }
4098
4099 return false;
4100}
4101
Jens Axboe18bceab2020-05-15 11:56:54 -06004102static void io_poll_remove_double(struct io_kiocb *req)
4103{
4104 struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
4105
4106 lockdep_assert_held(&req->ctx->completion_lock);
4107
4108 if (poll && poll->head) {
4109 struct wait_queue_head *head = poll->head;
4110
4111 spin_lock(&head->lock);
4112 list_del_init(&poll->wait.entry);
4113 if (poll->wait.private)
4114 refcount_dec(&req->refs);
4115 poll->head = NULL;
4116 spin_unlock(&head->lock);
4117 }
4118}
4119
4120static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
4121{
4122 struct io_ring_ctx *ctx = req->ctx;
4123
4124 io_poll_remove_double(req);
4125 req->poll.done = true;
4126 io_cqring_fill_event(req, error ? error : mangle_poll(mask));
4127 io_commit_cqring(ctx);
4128}
4129
4130static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
4131{
4132 struct io_ring_ctx *ctx = req->ctx;
4133
4134 if (io_poll_rewait(req, &req->poll)) {
4135 spin_unlock_irq(&ctx->completion_lock);
4136 return;
4137 }
4138
4139 hash_del(&req->hash_node);
4140 io_poll_complete(req, req->result, 0);
4141 req->flags |= REQ_F_COMP_LOCKED;
4142 io_put_req_find_next(req, nxt);
4143 spin_unlock_irq(&ctx->completion_lock);
4144
4145 io_cqring_ev_posted(ctx);
4146}
4147
4148static void io_poll_task_func(struct callback_head *cb)
4149{
4150 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4151 struct io_kiocb *nxt = NULL;
4152
4153 io_poll_task_handler(req, &nxt);
4154 if (nxt) {
4155 struct io_ring_ctx *ctx = nxt->ctx;
4156
4157 mutex_lock(&ctx->uring_lock);
4158 __io_queue_sqe(nxt, NULL);
4159 mutex_unlock(&ctx->uring_lock);
4160 }
4161}
4162
4163static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
4164 int sync, void *key)
4165{
4166 struct io_kiocb *req = wait->private;
4167 struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io;
4168 __poll_t mask = key_to_poll(key);
4169
4170	/* for instances that support it, check for an event match first */
4171 if (mask && !(mask & poll->events))
4172 return 0;
4173
4174 if (req->poll.head) {
4175 bool done;
4176
4177 spin_lock(&req->poll.head->lock);
4178 done = list_empty(&req->poll.wait.entry);
4179 if (!done)
4180 list_del_init(&req->poll.wait.entry);
4181 spin_unlock(&req->poll.head->lock);
4182 if (!done)
4183 __io_async_wake(req, poll, mask, io_poll_task_func);
4184 }
4185 refcount_dec(&req->refs);
4186 return 1;
4187}
4188
4189static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
4190 wait_queue_func_t wake_func)
4191{
4192 poll->head = NULL;
4193 poll->done = false;
4194 poll->canceled = false;
4195 poll->events = events;
4196 INIT_LIST_HEAD(&poll->wait.entry);
4197 init_waitqueue_func_entry(&poll->wait, wake_func);
4198}
4199
4200static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
4201 struct wait_queue_head *head)
4202{
4203 struct io_kiocb *req = pt->req;
4204
4205 /*
4206 * If poll->head is already set, it's because the file being polled
4207	 * uses multiple waitqueues for poll handling (e.g. one for read, one
4208	 * for write). Set up a separate io_poll_iocb if this happens.
4209 */
4210 if (unlikely(poll->head)) {
4211 /* already have a 2nd entry, fail a third attempt */
4212 if (req->io) {
4213 pt->error = -EINVAL;
4214 return;
4215 }
4216 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
4217 if (!poll) {
4218 pt->error = -ENOMEM;
4219 return;
4220 }
4221 io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
4222 refcount_inc(&req->refs);
4223 poll->wait.private = req;
4224 req->io = (void *) poll;
4225 }
4226
4227 pt->error = 0;
4228 poll->head = head;
4229 add_wait_queue(head, &poll->wait);
4230}
4231
4232static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
4233 struct poll_table_struct *p)
4234{
4235 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4236
4237 __io_queue_proc(&pt->req->apoll->poll, pt, head);
4238}
4239
Jens Axboed7718a92020-02-14 22:23:12 -07004240static void io_async_task_func(struct callback_head *cb)
4241{
4242 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
4243 struct async_poll *apoll = req->apoll;
4244 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe31067252020-05-17 17:43:31 -06004245 bool canceled = false;
Jens Axboed7718a92020-02-14 22:23:12 -07004246
4247 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
4248
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004249 if (io_poll_rewait(req, &apoll->poll)) {
Jens Axboed7718a92020-02-14 22:23:12 -07004250 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004251 return;
Jens Axboed7718a92020-02-14 22:23:12 -07004252 }
4253
Jens Axboe31067252020-05-17 17:43:31 -06004254 /* If req is still hashed, it cannot have been canceled. Don't check. */
4255 if (hash_hashed(&req->hash_node)) {
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004256 hash_del(&req->hash_node);
Jens Axboe31067252020-05-17 17:43:31 -06004257 } else {
4258 canceled = READ_ONCE(apoll->poll.canceled);
4259 if (canceled) {
4260 io_cqring_fill_event(req, -ECANCELED);
4261 io_commit_cqring(ctx);
4262 }
Jens Axboe2bae0472020-04-13 11:16:34 -06004263 }
4264
Jens Axboe74ce6ce2020-04-13 11:09:12 -06004265 spin_unlock_irq(&ctx->completion_lock);
4266
Xiaoguang Wang44575a62020-04-19 10:06:55 +08004267	/* restore ->work in case we need to retry */
Xiaoguang Wang405a5d22020-06-10 19:41:20 +08004268 if (req->flags & REQ_F_WORK_INITIALIZED)
4269 memcpy(&req->work, &apoll->work, sizeof(req->work));
Jens Axboe31067252020-05-17 17:43:31 -06004270 kfree(apoll);
Xiaoguang Wang44575a62020-04-19 10:06:55 +08004271
Jens Axboe31067252020-05-17 17:43:31 -06004272 if (!canceled) {
4273 __set_current_state(TASK_RUNNING);
4274 mutex_lock(&ctx->uring_lock);
4275 __io_queue_sqe(req, NULL);
4276 mutex_unlock(&ctx->uring_lock);
4277 } else {
Jens Axboe2bae0472020-04-13 11:16:34 -06004278 io_cqring_ev_posted(ctx);
4279 req_set_fail_links(req);
Xiaoguang Wang44575a62020-04-19 10:06:55 +08004280 io_double_put_req(req);
Jens Axboe2bae0472020-04-13 11:16:34 -06004281 }
Jens Axboed7718a92020-02-14 22:23:12 -07004282}
4283
4284static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4285 void *key)
4286{
4287 struct io_kiocb *req = wait->private;
4288 struct io_poll_iocb *poll = &req->apoll->poll;
4289
4290 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
4291 key_to_poll(key));
4292
4293 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
4294}
4295
4296static void io_poll_req_insert(struct io_kiocb *req)
4297{
4298 struct io_ring_ctx *ctx = req->ctx;
4299 struct hlist_head *list;
4300
4301 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
4302 hlist_add_head(&req->hash_node, list);
4303}
4304
4305static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
4306 struct io_poll_iocb *poll,
4307 struct io_poll_table *ipt, __poll_t mask,
4308 wait_queue_func_t wake_func)
4309 __acquires(&ctx->completion_lock)
4310{
4311 struct io_ring_ctx *ctx = req->ctx;
4312 bool cancel = false;
4313
4314 poll->file = req->file;
Jens Axboe18bceab2020-05-15 11:56:54 -06004315 io_init_poll_iocb(poll, mask, wake_func);
4316 poll->wait.private = req;
Jens Axboed7718a92020-02-14 22:23:12 -07004317
4318 ipt->pt._key = mask;
4319 ipt->req = req;
4320 ipt->error = -EINVAL;
4321
Jens Axboed7718a92020-02-14 22:23:12 -07004322 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
4323
4324 spin_lock_irq(&ctx->completion_lock);
4325 if (likely(poll->head)) {
4326 spin_lock(&poll->head->lock);
4327 if (unlikely(list_empty(&poll->wait.entry))) {
4328 if (ipt->error)
4329 cancel = true;
4330 ipt->error = 0;
4331 mask = 0;
4332 }
4333 if (mask || ipt->error)
4334 list_del_init(&poll->wait.entry);
4335 else if (cancel)
4336 WRITE_ONCE(poll->canceled, true);
4337 else if (!poll->done) /* actually waiting for an event */
4338 io_poll_req_insert(req);
4339 spin_unlock(&poll->head->lock);
4340 }
4341
4342 return mask;
4343}
4344
4345static bool io_arm_poll_handler(struct io_kiocb *req)
4346{
4347 const struct io_op_def *def = &io_op_defs[req->opcode];
4348 struct io_ring_ctx *ctx = req->ctx;
4349 struct async_poll *apoll;
4350 struct io_poll_table ipt;
4351 __poll_t mask, ret;
Jens Axboe18bceab2020-05-15 11:56:54 -06004352 bool had_io;
Jens Axboed7718a92020-02-14 22:23:12 -07004353
4354 if (!req->file || !file_can_poll(req->file))
4355 return false;
4356 if (req->flags & (REQ_F_MUST_PUNT | REQ_F_POLLED))
4357 return false;
4358 if (!def->pollin && !def->pollout)
4359 return false;
4360
4361 apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
4362 if (unlikely(!apoll))
4363 return false;
4364
4365 req->flags |= REQ_F_POLLED;
Xiaoguang Wang405a5d22020-06-10 19:41:20 +08004366 if (req->flags & REQ_F_WORK_INITIALIZED)
4367 memcpy(&apoll->work, &req->work, sizeof(req->work));
Jens Axboe18bceab2020-05-15 11:56:54 -06004368 had_io = req->io != NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07004369
Jens Axboe3537b6a2020-04-03 11:19:06 -06004370 get_task_struct(current);
Jens Axboed7718a92020-02-14 22:23:12 -07004371 req->task = current;
4372 req->apoll = apoll;
4373 INIT_HLIST_NODE(&req->hash_node);
4374
Nathan Chancellor8755d972020-03-02 16:01:19 -07004375 mask = 0;
Jens Axboed7718a92020-02-14 22:23:12 -07004376 if (def->pollin)
Nathan Chancellor8755d972020-03-02 16:01:19 -07004377 mask |= POLLIN | POLLRDNORM;
Jens Axboed7718a92020-02-14 22:23:12 -07004378 if (def->pollout)
4379 mask |= POLLOUT | POLLWRNORM;
4380 mask |= POLLERR | POLLPRI;
4381
4382 ipt.pt._qproc = io_async_queue_proc;
4383
4384 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
4385 io_async_wake);
4386 if (ret) {
4387 ipt.error = 0;
Jens Axboe18bceab2020-05-15 11:56:54 -06004388 /* only remove double add if we did it here */
4389 if (!had_io)
4390 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07004391 spin_unlock_irq(&ctx->completion_lock);
Xiaoguang Wang405a5d22020-06-10 19:41:20 +08004392 if (req->flags & REQ_F_WORK_INITIALIZED)
4393 memcpy(&req->work, &apoll->work, sizeof(req->work));
Jens Axboed7718a92020-02-14 22:23:12 -07004394 kfree(apoll);
4395 return false;
4396 }
4397 spin_unlock_irq(&ctx->completion_lock);
4398 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
4399 apoll->poll.events);
4400 return true;
4401}
4402
4403static bool __io_poll_remove_one(struct io_kiocb *req,
4404 struct io_poll_iocb *poll)
4405{
Jens Axboeb41e9852020-02-17 09:52:41 -07004406 bool do_complete = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07004407
4408 spin_lock(&poll->head->lock);
4409 WRITE_ONCE(poll->canceled, true);
Jens Axboe392edb42019-12-09 17:52:20 -07004410 if (!list_empty(&poll->wait.entry)) {
4411 list_del_init(&poll->wait.entry);
Jens Axboeb41e9852020-02-17 09:52:41 -07004412 do_complete = true;
Jens Axboe221c5eb2019-01-17 09:41:58 -07004413 }
4414 spin_unlock(&poll->head->lock);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06004415 hash_del(&req->hash_node);
Jens Axboed7718a92020-02-14 22:23:12 -07004416 return do_complete;
4417}
4418
4419static bool io_poll_remove_one(struct io_kiocb *req)
4420{
4421 bool do_complete;
4422
4423 if (req->opcode == IORING_OP_POLL_ADD) {
Jens Axboe18bceab2020-05-15 11:56:54 -06004424 io_poll_remove_double(req);
Jens Axboed7718a92020-02-14 22:23:12 -07004425 do_complete = __io_poll_remove_one(req, &req->poll);
4426 } else {
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06004427 struct async_poll *apoll = req->apoll;
4428
Jens Axboed7718a92020-02-14 22:23:12 -07004429 /* non-poll requests have submit ref still */
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06004430 do_complete = __io_poll_remove_one(req, &apoll->poll);
4431 if (do_complete) {
Jens Axboed7718a92020-02-14 22:23:12 -07004432 io_put_req(req);
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06004433 /*
4434 * restore ->work because we will call
4435 * io_req_work_drop_env below when dropping the
4436 * final reference.
4437 */
Xiaoguang Wang405a5d22020-06-10 19:41:20 +08004438 if (req->flags & REQ_F_WORK_INITIALIZED)
4439 memcpy(&req->work, &apoll->work,
4440 sizeof(req->work));
Jens Axboe3bfa5bc2020-05-17 13:54:12 -06004441 kfree(apoll);
4442 }
Xiaoguang Wangb1f573b2020-04-12 14:50:54 +08004443 }
4444
Jens Axboeb41e9852020-02-17 09:52:41 -07004445 if (do_complete) {
4446 io_cqring_fill_event(req, -ECANCELED);
4447 io_commit_cqring(req->ctx);
4448 req->flags |= REQ_F_COMP_LOCKED;
4449 io_put_req(req);
4450 }
4451
4452 return do_complete;
Jens Axboe221c5eb2019-01-17 09:41:58 -07004453}
4454
4455static void io_poll_remove_all(struct io_ring_ctx *ctx)
4456{
Jens Axboe78076bb2019-12-04 19:56:40 -07004457 struct hlist_node *tmp;
Jens Axboe221c5eb2019-01-17 09:41:58 -07004458 struct io_kiocb *req;
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06004459 int posted = 0, i;
Jens Axboe221c5eb2019-01-17 09:41:58 -07004460
4461 spin_lock_irq(&ctx->completion_lock);
Jens Axboe78076bb2019-12-04 19:56:40 -07004462 for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
4463 struct hlist_head *list;
4464
4465 list = &ctx->cancel_hash[i];
4466 hlist_for_each_entry_safe(req, tmp, list, hash_node)
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06004467 posted += io_poll_remove_one(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07004468 }
4469 spin_unlock_irq(&ctx->completion_lock);
Jens Axboeb41e9852020-02-17 09:52:41 -07004470
Jens Axboe8e2e1fa2020-04-13 17:05:14 -06004471 if (posted)
4472 io_cqring_ev_posted(ctx);
Jens Axboe221c5eb2019-01-17 09:41:58 -07004473}
4474
Jens Axboe47f46762019-11-09 17:43:02 -07004475static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
4476{
Jens Axboe78076bb2019-12-04 19:56:40 -07004477 struct hlist_head *list;
Jens Axboe47f46762019-11-09 17:43:02 -07004478 struct io_kiocb *req;
4479
Jens Axboe78076bb2019-12-04 19:56:40 -07004480 list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
4481 hlist_for_each_entry(req, list, hash_node) {
Jens Axboeb41e9852020-02-17 09:52:41 -07004482 if (sqe_addr != req->user_data)
4483 continue;
4484 if (io_poll_remove_one(req))
Jens Axboeeac406c2019-11-14 12:09:58 -07004485 return 0;
Jens Axboeb41e9852020-02-17 09:52:41 -07004486 return -EALREADY;
Jens Axboe47f46762019-11-09 17:43:02 -07004487 }
4488
4489 return -ENOENT;
4490}
4491
Jens Axboe3529d8c2019-12-19 18:24:38 -07004492static int io_poll_remove_prep(struct io_kiocb *req,
4493 const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07004494{
Jens Axboe221c5eb2019-01-17 09:41:58 -07004495 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4496 return -EINVAL;
4497 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
4498 sqe->poll_events)
4499 return -EINVAL;
4500
Jens Axboe0969e782019-12-17 18:40:57 -07004501 req->poll.addr = READ_ONCE(sqe->addr);
Jens Axboe0969e782019-12-17 18:40:57 -07004502 return 0;
4503}
4504
4505/*
4506 * Find a running poll command that matches one specified in sqe->addr,
4507 * and remove it if found.
4508 */
4509static int io_poll_remove(struct io_kiocb *req)
4510{
4511 struct io_ring_ctx *ctx = req->ctx;
4512 u64 addr;
4513 int ret;
4514
Jens Axboe0969e782019-12-17 18:40:57 -07004515 addr = req->poll.addr;
Jens Axboe221c5eb2019-01-17 09:41:58 -07004516 spin_lock_irq(&ctx->completion_lock);
Jens Axboe0969e782019-12-17 18:40:57 -07004517 ret = io_poll_cancel(ctx, addr);
Jens Axboe221c5eb2019-01-17 09:41:58 -07004518 spin_unlock_irq(&ctx->completion_lock);
4519
Jens Axboe78e19bb2019-11-06 15:21:34 -07004520 io_cqring_add_event(req, ret);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004521 if (ret < 0)
4522 req_set_fail_links(req);
Jens Axboee65ef562019-03-12 10:16:44 -06004523 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07004524 return 0;
4525}
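
/*
 * Usage sketch (illustrative, not part of this file): removing a pending
 * poll from userspace. `ring` is assumed, and the `poll_cookie` passed
 * here must equal the user_data of the original IORING_OP_POLL_ADD sqe,
 * since io_poll_cancel() above looks requests up by ->user_data.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_remove(sqe, poll_cookie);
 *	io_uring_submit(&ring);
 *	// CQE res: 0 on success, -ENOENT if no match, -EALREADY if the
 *	// poll request is already completing
 */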
4526
Jens Axboe221c5eb2019-01-17 09:41:58 -07004527static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
4528 void *key)
4529{
Jens Axboec2f2eb72020-02-10 09:07:05 -07004530 struct io_kiocb *req = wait->private;
4531 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07004532
Jens Axboed7718a92020-02-14 22:23:12 -07004533 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
Jens Axboe221c5eb2019-01-17 09:41:58 -07004534}
4535
Jens Axboe221c5eb2019-01-17 09:41:58 -07004536static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
4537 struct poll_table_struct *p)
4538{
4539 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
4540
Jens Axboed7718a92020-02-14 22:23:12 -07004541 __io_queue_proc(&pt->req->poll, pt, head);
Jens Axboeeac406c2019-11-14 12:09:58 -07004542}
4543
Jens Axboe3529d8c2019-12-19 18:24:38 -07004544static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe221c5eb2019-01-17 09:41:58 -07004545{
4546 struct io_poll_iocb *poll = &req->poll;
Jens Axboe221c5eb2019-01-17 09:41:58 -07004547 u16 events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07004548
4549 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4550 return -EINVAL;
4551 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
4552 return -EINVAL;
Jens Axboe09bb8392019-03-13 12:39:28 -06004553 if (!poll->file)
4554 return -EBADF;
Jens Axboe221c5eb2019-01-17 09:41:58 -07004555
Jens Axboe221c5eb2019-01-17 09:41:58 -07004556 events = READ_ONCE(sqe->poll_events);
4557 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
Jens Axboeb41e9852020-02-17 09:52:41 -07004558
Jens Axboe3537b6a2020-04-03 11:19:06 -06004559 get_task_struct(current);
Jens Axboeb41e9852020-02-17 09:52:41 -07004560 req->task = current;
Jens Axboe0969e782019-12-17 18:40:57 -07004561 return 0;
4562}
4563
Pavel Begunkov014db002020-03-03 21:33:12 +03004564static int io_poll_add(struct io_kiocb *req)
Jens Axboe0969e782019-12-17 18:40:57 -07004565{
4566 struct io_poll_iocb *poll = &req->poll;
4567 struct io_ring_ctx *ctx = req->ctx;
4568 struct io_poll_table ipt;
Jens Axboe0969e782019-12-17 18:40:57 -07004569 __poll_t mask;
Jens Axboe0969e782019-12-17 18:40:57 -07004570
Jens Axboe78076bb2019-12-04 19:56:40 -07004571 INIT_HLIST_NODE(&req->hash_node);
Jens Axboe36703242019-07-25 10:20:18 -06004572 INIT_LIST_HEAD(&req->list);
Jens Axboed7718a92020-02-14 22:23:12 -07004573 ipt.pt._qproc = io_poll_queue_proc;
Jens Axboe36703242019-07-25 10:20:18 -06004574
Jens Axboed7718a92020-02-14 22:23:12 -07004575 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
4576 io_poll_wake);
Jens Axboe221c5eb2019-01-17 09:41:58 -07004577
Jens Axboe8c838782019-03-12 15:48:16 -06004578 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06004579 ipt.error = 0;
Jens Axboeb0dd8a42019-11-18 12:14:54 -07004580 io_poll_complete(req, mask, 0);
Jens Axboe8c838782019-03-12 15:48:16 -06004581 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07004582 spin_unlock_irq(&ctx->completion_lock);
4583
Jens Axboe8c838782019-03-12 15:48:16 -06004584 if (mask) {
4585 io_cqring_ev_posted(ctx);
Pavel Begunkov014db002020-03-03 21:33:12 +03004586 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07004587 }
Jens Axboe8c838782019-03-12 15:48:16 -06004588 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07004589}
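
/*
 * Usage sketch (illustrative, not part of this file): arming a one-shot
 * poll via liburing; `ring`, `fd` and `cookie` are assumptions. When the
 * file is already ready, io_poll_add() above completes inline ("no
 * async, we'd stolen it") without leaving a wait queue entry armed.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	io_uring_sqe_set_data(sqe, cookie);
 *	io_uring_submit(&ring);
 *	// CQE res carries the ready mask, mangled via mangle_poll() above
 */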
4590
Jens Axboe5262f562019-09-17 12:26:57 -06004591static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
4592{
Jens Axboead8a48a2019-11-15 08:49:11 -07004593 struct io_timeout_data *data = container_of(timer,
4594 struct io_timeout_data, timer);
4595 struct io_kiocb *req = data->req;
4596 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe5262f562019-09-17 12:26:57 -06004597 unsigned long flags;
4598
Jens Axboe5262f562019-09-17 12:26:57 -06004599 atomic_inc(&ctx->cq_timeouts);
4600
4601 spin_lock_irqsave(&ctx->completion_lock, flags);
zhangyi (F)ef036812019-10-23 15:10:08 +08004602 /*
Jens Axboe11365042019-10-16 09:08:32 -06004603 * We could be racing with timeout deletion. If the list is empty,
4604 * then timeout lookup already found it and will be handling it.
zhangyi (F)ef036812019-10-23 15:10:08 +08004605 */
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03004606 if (!list_empty(&req->list))
Jens Axboe11365042019-10-16 09:08:32 -06004607 list_del_init(&req->list);
Jens Axboe842f9612019-10-29 12:34:10 -06004608
Jens Axboe78e19bb2019-11-06 15:21:34 -07004609 io_cqring_fill_event(req, -ETIME);
Jens Axboe5262f562019-09-17 12:26:57 -06004610 io_commit_cqring(ctx);
4611 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4612
4613 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004614 req_set_fail_links(req);
Jens Axboe5262f562019-09-17 12:26:57 -06004615 io_put_req(req);
4616 return HRTIMER_NORESTART;
4617}
4618
Jens Axboe47f46762019-11-09 17:43:02 -07004619static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
4620{
4621 struct io_kiocb *req;
4622 int ret = -ENOENT;
4623
4624 list_for_each_entry(req, &ctx->timeout_list, list) {
4625 if (user_data == req->user_data) {
4626 list_del_init(&req->list);
4627 ret = 0;
4628 break;
4629 }
4630 }
4631
4632 if (ret == -ENOENT)
4633 return ret;
4634
Jens Axboe2d283902019-12-04 11:08:05 -07004635 ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
Jens Axboe47f46762019-11-09 17:43:02 -07004636 if (ret == -1)
4637 return -EALREADY;
4638
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004639 req_set_fail_links(req);
Jens Axboe47f46762019-11-09 17:43:02 -07004640 io_cqring_fill_event(req, -ECANCELED);
4641 io_put_req(req);
4642 return 0;
4643}
4644
Jens Axboe3529d8c2019-12-19 18:24:38 -07004645static int io_timeout_remove_prep(struct io_kiocb *req,
4646 const struct io_uring_sqe *sqe)
Jens Axboeb29472e2019-12-17 18:50:29 -07004647{
Jens Axboeb29472e2019-12-17 18:50:29 -07004648 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4649 return -EINVAL;
4650 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
4651 return -EINVAL;
4652
4653 req->timeout.addr = READ_ONCE(sqe->addr);
4654 req->timeout.flags = READ_ONCE(sqe->timeout_flags);
4655 if (req->timeout.flags)
4656 return -EINVAL;
4657
Jens Axboeb29472e2019-12-17 18:50:29 -07004658 return 0;
4659}
4660
Jens Axboe11365042019-10-16 09:08:32 -06004661/*
4662 * Remove or update an existing timeout command
4663 */
Jens Axboefc4df992019-12-10 14:38:45 -07004664static int io_timeout_remove(struct io_kiocb *req)
Jens Axboe11365042019-10-16 09:08:32 -06004665{
4666 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe47f46762019-11-09 17:43:02 -07004667 int ret;
Jens Axboe11365042019-10-16 09:08:32 -06004668
Jens Axboe11365042019-10-16 09:08:32 -06004669 spin_lock_irq(&ctx->completion_lock);
Jens Axboeb29472e2019-12-17 18:50:29 -07004670 ret = io_timeout_cancel(ctx, req->timeout.addr);
Jens Axboe11365042019-10-16 09:08:32 -06004671
Jens Axboe47f46762019-11-09 17:43:02 -07004672 io_cqring_fill_event(req, ret);
Jens Axboe11365042019-10-16 09:08:32 -06004673 io_commit_cqring(ctx);
4674 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06004675 io_cqring_ev_posted(ctx);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004676 if (ret < 0)
4677 req_set_fail_links(req);
Jackie Liuec9c02a2019-11-08 23:50:36 +08004678 io_put_req(req);
Jens Axboe11365042019-10-16 09:08:32 -06004679 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06004680}
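
/*
 * Usage sketch (illustrative, not part of this file): removing a pending
 * timeout. `ring` is assumed; `timeout_udata` must match the user_data
 * of the original IORING_OP_TIMEOUT sqe, which io_timeout_cancel() above
 * searches ->timeout_list for.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_timeout_remove(sqe, timeout_udata, 0);
 *	io_uring_submit(&ring);
 *	// the cancelled timeout completes with -ECANCELED; this request
 *	// completes with 0, -ENOENT or -EALREADY
 */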
4681
Jens Axboe3529d8c2019-12-19 18:24:38 -07004682static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboe2d283902019-12-04 11:08:05 -07004683 bool is_timeout_link)
Jens Axboe5262f562019-09-17 12:26:57 -06004684{
Jens Axboead8a48a2019-11-15 08:49:11 -07004685 struct io_timeout_data *data;
Jens Axboea41525a2019-10-15 16:48:15 -06004686 unsigned flags;
Pavel Begunkov56080b02020-05-26 20:34:04 +03004687 u32 off = READ_ONCE(sqe->off);
Jens Axboe5262f562019-09-17 12:26:57 -06004688
Jens Axboead8a48a2019-11-15 08:49:11 -07004689 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboe5262f562019-09-17 12:26:57 -06004690 return -EINVAL;
Jens Axboead8a48a2019-11-15 08:49:11 -07004691 if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
Jens Axboea41525a2019-10-15 16:48:15 -06004692 return -EINVAL;
Pavel Begunkov56080b02020-05-26 20:34:04 +03004693 if (off && is_timeout_link)
Jens Axboe2d283902019-12-04 11:08:05 -07004694 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06004695 flags = READ_ONCE(sqe->timeout_flags);
4696 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06004697 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06004698
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03004699 req->timeout.off = off;
Jens Axboe26a61672019-12-20 09:02:01 -07004700
Jens Axboe3529d8c2019-12-19 18:24:38 -07004701 if (!req->io && io_alloc_async_ctx(req))
Jens Axboe26a61672019-12-20 09:02:01 -07004702 return -ENOMEM;
4703
4704 data = &req->io->timeout;
Jens Axboead8a48a2019-11-15 08:49:11 -07004705 data->req = req;
Jens Axboead8a48a2019-11-15 08:49:11 -07004706 req->flags |= REQ_F_TIMEOUT;
4707
4708 if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06004709 return -EFAULT;
4710
Jens Axboe11365042019-10-16 09:08:32 -06004711 if (flags & IORING_TIMEOUT_ABS)
Jens Axboead8a48a2019-11-15 08:49:11 -07004712 data->mode = HRTIMER_MODE_ABS;
Jens Axboe11365042019-10-16 09:08:32 -06004713 else
Jens Axboead8a48a2019-11-15 08:49:11 -07004714 data->mode = HRTIMER_MODE_REL;
Jens Axboe11365042019-10-16 09:08:32 -06004715
Jens Axboead8a48a2019-11-15 08:49:11 -07004716 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
4717 return 0;
4718}
4719
Jens Axboefc4df992019-12-10 14:38:45 -07004720static int io_timeout(struct io_kiocb *req)
Jens Axboead8a48a2019-11-15 08:49:11 -07004721{
Jens Axboead8a48a2019-11-15 08:49:11 -07004722 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03004723 struct io_timeout_data *data = &req->io->timeout;
Jens Axboead8a48a2019-11-15 08:49:11 -07004724 struct list_head *entry;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03004725 u32 tail, off = req->timeout.off;
Jens Axboead8a48a2019-11-15 08:49:11 -07004726
Pavel Begunkov733f5c92020-05-26 20:34:03 +03004727 spin_lock_irq(&ctx->completion_lock);
Jens Axboe93bd25b2019-11-11 23:34:31 -07004728
Jens Axboe5262f562019-09-17 12:26:57 -06004729 /*
4730	 * sqe->off holds how many events need to occur for this
Jens Axboe93bd25b2019-11-11 23:34:31 -07004731 * timeout event to be satisfied. If it isn't set, then this is
4732	 * a pure timeout request; the sequence isn't used.
Jens Axboe5262f562019-09-17 12:26:57 -06004733 */
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03004734 if (!off) {
Jens Axboe93bd25b2019-11-11 23:34:31 -07004735 req->flags |= REQ_F_TIMEOUT_NOSEQ;
Jens Axboe93bd25b2019-11-11 23:34:31 -07004736 entry = ctx->timeout_list.prev;
4737 goto add;
4738 }
Jens Axboe5262f562019-09-17 12:26:57 -06004739
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03004740 tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
4741 req->timeout.target_seq = tail + off;
Jens Axboe5262f562019-09-17 12:26:57 -06004742
4743 /*
4744 * Insertion sort, ensuring the first entry in the list is always
4745 * the one we need first.
4746 */
Jens Axboe5262f562019-09-17 12:26:57 -06004747 list_for_each_prev(entry, &ctx->timeout_list) {
4748 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
Jens Axboe5262f562019-09-17 12:26:57 -06004749
Jens Axboe93bd25b2019-11-11 23:34:31 -07004750 if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
4751 continue;
Pavel Begunkovbfe68a22020-05-30 14:54:18 +03004752 /* nxt.seq is behind @tail, otherwise would've been completed */
4753 if (off >= nxt->timeout.target_seq - tail)
Jens Axboe5262f562019-09-17 12:26:57 -06004754 break;
4755 }
Jens Axboe93bd25b2019-11-11 23:34:31 -07004756add:
Jens Axboe5262f562019-09-17 12:26:57 -06004757 list_add(&req->list, entry);
Jens Axboead8a48a2019-11-15 08:49:11 -07004758 data->timer.function = io_timeout_fn;
4759 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
Jens Axboe842f9612019-10-29 12:34:10 -06004760 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06004761 return 0;
4762}
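
/*
 * Usage sketch (illustrative, not part of this file): both flavours of
 * IORING_OP_TIMEOUT; `ring` is assumed. A count of 0 gives a pure
 * relative timeout; a count of N completes early once N CQEs have been
 * posted, via the target_seq comparison above.
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_timeout(sqe, &ts, 0, 0);	// pure 1s timeout
 *	// or: io_uring_prep_timeout(sqe, &ts, 8, 0) to also fire after
 *	// 8 completions
 *	io_uring_submit(&ring);
 *	// CQE res is -ETIME on expiry, 0 if satisfied by completions
 */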
4763
Jens Axboe62755e32019-10-28 21:49:21 -06004764static bool io_cancel_cb(struct io_wq_work *work, void *data)
Jens Axboede0617e2019-04-06 21:51:27 -06004765{
Jens Axboe62755e32019-10-28 21:49:21 -06004766 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Jens Axboede0617e2019-04-06 21:51:27 -06004767
Jens Axboe62755e32019-10-28 21:49:21 -06004768 return req->user_data == (unsigned long) data;
4769}
4770
Jens Axboee977d6d2019-11-05 12:39:45 -07004771static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
Jens Axboe62755e32019-10-28 21:49:21 -06004772{
Jens Axboe62755e32019-10-28 21:49:21 -06004773 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06004774 int ret = 0;
4775
Jens Axboe62755e32019-10-28 21:49:21 -06004776 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
4777 switch (cancel_ret) {
4778 case IO_WQ_CANCEL_OK:
4779 ret = 0;
4780 break;
4781 case IO_WQ_CANCEL_RUNNING:
4782 ret = -EALREADY;
4783 break;
4784 case IO_WQ_CANCEL_NOTFOUND:
4785 ret = -ENOENT;
4786 break;
4787 }
4788
Jens Axboee977d6d2019-11-05 12:39:45 -07004789 return ret;
4790}
4791
Jens Axboe47f46762019-11-09 17:43:02 -07004792static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
4793 struct io_kiocb *req, __u64 sqe_addr,
Pavel Begunkov014db002020-03-03 21:33:12 +03004794 int success_ret)
Jens Axboe47f46762019-11-09 17:43:02 -07004795{
4796 unsigned long flags;
4797 int ret;
4798
4799 ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
4800 if (ret != -ENOENT) {
4801 spin_lock_irqsave(&ctx->completion_lock, flags);
4802 goto done;
4803 }
4804
4805 spin_lock_irqsave(&ctx->completion_lock, flags);
4806 ret = io_timeout_cancel(ctx, sqe_addr);
4807 if (ret != -ENOENT)
4808 goto done;
4809 ret = io_poll_cancel(ctx, sqe_addr);
4810done:
Jens Axboeb0dd8a42019-11-18 12:14:54 -07004811 if (!ret)
4812 ret = success_ret;
Jens Axboe47f46762019-11-09 17:43:02 -07004813 io_cqring_fill_event(req, ret);
4814 io_commit_cqring(ctx);
4815 spin_unlock_irqrestore(&ctx->completion_lock, flags);
4816 io_cqring_ev_posted(ctx);
4817
Jens Axboe4e88d6e2019-12-07 20:59:47 -07004818 if (ret < 0)
4819 req_set_fail_links(req);
Pavel Begunkov014db002020-03-03 21:33:12 +03004820 io_put_req(req);
Jens Axboe47f46762019-11-09 17:43:02 -07004821}
4822
Jens Axboe3529d8c2019-12-19 18:24:38 -07004823static int io_async_cancel_prep(struct io_kiocb *req,
4824 const struct io_uring_sqe *sqe)
Jens Axboee977d6d2019-11-05 12:39:45 -07004825{
Jens Axboefbf23842019-12-17 18:45:56 -07004826 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboee977d6d2019-11-05 12:39:45 -07004827 return -EINVAL;
4828 if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
4829 sqe->cancel_flags)
4830 return -EINVAL;
4831
Jens Axboefbf23842019-12-17 18:45:56 -07004832 req->cancel.addr = READ_ONCE(sqe->addr);
4833 return 0;
4834}
4835
Pavel Begunkov014db002020-03-03 21:33:12 +03004836static int io_async_cancel(struct io_kiocb *req)
Jens Axboefbf23842019-12-17 18:45:56 -07004837{
4838 struct io_ring_ctx *ctx = req->ctx;
Jens Axboefbf23842019-12-17 18:45:56 -07004839
Pavel Begunkov014db002020-03-03 21:33:12 +03004840 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
Jens Axboe62755e32019-10-28 21:49:21 -06004841 return 0;
4842}
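
/*
 * Usage sketch (illustrative, not part of this file):
 * IORING_OP_ASYNC_CANCEL from userspace; `ring` and the `target` cookie
 * are assumptions. The lookup order mirrors io_async_find_and_cancel()
 * above: io-wq work first, then timeouts, then poll requests.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_cancel(sqe, target, 0);
 *	io_uring_submit(&ring);
 *	// CQE res: 0 if cancelled, -ENOENT if nothing matched, -EALREADY
 *	// if the target was already running
 */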
4843
Jens Axboe05f3fb32019-12-09 11:22:50 -07004844static int io_files_update_prep(struct io_kiocb *req,
4845 const struct io_uring_sqe *sqe)
4846{
4847 if (sqe->flags || sqe->ioprio || sqe->rw_flags)
4848 return -EINVAL;
4849
4850 req->files_update.offset = READ_ONCE(sqe->off);
4851 req->files_update.nr_args = READ_ONCE(sqe->len);
4852 if (!req->files_update.nr_args)
4853 return -EINVAL;
4854 req->files_update.arg = READ_ONCE(sqe->addr);
4855 return 0;
4856}
4857
4858static int io_files_update(struct io_kiocb *req, bool force_nonblock)
4859{
4860 struct io_ring_ctx *ctx = req->ctx;
4861 struct io_uring_files_update up;
4862 int ret;
4863
Jens Axboef86cd202020-01-29 13:46:44 -07004864 if (force_nonblock)
Jens Axboe05f3fb32019-12-09 11:22:50 -07004865 return -EAGAIN;
Jens Axboe05f3fb32019-12-09 11:22:50 -07004866
4867 up.offset = req->files_update.offset;
4868 up.fds = req->files_update.arg;
4869
4870 mutex_lock(&ctx->uring_lock);
4871 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
4872 mutex_unlock(&ctx->uring_lock);
4873
4874 if (ret < 0)
4875 req_set_fail_links(req);
4876 io_cqring_add_event(req, ret);
4877 io_put_req(req);
4878 return 0;
4879}
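
/*
 * Usage sketch (illustrative, not part of this file): updating part of a
 * registered file table in place. `ring` is assumed, with files already
 * registered via io_uring_register_files(). The fd array, count and
 * offset map to sqe->addr, sqe->len and sqe->off in
 * io_files_update_prep() above.
 *
 *	int new_fds[2] = { fd_a, fd_b };	// fd_a, fd_b: assumptions
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_files_update(sqe, new_fds, 2, 10);
 *	io_uring_submit(&ring);
 *	// CQE res is the number of slots updated, or -errno
 */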
4880
Jens Axboe3529d8c2019-12-19 18:24:38 -07004881static int io_req_defer_prep(struct io_kiocb *req,
4882 const struct io_uring_sqe *sqe)
Jens Axboef67676d2019-12-02 11:03:47 -07004883{
Jens Axboee7815732019-12-17 19:45:06 -07004884 ssize_t ret = 0;
Jens Axboef67676d2019-12-02 11:03:47 -07004885
Pavel Begunkovf1d96a82020-03-13 22:29:14 +03004886 if (!sqe)
4887 return 0;
4888
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08004889 io_req_init_async(req);
4890
Jens Axboef86cd202020-01-29 13:46:44 -07004891 if (io_op_defs[req->opcode].file_table) {
4892 ret = io_grab_files(req);
4893 if (unlikely(ret))
4894 return ret;
4895 }
4896
Jens Axboecccf0ee2020-01-27 16:34:48 -07004897 io_req_work_grab_env(req, &io_op_defs[req->opcode]);
4898
Jens Axboed625c6e2019-12-17 19:53:05 -07004899 switch (req->opcode) {
Jens Axboee7815732019-12-17 19:45:06 -07004900 case IORING_OP_NOP:
4901 break;
Jens Axboef67676d2019-12-02 11:03:47 -07004902 case IORING_OP_READV:
4903 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07004904 case IORING_OP_READ:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004905 ret = io_read_prep(req, sqe, true);
Jens Axboef67676d2019-12-02 11:03:47 -07004906 break;
4907 case IORING_OP_WRITEV:
4908 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07004909 case IORING_OP_WRITE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004910 ret = io_write_prep(req, sqe, true);
Jens Axboef67676d2019-12-02 11:03:47 -07004911 break;
Jens Axboe0969e782019-12-17 18:40:57 -07004912 case IORING_OP_POLL_ADD:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004913 ret = io_poll_add_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07004914 break;
4915 case IORING_OP_POLL_REMOVE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004916 ret = io_poll_remove_prep(req, sqe);
Jens Axboe0969e782019-12-17 18:40:57 -07004917 break;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004918 case IORING_OP_FSYNC:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004919 ret = io_prep_fsync(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004920 break;
4921 case IORING_OP_SYNC_FILE_RANGE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004922 ret = io_prep_sfr(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004923 break;
Jens Axboe03b12302019-12-02 18:50:25 -07004924 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07004925 case IORING_OP_SEND:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004926 ret = io_sendmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07004927 break;
4928 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07004929 case IORING_OP_RECV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004930 ret = io_recvmsg_prep(req, sqe);
Jens Axboe03b12302019-12-02 18:50:25 -07004931 break;
Jens Axboef499a022019-12-02 16:28:46 -07004932 case IORING_OP_CONNECT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004933 ret = io_connect_prep(req, sqe);
Jens Axboef499a022019-12-02 16:28:46 -07004934 break;
Jens Axboe2d283902019-12-04 11:08:05 -07004935 case IORING_OP_TIMEOUT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004936 ret = io_timeout_prep(req, sqe, false);
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004937 break;
Jens Axboeb29472e2019-12-17 18:50:29 -07004938 case IORING_OP_TIMEOUT_REMOVE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004939 ret = io_timeout_remove_prep(req, sqe);
Jens Axboeb29472e2019-12-17 18:50:29 -07004940 break;
Jens Axboefbf23842019-12-17 18:45:56 -07004941 case IORING_OP_ASYNC_CANCEL:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004942 ret = io_async_cancel_prep(req, sqe);
Jens Axboefbf23842019-12-17 18:45:56 -07004943 break;
Jens Axboe2d283902019-12-04 11:08:05 -07004944 case IORING_OP_LINK_TIMEOUT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004945 ret = io_timeout_prep(req, sqe, true);
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004946 break;
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004947 case IORING_OP_ACCEPT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07004948 ret = io_accept_prep(req, sqe);
Jens Axboe8ed8d3c2019-12-16 11:55:28 -07004949 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07004950 case IORING_OP_FALLOCATE:
4951 ret = io_fallocate_prep(req, sqe);
4952 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07004953 case IORING_OP_OPENAT:
4954 ret = io_openat_prep(req, sqe);
4955 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07004956 case IORING_OP_CLOSE:
4957 ret = io_close_prep(req, sqe);
4958 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07004959 case IORING_OP_FILES_UPDATE:
4960 ret = io_files_update_prep(req, sqe);
4961 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07004962 case IORING_OP_STATX:
4963 ret = io_statx_prep(req, sqe);
4964 break;
Jens Axboe4840e412019-12-25 22:03:45 -07004965 case IORING_OP_FADVISE:
4966 ret = io_fadvise_prep(req, sqe);
4967 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07004968 case IORING_OP_MADVISE:
4969 ret = io_madvise_prep(req, sqe);
4970 break;
Jens Axboecebdb982020-01-08 17:59:24 -07004971 case IORING_OP_OPENAT2:
4972 ret = io_openat2_prep(req, sqe);
4973 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07004974 case IORING_OP_EPOLL_CTL:
4975 ret = io_epoll_ctl_prep(req, sqe);
4976 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03004977 case IORING_OP_SPLICE:
4978 ret = io_splice_prep(req, sqe);
4979 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07004980 case IORING_OP_PROVIDE_BUFFERS:
4981 ret = io_provide_buffers_prep(req, sqe);
4982 break;
Jens Axboe067524e2020-03-02 16:32:28 -07004983 case IORING_OP_REMOVE_BUFFERS:
4984 ret = io_remove_buffers_prep(req, sqe);
4985 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03004986 case IORING_OP_TEE:
4987 ret = io_tee_prep(req, sqe);
4988 break;
Jens Axboef67676d2019-12-02 11:03:47 -07004989 default:
Jens Axboee7815732019-12-17 19:45:06 -07004990 printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
4991 req->opcode);
4992 ret = -EINVAL;
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004993 break;
Jens Axboef67676d2019-12-02 11:03:47 -07004994 }
4995
Jens Axboeb7bb4f72019-12-15 22:13:43 -07004996 return ret;
Jens Axboef67676d2019-12-02 11:03:47 -07004997}
4998
Jens Axboe3529d8c2019-12-19 18:24:38 -07004999static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboede0617e2019-04-06 21:51:27 -06005000{
Jackie Liua197f662019-11-08 08:09:12 -07005001 struct io_ring_ctx *ctx = req->ctx;
Jens Axboef67676d2019-12-02 11:03:47 -07005002 int ret;
Jens Axboede0617e2019-04-06 21:51:27 -06005003
Bob Liu9d858b22019-11-13 18:06:25 +08005004	/* Still need to defer if there are pending reqs in the defer list. */
Pavel Begunkov4ee36312020-05-01 17:09:37 +03005005 if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
Jens Axboede0617e2019-04-06 21:51:27 -06005006 return 0;
5007
Pavel Begunkov650b5482020-05-17 14:02:11 +03005008 if (!req->io) {
5009 if (io_alloc_async_ctx(req))
5010 return -EAGAIN;
5011 ret = io_req_defer_prep(req, sqe);
5012 if (ret < 0)
5013 return ret;
5014 }
Jens Axboe2d283902019-12-04 11:08:05 -07005015
Jens Axboede0617e2019-04-06 21:51:27 -06005016 spin_lock_irq(&ctx->completion_lock);
Bob Liu9d858b22019-11-13 18:06:25 +08005017 if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
Jens Axboede0617e2019-04-06 21:51:27 -06005018 spin_unlock_irq(&ctx->completion_lock);
Jens Axboede0617e2019-04-06 21:51:27 -06005019 return 0;
5020 }
5021
Jens Axboe915967f2019-11-21 09:01:20 -07005022 trace_io_uring_defer(ctx, req, req->user_data);
Jens Axboede0617e2019-04-06 21:51:27 -06005023 list_add_tail(&req->list, &ctx->defer_list);
5024 spin_unlock_irq(&ctx->completion_lock);
5025 return -EIOCBQUEUED;
5026}
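
/*
 * Usage sketch (illustrative, not part of this file): the deferral path
 * above is driven from userspace with IOSQE_IO_DRAIN; `ring` and `fd`
 * are assumptions. The drained sqe is parked on ->defer_list until all
 * prior requests have completed.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_fsync(sqe, fd, 0);
 *	io_uring_sqe_set_flags(sqe, IOSQE_IO_DRAIN);
 *	io_uring_submit(&ring);
 */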
5027
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005028static void io_cleanup_req(struct io_kiocb *req)
5029{
5030 struct io_async_ctx *io = req->io;
5031
5032 switch (req->opcode) {
5033 case IORING_OP_READV:
5034 case IORING_OP_READ_FIXED:
5035 case IORING_OP_READ:
Jens Axboebcda7ba2020-02-23 16:42:51 -07005036 if (req->flags & REQ_F_BUFFER_SELECTED)
5037 kfree((void *)(unsigned long)req->rw.addr);
5038 /* fallthrough */
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005039 case IORING_OP_WRITEV:
5040 case IORING_OP_WRITE_FIXED:
5041 case IORING_OP_WRITE:
5042 if (io->rw.iov != io->rw.fast_iov)
5043 kfree(io->rw.iov);
5044 break;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005045 case IORING_OP_RECVMSG:
Jens Axboe52de1fe2020-02-27 10:15:42 -07005046 if (req->flags & REQ_F_BUFFER_SELECTED)
5047 kfree(req->sr_msg.kbuf);
5048 /* fallthrough */
5049 case IORING_OP_SENDMSG:
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005050 if (io->msg.iov != io->msg.fast_iov)
5051 kfree(io->msg.iov);
5052 break;
Jens Axboebcda7ba2020-02-23 16:42:51 -07005053 case IORING_OP_RECV:
5054 if (req->flags & REQ_F_BUFFER_SELECTED)
5055 kfree(req->sr_msg.kbuf);
5056 break;
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03005057 case IORING_OP_OPENAT:
5058 case IORING_OP_OPENAT2:
Pavel Begunkov8fef80b2020-02-07 23:59:53 +03005059 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005060 case IORING_OP_SPLICE:
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005061 case IORING_OP_TEE:
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005062 io_put_file(req, req->splice.file_in,
5063 (req->splice.flags & SPLICE_F_FD_IN_FIXED));
5064 break;
Pavel Begunkov99bc4c32020-02-07 22:04:45 +03005065 }
5066
5067 req->flags &= ~REQ_F_NEED_CLEANUP;
5068}
5069
Jens Axboe3529d8c2019-12-19 18:24:38 -07005070static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Pavel Begunkov014db002020-03-03 21:33:12 +03005071 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005072{
Jackie Liua197f662019-11-08 08:09:12 -07005073 struct io_ring_ctx *ctx = req->ctx;
Jens Axboed625c6e2019-12-17 19:53:05 -07005074 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005075
Jens Axboed625c6e2019-12-17 19:53:05 -07005076 switch (req->opcode) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07005077 case IORING_OP_NOP:
Jens Axboe78e19bb2019-11-06 15:21:34 -07005078 ret = io_nop(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005079 break;
5080 case IORING_OP_READV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005081 case IORING_OP_READ_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005082 case IORING_OP_READ:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005083 if (sqe) {
5084 ret = io_read_prep(req, sqe, force_nonblock);
5085 if (ret < 0)
5086 break;
5087 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005088 ret = io_read(req, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005089 break;
5090 case IORING_OP_WRITEV:
Jens Axboeedafcce2019-01-09 09:16:05 -07005091 case IORING_OP_WRITE_FIXED:
Jens Axboe3a6820f2019-12-22 15:19:35 -07005092 case IORING_OP_WRITE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005093 if (sqe) {
5094 ret = io_write_prep(req, sqe, force_nonblock);
5095 if (ret < 0)
5096 break;
5097 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005098 ret = io_write(req, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005099 break;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07005100 case IORING_OP_FSYNC:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005101 if (sqe) {
5102 ret = io_prep_fsync(req, sqe);
5103 if (ret < 0)
5104 break;
5105 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005106 ret = io_fsync(req, force_nonblock);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07005107 break;
Jens Axboe221c5eb2019-01-17 09:41:58 -07005108 case IORING_OP_POLL_ADD:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005109 if (sqe) {
5110 ret = io_poll_add_prep(req, sqe);
5111 if (ret)
5112 break;
5113 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005114 ret = io_poll_add(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005115 break;
5116 case IORING_OP_POLL_REMOVE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005117 if (sqe) {
5118 ret = io_poll_remove_prep(req, sqe);
5119 if (ret < 0)
5120 break;
5121 }
Jens Axboefc4df992019-12-10 14:38:45 -07005122 ret = io_poll_remove(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07005123 break;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06005124 case IORING_OP_SYNC_FILE_RANGE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005125 if (sqe) {
5126 ret = io_prep_sfr(req, sqe);
5127 if (ret < 0)
5128 break;
5129 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005130 ret = io_sync_file_range(req, force_nonblock);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06005131 break;
Jens Axboe0fa03c62019-04-19 13:34:07 -06005132 case IORING_OP_SENDMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005133 case IORING_OP_SEND:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005134 if (sqe) {
5135 ret = io_sendmsg_prep(req, sqe);
5136 if (ret < 0)
5137 break;
5138 }
Jens Axboefddafac2020-01-04 20:19:44 -07005139 if (req->opcode == IORING_OP_SENDMSG)
Pavel Begunkov014db002020-03-03 21:33:12 +03005140 ret = io_sendmsg(req, force_nonblock);
Jens Axboefddafac2020-01-04 20:19:44 -07005141 else
Pavel Begunkov014db002020-03-03 21:33:12 +03005142 ret = io_send(req, force_nonblock);
Jens Axboe0fa03c62019-04-19 13:34:07 -06005143 break;
Jens Axboeaa1fa282019-04-19 13:38:09 -06005144 case IORING_OP_RECVMSG:
Jens Axboefddafac2020-01-04 20:19:44 -07005145 case IORING_OP_RECV:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005146 if (sqe) {
5147 ret = io_recvmsg_prep(req, sqe);
5148 if (ret)
5149 break;
5150 }
Jens Axboefddafac2020-01-04 20:19:44 -07005151 if (req->opcode == IORING_OP_RECVMSG)
Pavel Begunkov014db002020-03-03 21:33:12 +03005152 ret = io_recvmsg(req, force_nonblock);
Jens Axboefddafac2020-01-04 20:19:44 -07005153 else
Pavel Begunkov014db002020-03-03 21:33:12 +03005154 ret = io_recv(req, force_nonblock);
Jens Axboeaa1fa282019-04-19 13:38:09 -06005155 break;
Jens Axboe5262f562019-09-17 12:26:57 -06005156 case IORING_OP_TIMEOUT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005157 if (sqe) {
5158 ret = io_timeout_prep(req, sqe, false);
5159 if (ret)
5160 break;
5161 }
Jens Axboefc4df992019-12-10 14:38:45 -07005162 ret = io_timeout(req);
Jens Axboe5262f562019-09-17 12:26:57 -06005163 break;
Jens Axboe11365042019-10-16 09:08:32 -06005164 case IORING_OP_TIMEOUT_REMOVE:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005165 if (sqe) {
5166 ret = io_timeout_remove_prep(req, sqe);
5167 if (ret)
5168 break;
5169 }
Jens Axboefc4df992019-12-10 14:38:45 -07005170 ret = io_timeout_remove(req);
Jens Axboe11365042019-10-16 09:08:32 -06005171 break;
Jens Axboe17f2fe32019-10-17 14:42:58 -06005172 case IORING_OP_ACCEPT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005173 if (sqe) {
5174 ret = io_accept_prep(req, sqe);
5175 if (ret)
5176 break;
5177 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005178 ret = io_accept(req, force_nonblock);
Jens Axboe17f2fe32019-10-17 14:42:58 -06005179 break;
Jens Axboef8e85cf2019-11-23 14:24:24 -07005180 case IORING_OP_CONNECT:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005181 if (sqe) {
5182 ret = io_connect_prep(req, sqe);
5183 if (ret)
5184 break;
5185 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005186 ret = io_connect(req, force_nonblock);
Jens Axboef8e85cf2019-11-23 14:24:24 -07005187 break;
Jens Axboe62755e32019-10-28 21:49:21 -06005188 case IORING_OP_ASYNC_CANCEL:
Jens Axboe3529d8c2019-12-19 18:24:38 -07005189 if (sqe) {
5190 ret = io_async_cancel_prep(req, sqe);
5191 if (ret)
5192 break;
5193 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005194 ret = io_async_cancel(req);
Jens Axboe62755e32019-10-28 21:49:21 -06005195 break;
Jens Axboed63d1b52019-12-10 10:38:56 -07005196 case IORING_OP_FALLOCATE:
5197 if (sqe) {
5198 ret = io_fallocate_prep(req, sqe);
5199 if (ret)
5200 break;
5201 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005202 ret = io_fallocate(req, force_nonblock);
Jens Axboed63d1b52019-12-10 10:38:56 -07005203 break;
Jens Axboe15b71ab2019-12-11 11:20:36 -07005204 case IORING_OP_OPENAT:
5205 if (sqe) {
5206 ret = io_openat_prep(req, sqe);
5207 if (ret)
5208 break;
5209 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005210 ret = io_openat(req, force_nonblock);
Jens Axboe15b71ab2019-12-11 11:20:36 -07005211 break;
Jens Axboeb5dba592019-12-11 14:02:38 -07005212 case IORING_OP_CLOSE:
5213 if (sqe) {
5214 ret = io_close_prep(req, sqe);
5215 if (ret)
5216 break;
5217 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005218 ret = io_close(req, force_nonblock);
Jens Axboeb5dba592019-12-11 14:02:38 -07005219 break;
Jens Axboe05f3fb32019-12-09 11:22:50 -07005220 case IORING_OP_FILES_UPDATE:
5221 if (sqe) {
5222 ret = io_files_update_prep(req, sqe);
5223 if (ret)
5224 break;
5225 }
5226 ret = io_files_update(req, force_nonblock);
5227 break;
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005228 case IORING_OP_STATX:
5229 if (sqe) {
5230 ret = io_statx_prep(req, sqe);
5231 if (ret)
5232 break;
5233 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005234 ret = io_statx(req, force_nonblock);
Jens Axboeeddc7ef2019-12-13 21:18:10 -07005235 break;
Jens Axboe4840e412019-12-25 22:03:45 -07005236 case IORING_OP_FADVISE:
5237 if (sqe) {
5238 ret = io_fadvise_prep(req, sqe);
5239 if (ret)
5240 break;
5241 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005242 ret = io_fadvise(req, force_nonblock);
Jens Axboe4840e412019-12-25 22:03:45 -07005243 break;
Jens Axboec1ca7572019-12-25 22:18:28 -07005244 case IORING_OP_MADVISE:
5245 if (sqe) {
5246 ret = io_madvise_prep(req, sqe);
5247 if (ret)
5248 break;
5249 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005250 ret = io_madvise(req, force_nonblock);
Jens Axboec1ca7572019-12-25 22:18:28 -07005251 break;
Jens Axboecebdb982020-01-08 17:59:24 -07005252 case IORING_OP_OPENAT2:
5253 if (sqe) {
5254 ret = io_openat2_prep(req, sqe);
5255 if (ret)
5256 break;
5257 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005258 ret = io_openat2(req, force_nonblock);
Jens Axboecebdb982020-01-08 17:59:24 -07005259 break;
Jens Axboe3e4827b2020-01-08 15:18:09 -07005260 case IORING_OP_EPOLL_CTL:
5261 if (sqe) {
5262 ret = io_epoll_ctl_prep(req, sqe);
5263 if (ret)
5264 break;
5265 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005266 ret = io_epoll_ctl(req, force_nonblock);
Jens Axboe3e4827b2020-01-08 15:18:09 -07005267 break;
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005268 case IORING_OP_SPLICE:
5269 if (sqe) {
5270 ret = io_splice_prep(req, sqe);
5271 if (ret < 0)
5272 break;
5273 }
Pavel Begunkov014db002020-03-03 21:33:12 +03005274 ret = io_splice(req, force_nonblock);
Pavel Begunkov7d67af22020-02-24 11:32:45 +03005275 break;
Jens Axboeddf0322d2020-02-23 16:41:33 -07005276 case IORING_OP_PROVIDE_BUFFERS:
5277 if (sqe) {
5278 ret = io_provide_buffers_prep(req, sqe);
5279 if (ret)
5280 break;
5281 }
5282 ret = io_provide_buffers(req, force_nonblock);
5283 break;
Jens Axboe067524e2020-03-02 16:32:28 -07005284 case IORING_OP_REMOVE_BUFFERS:
5285 if (sqe) {
5286 ret = io_remove_buffers_prep(req, sqe);
5287 if (ret)
5288 break;
5289 }
5290 ret = io_remove_buffers(req, force_nonblock);
Jens Axboe31b51512019-01-18 22:56:34 -07005291 break;
Pavel Begunkovf2a8d5c2020-05-17 14:18:06 +03005292 case IORING_OP_TEE:
5293 if (sqe) {
5294 ret = io_tee_prep(req, sqe);
5295 if (ret < 0)
5296 break;
5297 }
5298 ret = io_tee(req, force_nonblock);
5299 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005300 default:
5301 ret = -EINVAL;
5302 break;
5303 }
5304
5305 if (ret)
5306 return ret;
5307
Jens Axboeb5325762020-05-19 21:20:27 -06005308 /* If the op doesn't have a file, we're not polling for it */
5309 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
Jens Axboe11ba8202020-01-15 21:51:17 -07005310 const bool in_async = io_wq_current_is_worker();
5311
Jens Axboe9e645e112019-05-10 16:07:28 -06005312 if (req->result == -EAGAIN)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005313 return -EAGAIN;
5314
Jens Axboe11ba8202020-01-15 21:51:17 -07005315 /* workqueue context doesn't hold uring_lock, grab it now */
5316 if (in_async)
5317 mutex_lock(&ctx->uring_lock);
5318
Jens Axboe2b188cc2019-01-07 10:46:33 -07005319 io_iopoll_req_issued(req);
Jens Axboe11ba8202020-01-15 21:51:17 -07005320
5321 if (in_async)
5322 mutex_unlock(&ctx->uring_lock);
Jens Axboedef596e2019-01-09 08:59:42 -07005323 }
5324
5325 return 0;
5326}
5327
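/*
 * Illustrative userspace note (a sketch, not part of this file): with
 * IORING_SETUP_IOPOLL, completions are reaped by actively polling the
 * device rather than waiting for an IRQ, which is why requests are
 * handed to io_iopoll_req_issued() above. Files are expected to be
 * opened with O_DIRECT. Assuming liburing's API:
 *
 *	struct io_uring ring;
 *	int fd;
 *
 *	io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
 *	fd = open("datafile", O_RDONLY | O_DIRECT);
 *
 * Reads are then queued as usual and reaped with io_uring_wait_cqe(),
 * which drives the polling from io_uring_enter().
 */
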
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03005328static void io_arm_async_linked_timeout(struct io_kiocb *req)
5329{
5330 struct io_kiocb *link;
5331
5332 /* link head's timeout is queued in io_queue_async_work() */
5333 if (!(req->flags & REQ_F_QUEUE_TIMEOUT))
5334 return;
5335
5336 link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
5337 io_queue_linked_timeout(link);
5338}
5339
Jens Axboe561fb042019-10-24 07:25:42 -06005340static void io_wq_submit_work(struct io_wq_work **workptr)
Jens Axboedef596e2019-01-09 08:59:42 -07005341{
Jens Axboe561fb042019-10-24 07:25:42 -06005342 struct io_wq_work *work = *workptr;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005343 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Jens Axboe561fb042019-10-24 07:25:42 -06005344 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005345
Pavel Begunkovd4c81f32020-06-08 21:08:19 +03005346 io_arm_async_linked_timeout(req);
5347
Jens Axboe0c9d5cc2019-12-11 19:29:43 -07005348 /* if NO_CANCEL is set, we must still run the work */
5349 if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
5350 IO_WQ_WORK_CANCEL) {
Jens Axboe561fb042019-10-24 07:25:42 -06005351 ret = -ECANCELED;
Jens Axboe0c9d5cc2019-12-11 19:29:43 -07005352 }
Jens Axboe31b51512019-01-18 22:56:34 -07005353
Jens Axboe561fb042019-10-24 07:25:42 -06005354 if (!ret) {
Jens Axboe561fb042019-10-24 07:25:42 -06005355 do {
Pavel Begunkov014db002020-03-03 21:33:12 +03005356 ret = io_issue_sqe(req, NULL, false);
Jens Axboe561fb042019-10-24 07:25:42 -06005357 /*
5358 * We can get EAGAIN for polled IO even though we're
5359 * forcing a sync submission from here, since we can't
5360 * wait for request slots on the block side.
5361 */
5362 if (ret != -EAGAIN)
5363 break;
5364 cond_resched();
5365 } while (1);
5366 }
Jens Axboe31b51512019-01-18 22:56:34 -07005367
Jens Axboe561fb042019-10-24 07:25:42 -06005368 if (ret) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005369 req_set_fail_links(req);
Jens Axboe78e19bb2019-11-06 15:21:34 -07005370 io_cqring_add_event(req, ret);
Jens Axboe817869d2019-04-30 14:44:05 -06005371 io_put_req(req);
Jens Axboeedafcce2019-01-09 09:16:05 -07005372 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07005373
Pavel Begunkove9fd9392020-03-04 16:14:12 +03005374 io_steal_work(req, workptr);
Jens Axboe31b51512019-01-18 22:56:34 -07005375}
Jens Axboe2b188cc2019-01-07 10:46:33 -07005376
Jens Axboe65e19f52019-10-26 07:20:21 -06005377static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
5378 int index)
Jens Axboe09bb8392019-03-13 12:39:28 -06005379{
Jens Axboe65e19f52019-10-26 07:20:21 -06005380 struct fixed_file_table *table;
5381
Jens Axboe05f3fb32019-12-09 11:22:50 -07005382 table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
Xiaoming Ni84695082020-05-11 19:25:43 +08005383 return table->files[index & IORING_FILE_TABLE_MASK];
Jens Axboe65e19f52019-10-26 07:20:21 -06005384}
5385
Pavel Begunkov8da11c12020-02-24 11:32:44 +03005386static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
5387 int fd, struct file **out_file, bool fixed)
5388{
5389 struct io_ring_ctx *ctx = req->ctx;
5390 struct file *file;
5391
5392 if (fixed) {
5393 if (unlikely(!ctx->file_data ||
5394 (unsigned) fd >= ctx->nr_user_files))
5395 return -EBADF;
5396 fd = array_index_nospec(fd, ctx->nr_user_files);
5397 file = io_file_from_index(ctx, fd);
Jens Axboefd2206e2020-06-02 16:40:47 -06005398 if (file) {
5399 req->fixed_file_refs = ctx->file_data->cur_refs;
5400 percpu_ref_get(req->fixed_file_refs);
5401 }
Pavel Begunkov8da11c12020-02-24 11:32:44 +03005402 } else {
5403 trace_io_uring_file_get(ctx, fd);
5404 file = __io_file_get(state, fd);
Pavel Begunkov8da11c12020-02-24 11:32:44 +03005405 }
5406
Jens Axboefd2206e2020-06-02 16:40:47 -06005407 if (file || io_op_defs[req->opcode].needs_file_no_error) {
5408 *out_file = file;
5409 return 0;
5410 }
5411 return -EBADF;
Pavel Begunkov8da11c12020-02-24 11:32:44 +03005412}
5413
Jens Axboe3529d8c2019-12-19 18:24:38 -07005414static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
Jens Axboe63ff8222020-05-07 14:56:15 -06005415 int fd)
Jens Axboe09bb8392019-03-13 12:39:28 -06005416{
Pavel Begunkov8da11c12020-02-24 11:32:44 +03005417 bool fixed;
Jens Axboe09bb8392019-03-13 12:39:28 -06005418
Jens Axboe63ff8222020-05-07 14:56:15 -06005419 fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
Pavel Begunkov0cdaf762020-05-17 14:13:40 +03005420 if (unlikely(!fixed && io_async_submit(req->ctx)))
Pavel Begunkov8da11c12020-02-24 11:32:44 +03005421 return -EBADF;
Jens Axboe09bb8392019-03-13 12:39:28 -06005422
Pavel Begunkov8da11c12020-02-24 11:32:44 +03005423 return io_file_get(state, req, fd, &req->file, fixed);
Jens Axboe09bb8392019-03-13 12:39:28 -06005424}
5425
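/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * with a file table registered, an sqe can address a file by its index
 * in that table rather than by fd, by setting IOSQE_FIXED_FILE. That is
 * the lookup io_file_get() performs above. Assuming liburing:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_read(sqe, 0, buf, sizeof(buf), 0);
 *	sqe->flags |= IOSQE_FIXED_FILE;
 *	io_uring_submit(&ring);
 *
 * Here the "fd" of 0 is an index into the registered table, not a
 * regular file descriptor.
 */
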
Jackie Liua197f662019-11-08 08:09:12 -07005426static int io_grab_files(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005427{
Jens Axboefcb323c2019-10-24 12:39:47 -06005428 int ret = -EBADF;
Jackie Liua197f662019-11-08 08:09:12 -07005429 struct io_ring_ctx *ctx = req->ctx;
Jens Axboefcb323c2019-10-24 12:39:47 -06005430
Jens Axboe5b0bbee2020-04-27 10:41:22 -06005431 if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
Jens Axboef86cd202020-01-29 13:46:44 -07005432 return 0;
Pavel Begunkovb14cca02020-01-17 04:45:59 +03005433 if (!ctx->ring_file)
Jens Axboeb5dba592019-12-11 14:02:38 -07005434 return -EBADF;
5435
Jens Axboefcb323c2019-10-24 12:39:47 -06005436 rcu_read_lock();
5437 spin_lock_irq(&ctx->inflight_lock);
5438 /*
5439 * We use the f_ops->flush() handler to ensure that we can flush
5440 * out work accessing these files if the fd is closed. Check if
5441 * the fd has changed since we started down this path, and disallow
5442 * this operation if it has.
5443 */
Pavel Begunkovb14cca02020-01-17 04:45:59 +03005444 if (fcheck(ctx->ring_fd) == ctx->ring_file) {
Jens Axboefcb323c2019-10-24 12:39:47 -06005445 list_add(&req->inflight_entry, &ctx->inflight_list);
5446 req->flags |= REQ_F_INFLIGHT;
5447 req->work.files = current->files;
5448 ret = 0;
5449 }
5450 spin_unlock_irq(&ctx->inflight_lock);
5451 rcu_read_unlock();
5452
5453 return ret;
5454}
5455
Jens Axboe2665abf2019-11-05 12:40:47 -07005456static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
5457{
Jens Axboead8a48a2019-11-15 08:49:11 -07005458 struct io_timeout_data *data = container_of(timer,
5459 struct io_timeout_data, timer);
5460 struct io_kiocb *req = data->req;
Jens Axboe2665abf2019-11-05 12:40:47 -07005461 struct io_ring_ctx *ctx = req->ctx;
5462 struct io_kiocb *prev = NULL;
5463 unsigned long flags;
Jens Axboe2665abf2019-11-05 12:40:47 -07005464
5465 spin_lock_irqsave(&ctx->completion_lock, flags);
5466
5467 /*
5468	 * We don't expect the list to be empty; that will only happen if we
5469 * race with the completion of the linked work.
5470 */
Pavel Begunkov44932332019-12-05 16:16:35 +03005471 if (!list_empty(&req->link_list)) {
5472 prev = list_entry(req->link_list.prev, struct io_kiocb,
5473 link_list);
Jens Axboe5d960722019-11-19 15:31:28 -07005474 if (refcount_inc_not_zero(&prev->refs)) {
Pavel Begunkov44932332019-12-05 16:16:35 +03005475 list_del_init(&req->link_list);
Jens Axboe5d960722019-11-19 15:31:28 -07005476 prev->flags &= ~REQ_F_LINK_TIMEOUT;
5477 } else
Jens Axboe76a46e02019-11-10 23:34:16 -07005478 prev = NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07005479 }
5480
5481 spin_unlock_irqrestore(&ctx->completion_lock, flags);
5482
5483 if (prev) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005484 req_set_fail_links(prev);
Pavel Begunkov014db002020-03-03 21:33:12 +03005485 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
Jens Axboe76a46e02019-11-10 23:34:16 -07005486 io_put_req(prev);
Jens Axboe47f46762019-11-09 17:43:02 -07005487 } else {
5488 io_cqring_add_event(req, -ETIME);
5489 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07005490 }
Jens Axboe2665abf2019-11-05 12:40:47 -07005491 return HRTIMER_NORESTART;
5492}
5493
Jens Axboead8a48a2019-11-15 08:49:11 -07005494static void io_queue_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07005495{
Jens Axboe76a46e02019-11-10 23:34:16 -07005496 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2665abf2019-11-05 12:40:47 -07005497
Jens Axboe76a46e02019-11-10 23:34:16 -07005498 /*
5499 * If the list is now empty, then our linked request finished before
5500 * we got a chance to setup the timer
5501 */
5502 spin_lock_irq(&ctx->completion_lock);
Pavel Begunkov44932332019-12-05 16:16:35 +03005503 if (!list_empty(&req->link_list)) {
Jens Axboe2d283902019-12-04 11:08:05 -07005504 struct io_timeout_data *data = &req->io->timeout;
Jens Axboe94ae5e72019-11-14 19:39:52 -07005505
Jens Axboead8a48a2019-11-15 08:49:11 -07005506 data->timer.function = io_link_timeout_fn;
5507 hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
5508 data->mode);
Jens Axboe2665abf2019-11-05 12:40:47 -07005509 }
Jens Axboe76a46e02019-11-10 23:34:16 -07005510 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe2665abf2019-11-05 12:40:47 -07005511
Jens Axboe2665abf2019-11-05 12:40:47 -07005512 /* drop submission reference */
Jens Axboe76a46e02019-11-10 23:34:16 -07005513 io_put_req(req);
Jens Axboe2665abf2019-11-05 12:40:47 -07005514}
5515
Jens Axboead8a48a2019-11-15 08:49:11 -07005516static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
Jens Axboe2665abf2019-11-05 12:40:47 -07005517{
5518 struct io_kiocb *nxt;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005519
Pavel Begunkovdea3b492020-04-12 02:05:04 +03005520 if (!(req->flags & REQ_F_LINK_HEAD))
Jens Axboe2665abf2019-11-05 12:40:47 -07005521 return NULL;
Jens Axboed7718a92020-02-14 22:23:12 -07005522 /* for polled retry, if flag is set, we already went through here */
5523 if (req->flags & REQ_F_POLLED)
5524 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07005525
Pavel Begunkov44932332019-12-05 16:16:35 +03005526 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
5527 link_list);
Jens Axboed625c6e2019-12-17 19:53:05 -07005528 if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
Jens Axboe76a46e02019-11-10 23:34:16 -07005529 return NULL;
Jens Axboe2665abf2019-11-05 12:40:47 -07005530
Jens Axboe76a46e02019-11-10 23:34:16 -07005531 req->flags |= REQ_F_LINK_TIMEOUT;
Jens Axboe76a46e02019-11-10 23:34:16 -07005532 return nxt;
Jens Axboe2665abf2019-11-05 12:40:47 -07005533}
5534
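/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * a linked timeout is armed by queueing IORING_OP_LINK_TIMEOUT directly
 * after the request it guards, with IOSQE_IO_LINK set on the guarded
 * request. Assuming liburing:
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1 };
 *	struct io_uring_sqe *sqe;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
 *	sqe->flags |= IOSQE_IO_LINK;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_link_timeout(sqe, &ts, 0);
 *	io_uring_submit(&ring);
 *
 * If the read does not complete in time it is cancelled, and the
 * timeout itself completes with -ETIME, as posted by
 * io_link_timeout_fn() above.
 */
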
Jens Axboe3529d8c2019-12-19 18:24:38 -07005535static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005536{
Jens Axboe4a0a7a12019-12-09 20:01:01 -07005537 struct io_kiocb *linked_timeout;
Pavel Begunkov4bc44942020-02-29 22:48:24 +03005538 struct io_kiocb *nxt;
Jens Axboe193155c2020-02-22 23:22:19 -07005539 const struct cred *old_creds = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005540 int ret;
5541
Jens Axboe4a0a7a12019-12-09 20:01:01 -07005542again:
5543 linked_timeout = io_prep_linked_timeout(req);
5544
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08005545 if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds &&
5546 req->work.creds != current_cred()) {
Jens Axboe193155c2020-02-22 23:22:19 -07005547 if (old_creds)
5548 revert_creds(old_creds);
5549 if (old_creds == req->work.creds)
5550 old_creds = NULL; /* restored original creds */
5551 else
5552 old_creds = override_creds(req->work.creds);
5553 }
5554
Pavel Begunkov014db002020-03-03 21:33:12 +03005555 ret = io_issue_sqe(req, sqe, true);
Jens Axboe491381ce2019-10-17 09:20:46 -06005556
5557 /*
5558 * We async punt it if the file wasn't marked NOWAIT, or if the file
5559 * doesn't support non-blocking read/write attempts
5560 */
5561 if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
5562 (req->flags & REQ_F_MUST_PUNT))) {
Jens Axboed7718a92020-02-14 22:23:12 -07005563 if (io_arm_poll_handler(req)) {
5564 if (linked_timeout)
5565 io_queue_linked_timeout(linked_timeout);
Pavel Begunkov4bc44942020-02-29 22:48:24 +03005566 goto exit;
Jens Axboed7718a92020-02-14 22:23:12 -07005567 }
Pavel Begunkov86a761f2020-01-22 23:09:36 +03005568punt:
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08005569 io_req_init_async(req);
5570
Jens Axboef86cd202020-01-29 13:46:44 -07005571 if (io_op_defs[req->opcode].file_table) {
Pavel Begunkovbbad27b2019-11-19 23:32:47 +03005572 ret = io_grab_files(req);
5573 if (ret)
5574 goto err;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005575 }
Pavel Begunkovbbad27b2019-11-19 23:32:47 +03005576
5577 /*
5578 * Queued up for async execution, worker will release
5579 * submit reference when the iocb is actually submitted.
5580 */
5581 io_queue_async_work(req);
Pavel Begunkov4bc44942020-02-29 22:48:24 +03005582 goto exit;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005583 }
Jens Axboee65ef562019-03-12 10:16:44 -06005584
Jens Axboefcb323c2019-10-24 12:39:47 -06005585err:
Pavel Begunkov4bc44942020-02-29 22:48:24 +03005586 nxt = NULL;
Jens Axboee65ef562019-03-12 10:16:44 -06005587 /* drop submission reference */
Jens Axboe2a44f462020-02-25 13:25:41 -07005588 io_put_req_find_next(req, &nxt);
Jens Axboee65ef562019-03-12 10:16:44 -06005589
Pavel Begunkovf9bd67f2019-11-21 23:21:03 +03005590 if (linked_timeout) {
Jens Axboe76a46e02019-11-10 23:34:16 -07005591 if (!ret)
Pavel Begunkovf9bd67f2019-11-21 23:21:03 +03005592 io_queue_linked_timeout(linked_timeout);
Jens Axboe76a46e02019-11-10 23:34:16 -07005593 else
Pavel Begunkovf9bd67f2019-11-21 23:21:03 +03005594 io_put_req(linked_timeout);
Jens Axboe76a46e02019-11-10 23:34:16 -07005595 }
5596
Jens Axboee65ef562019-03-12 10:16:44 -06005597 /* and drop final reference, if we failed */
Jens Axboe9e645e112019-05-10 16:07:28 -06005598 if (ret) {
Jens Axboe78e19bb2019-11-06 15:21:34 -07005599 io_cqring_add_event(req, ret);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005600 req_set_fail_links(req);
Jens Axboee65ef562019-03-12 10:16:44 -06005601 io_put_req(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06005602 }
Jens Axboe4a0a7a12019-12-09 20:01:01 -07005603 if (nxt) {
5604 req = nxt;
Pavel Begunkov86a761f2020-01-22 23:09:36 +03005605
5606 if (req->flags & REQ_F_FORCE_ASYNC)
5607 goto punt;
Jens Axboe4a0a7a12019-12-09 20:01:01 -07005608 goto again;
5609 }
Pavel Begunkov4bc44942020-02-29 22:48:24 +03005610exit:
Jens Axboe193155c2020-02-22 23:22:19 -07005611 if (old_creds)
5612 revert_creds(old_creds);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005613}
5614
Jens Axboe3529d8c2019-12-19 18:24:38 -07005615static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
Jackie Liu4fe2c962019-09-09 20:50:40 +08005616{
5617 int ret;
5618
Jens Axboe3529d8c2019-12-19 18:24:38 -07005619 ret = io_req_defer(req, sqe);
Jackie Liu4fe2c962019-09-09 20:50:40 +08005620 if (ret) {
5621 if (ret != -EIOCBQUEUED) {
Pavel Begunkov11185912020-01-22 23:09:35 +03005622fail_req:
Jens Axboe78e19bb2019-11-06 15:21:34 -07005623 io_cqring_add_event(req, ret);
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005624 req_set_fail_links(req);
Jens Axboe78e19bb2019-11-06 15:21:34 -07005625 io_double_put_req(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08005626 }
Pavel Begunkov25508782019-12-30 21:24:47 +03005627 } else if (req->flags & REQ_F_FORCE_ASYNC) {
Pavel Begunkovbd2ab182020-05-17 14:02:12 +03005628 if (!req->io) {
5629 ret = -EAGAIN;
5630 if (io_alloc_async_ctx(req))
5631 goto fail_req;
5632 ret = io_req_defer_prep(req, sqe);
5633 if (unlikely(ret < 0))
5634 goto fail_req;
5635 }
5636
Jens Axboece35a472019-12-17 08:04:44 -07005637 /*
5638		 * Never try inline submit if IOSQE_ASYNC is set, go straight
5639 * to async execution.
5640 */
5641 req->work.flags |= IO_WQ_WORK_CONCURRENT;
5642 io_queue_async_work(req);
5643 } else {
Jens Axboe3529d8c2019-12-19 18:24:38 -07005644 __io_queue_sqe(req, sqe);
Jens Axboece35a472019-12-17 08:04:44 -07005645 }
Jackie Liu4fe2c962019-09-09 20:50:40 +08005646}
5647
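/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * IOSQE_ASYNC requests the behaviour above, skipping the inline issue
 * attempt and going straight to io-wq. Assuming liburing:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
 *	io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
 *	io_uring_submit(&ring);
 */
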
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03005648static inline void io_queue_link_head(struct io_kiocb *req)
Jackie Liu4fe2c962019-09-09 20:50:40 +08005649{
Jens Axboe94ae5e72019-11-14 19:39:52 -07005650 if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03005651 io_cqring_add_event(req, -ECANCELED);
5652 io_double_put_req(req);
5653 } else
Jens Axboe3529d8c2019-12-19 18:24:38 -07005654 io_queue_sqe(req, NULL);
Jackie Liu4fe2c962019-09-09 20:50:40 +08005655}
5656
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03005657static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Xiaoguang Wang7d01bd72020-05-08 21:19:30 +08005658 struct io_kiocb **link)
Jens Axboe9e645e112019-05-10 16:07:28 -06005659{
Jackie Liua197f662019-11-08 08:09:12 -07005660 struct io_ring_ctx *ctx = req->ctx;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03005661 int ret;
Jens Axboe9e645e112019-05-10 16:07:28 -06005662
Jens Axboe9e645e112019-05-10 16:07:28 -06005663 /*
5664 * If we already have a head request, queue this one for async
5665 * submittal once the head completes. If we don't have a head but
5666 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
5667 * submitted sync once the chain is complete. If none of those
5668 * conditions are true (normal request), then just queue it.
5669 */
5670 if (*link) {
Pavel Begunkov9d763772019-12-17 02:22:07 +03005671 struct io_kiocb *head = *link;
Jens Axboe9e645e112019-05-10 16:07:28 -06005672
Pavel Begunkov8cdf2192020-01-25 00:40:24 +03005673 /*
5674 * Taking sequential execution of a link, draining both sides
5675		 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
5676 * requests in the link. So, it drains the head and the
5677 * next after the link request. The last one is done via
5678 * drain_next flag to persist the effect across calls.
5679 */
Pavel Begunkovef4ff582020-04-12 02:05:05 +03005680 if (req->flags & REQ_F_IO_DRAIN) {
Pavel Begunkov711be032020-01-17 03:57:59 +03005681 head->flags |= REQ_F_IO_DRAIN;
5682 ctx->drain_next = 1;
5683 }
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03005684 if (io_alloc_async_ctx(req))
5685 return -EAGAIN;
Jens Axboe9e645e112019-05-10 16:07:28 -06005686
Jens Axboe3529d8c2019-12-19 18:24:38 -07005687 ret = io_req_defer_prep(req, sqe);
Jens Axboe2d283902019-12-04 11:08:05 -07005688 if (ret) {
Jens Axboe4e88d6e2019-12-07 20:59:47 -07005689 /* fail even hard links since we don't submit */
Pavel Begunkov9d763772019-12-17 02:22:07 +03005690 head->flags |= REQ_F_FAIL_LINK;
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03005691 return ret;
Jens Axboe2d283902019-12-04 11:08:05 -07005692 }
Pavel Begunkov9d763772019-12-17 02:22:07 +03005693 trace_io_uring_link(ctx, req, head);
5694 list_add_tail(&req->link_list, &head->link_list);
Jens Axboe9e645e112019-05-10 16:07:28 -06005695
Pavel Begunkov32fe5252019-12-17 22:26:58 +03005696 /* last request of a link, enqueue the link */
Pavel Begunkovef4ff582020-04-12 02:05:05 +03005697 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
Pavel Begunkov32fe5252019-12-17 22:26:58 +03005698 io_queue_link_head(head);
5699 *link = NULL;
5700 }
Jens Axboe9e645e112019-05-10 16:07:28 -06005701 } else {
Pavel Begunkov711be032020-01-17 03:57:59 +03005702 if (unlikely(ctx->drain_next)) {
5703 req->flags |= REQ_F_IO_DRAIN;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03005704 ctx->drain_next = 0;
Pavel Begunkov711be032020-01-17 03:57:59 +03005705 }
Pavel Begunkovef4ff582020-04-12 02:05:05 +03005706 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
Pavel Begunkovdea3b492020-04-12 02:05:04 +03005707 req->flags |= REQ_F_LINK_HEAD;
Pavel Begunkov711be032020-01-17 03:57:59 +03005708 INIT_LIST_HEAD(&req->link_list);
Pavel Begunkovf1d96a82020-03-13 22:29:14 +03005709
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03005710 if (io_alloc_async_ctx(req))
5711 return -EAGAIN;
5712
Pavel Begunkov711be032020-01-17 03:57:59 +03005713 ret = io_req_defer_prep(req, sqe);
5714 if (ret)
5715 req->flags |= REQ_F_FAIL_LINK;
5716 *link = req;
5717 } else {
5718 io_queue_sqe(req, sqe);
5719 }
Jens Axboe9e645e112019-05-10 16:07:28 -06005720 }
Pavel Begunkov2e6e1fd2019-12-05 16:15:45 +03005721
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03005722 return 0;
Jens Axboe9e645e112019-05-10 16:07:28 -06005723}
5724
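/*
 * Illustrative userspace counterpart of the chaining rules above (a
 * sketch, not part of this file): each sqe with IOSQE_IO_LINK set
 * extends the chain, and the first sqe without it terminates the chain.
 * An ordered write-then-fsync, assuming liburing:
 *
 *	struct io_uring_sqe *sqe;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_write(sqe, fd, buf, len, 0);
 *	sqe->flags |= IOSQE_IO_LINK;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fsync(sqe, fd, 0);
 *	io_uring_submit(&ring);
 *
 * The fsync does not start until the write has completed; if the write
 * fails, the fsync completes with -ECANCELED (see io_queue_link_head()
 * above).
 */
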
Jens Axboe9a56a232019-01-09 09:06:50 -07005725/*
5726 * Batched submission is done, ensure local IO is flushed out.
5727 */
5728static void io_submit_state_end(struct io_submit_state *state)
5729{
5730 blk_finish_plug(&state->plug);
Pavel Begunkov9f13c352020-05-17 14:13:41 +03005731 io_state_file_put(state);
Jens Axboe2579f912019-01-09 09:10:43 -07005732 if (state->free_reqs)
Pavel Begunkov6c8a3132020-02-01 03:58:00 +03005733 kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
Jens Axboe9a56a232019-01-09 09:06:50 -07005734}
5735
5736/*
5737 * Start submission side cache.
5738 */
5739static void io_submit_state_start(struct io_submit_state *state,
Jackie Liu22efde52019-12-02 17:14:52 +08005740 unsigned int max_ios)
Jens Axboe9a56a232019-01-09 09:06:50 -07005741{
5742 blk_start_plug(&state->plug);
Jens Axboe2579f912019-01-09 09:10:43 -07005743 state->free_reqs = 0;
Jens Axboe9a56a232019-01-09 09:06:50 -07005744 state->file = NULL;
5745 state->ios_left = max_ios;
5746}
5747
Jens Axboe2b188cc2019-01-07 10:46:33 -07005748static void io_commit_sqring(struct io_ring_ctx *ctx)
5749{
Hristo Venev75b28af2019-08-26 17:23:46 +00005750 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005751
Pavel Begunkovcaf582c2019-12-30 21:24:46 +03005752 /*
5753 * Ensure any loads from the SQEs are done at this point,
5754 * since once we write the new head, the application could
5755 * write new data to them.
5756 */
5757 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07005758}
5759
5760/*
Jens Axboe3529d8c2019-12-19 18:24:38 -07005761 * Fetch an sqe, if one is available. Note that the returned sqe will point to memory
Jens Axboe2b188cc2019-01-07 10:46:33 -07005762 * that is mapped by userspace. This means that care needs to be taken to
5763 * ensure that reads are stable, as we cannot rely on userspace always
5764 * being a good citizen. If members of the sqe are validated and then later
5765 * used, it's important that those reads are done through READ_ONCE() to
5766 * prevent a re-load down the line.
5767 */
Pavel Begunkov709b3022020-04-08 08:58:43 +03005768static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -07005769{
Hristo Venev75b28af2019-08-26 17:23:46 +00005770 u32 *sq_array = ctx->sq_array;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005771 unsigned head;
5772
5773 /*
5774 * The cached sq head (or cq tail) serves two purposes:
5775 *
5776	 * 1) allows us to batch the cost of updating the user-visible
5777	 *    head.
5778 * 2) allows the kernel side to track the head on its own, even
5779 * though the application is the one updating it.
5780 */
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03005781 head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
Pavel Begunkov709b3022020-04-08 08:58:43 +03005782 if (likely(head < ctx->sq_entries))
5783 return &ctx->sq_sqes[head];
Jens Axboe2b188cc2019-01-07 10:46:33 -07005784
5785 /* drop invalid entries */
Jens Axboe498ccd92019-10-25 10:04:25 -06005786 ctx->cached_sq_dropped++;
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03005787 WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
Pavel Begunkov709b3022020-04-08 08:58:43 +03005788 return NULL;
5789}
5790
5791static inline void io_consume_sqe(struct io_ring_ctx *ctx)
5792{
5793 ctx->cached_sq_head++;
Jens Axboe2b188cc2019-01-07 10:46:33 -07005794}
5795
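/*
 * Application-side mirror of the above (a sketch, not part of this
 * file): raw userspace publishes an sqe by filling the entry, writing
 * its index into the sq array, and bumping the tail with a release
 * store, pairing with the READ_ONCE()/acquire reads done here. Assuming
 * sq_tail, sq_mask, sq_array and sqes were mmap'ed from the ring fd
 * after io_uring_setup(), and fill_sqe() is a stand-in helper:
 *
 *	unsigned tail = *sq_tail;
 *	unsigned index = tail & *sq_mask;
 *
 *	fill_sqe(&sqes[index]);
 *	sq_array[index] = index;
 *	__atomic_store_n(sq_tail, tail + 1, __ATOMIC_RELEASE);
 */
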
Pavel Begunkovef4ff582020-04-12 02:05:05 +03005796#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
5797 IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
5798 IOSQE_BUFFER_SELECT)
5799
5800static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
5801 const struct io_uring_sqe *sqe,
Pavel Begunkov0cdaf762020-05-17 14:13:40 +03005802 struct io_submit_state *state)
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03005803{
Pavel Begunkovef4ff582020-04-12 02:05:05 +03005804 unsigned int sqe_flags;
Jens Axboe63ff8222020-05-07 14:56:15 -06005805 int id;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03005806
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03005807 /*
5808 * All io need record the previous position, if LINK vs DARIN,
5809 * it can be used to mark the position of the first IO in the
5810 * link list.
5811 */
Pavel Begunkov31af27c2020-04-15 00:39:50 +03005812 req->sequence = ctx->cached_sq_head - ctx->cached_sq_dropped;
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03005813 req->opcode = READ_ONCE(sqe->opcode);
5814 req->user_data = READ_ONCE(sqe->user_data);
5815 req->io = NULL;
5816 req->file = NULL;
5817 req->ctx = ctx;
5818 req->flags = 0;
5819 /* one is dropped after submission, the other at completion */
5820 refcount_set(&req->refs, 2);
5821 req->task = NULL;
5822 req->result = 0;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03005823
5824 if (unlikely(req->opcode >= IORING_OP_LAST))
5825 return -EINVAL;
5826
5827 if (io_op_defs[req->opcode].needs_mm && !current->mm) {
5828 if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
5829 return -EFAULT;
5830 use_mm(ctx->sqo_mm);
5831 }
5832
5833 sqe_flags = READ_ONCE(sqe->flags);
5834 /* enforce forwards compatibility on users */
5835 if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
5836 return -EINVAL;
5837
5838 if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
5839 !io_op_defs[req->opcode].buffer_select)
5840 return -EOPNOTSUPP;
5841
5842 id = READ_ONCE(sqe->personality);
5843 if (id) {
Xiaoguang Wang7cdaf582020-06-10 19:41:19 +08005844 io_req_init_async(req);
Pavel Begunkovef4ff582020-04-12 02:05:05 +03005845 req->work.creds = idr_find(&ctx->personality_idr, id);
5846 if (unlikely(!req->work.creds))
5847 return -EINVAL;
5848 get_cred(req->work.creds);
5849 }
5850
5851 /* same numerical values with corresponding REQ_F_*, safe to copy */
Pavel Begunkovc11368a52020-05-17 14:13:42 +03005852 req->flags |= sqe_flags;
Pavel Begunkovef4ff582020-04-12 02:05:05 +03005853
Jens Axboe63ff8222020-05-07 14:56:15 -06005854 if (!io_op_defs[req->opcode].needs_file)
5855 return 0;
5856
5857 return io_req_set_file(state, req, READ_ONCE(sqe->fd));
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03005858}
5859
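/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * the personality id consumed above comes from a prior
 * IORING_REGISTER_PERSONALITY call. Assuming liburing:
 *
 *	int id = io_uring_register_personality(&ring);
 *
 * A later sqe can then set sqe->personality = id to be issued with the
 * credentials that were current at registration time.
 */
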
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03005860static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
Pavel Begunkov0cdaf762020-05-17 14:13:40 +03005861 struct file *ring_file, int ring_fd)
Jens Axboe6c271ce2019-01-10 11:22:30 -07005862{
5863 struct io_submit_state state, *statep = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06005864 struct io_kiocb *link = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06005865 int i, submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005866
Jens Axboec4a2ed72019-11-21 21:01:26 -07005867 /* if we have a backlog and couldn't flush it all, return BUSY */
Jens Axboead3eb2c2019-12-18 17:12:20 -07005868 if (test_bit(0, &ctx->sq_check_overflow)) {
5869 if (!list_empty(&ctx->cq_overflow_list) &&
5870 !io_cqring_overflow_flush(ctx, false))
5871 return -EBUSY;
5872 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07005873
Pavel Begunkovee7d46d2019-12-30 21:24:45 +03005874 /* make sure SQ entry isn't read before tail */
5875 nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
Pavel Begunkov9ef4f122019-12-30 21:24:44 +03005876
Pavel Begunkov2b85edf2019-12-28 14:13:03 +03005877 if (!percpu_ref_tryget_many(&ctx->refs, nr))
5878 return -EAGAIN;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005879
5880 if (nr > IO_PLUG_THRESHOLD) {
Jackie Liu22efde52019-12-02 17:14:52 +08005881 io_submit_state_start(&state, nr);
Jens Axboe6c271ce2019-01-10 11:22:30 -07005882 statep = &state;
5883 }
5884
Pavel Begunkovb14cca02020-01-17 04:45:59 +03005885 ctx->ring_fd = ring_fd;
5886 ctx->ring_file = ring_file;
5887
Jens Axboe6c271ce2019-01-10 11:22:30 -07005888 for (i = 0; i < nr; i++) {
Jens Axboe3529d8c2019-12-19 18:24:38 -07005889 const struct io_uring_sqe *sqe;
Pavel Begunkov196be952019-11-07 01:41:06 +03005890 struct io_kiocb *req;
Pavel Begunkov1cb1edb2020-02-06 21:16:09 +03005891 int err;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03005892
Pavel Begunkovb1e50e52020-04-08 08:58:44 +03005893 sqe = io_get_sqe(ctx);
5894 if (unlikely(!sqe)) {
5895 io_consume_sqe(ctx);
5896 break;
5897 }
Pavel Begunkov0553b8b2020-04-08 08:58:45 +03005898 req = io_alloc_req(ctx, statep);
Pavel Begunkov196be952019-11-07 01:41:06 +03005899 if (unlikely(!req)) {
5900 if (!submitted)
5901 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03005902 break;
Jens Axboe9e645e112019-05-10 16:07:28 -06005903 }
Jens Axboe9e645e112019-05-10 16:07:28 -06005904
Pavel Begunkov0cdaf762020-05-17 14:13:40 +03005905 err = io_init_req(ctx, req, sqe, statep);
Pavel Begunkov709b3022020-04-08 08:58:43 +03005906 io_consume_sqe(ctx);
Jens Axboed3656342019-12-18 09:50:26 -07005907 /* will complete beyond this point, count as submitted */
5908 submitted++;
5909
Pavel Begunkovef4ff582020-04-12 02:05:05 +03005910 if (unlikely(err)) {
Pavel Begunkov1cb1edb2020-02-06 21:16:09 +03005911fail_req:
5912 io_cqring_add_event(req, err);
Jens Axboed3656342019-12-18 09:50:26 -07005913 io_double_put_req(req);
5914 break;
5915 }
5916
Jens Axboe354420f2020-01-08 18:55:15 -07005917 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
Pavel Begunkov0cdaf762020-05-17 14:13:40 +03005918 true, io_async_submit(ctx));
Xiaoguang Wang7d01bd72020-05-08 21:19:30 +08005919 err = io_submit_sqe(req, sqe, &link);
Pavel Begunkov1d4240c2020-04-12 02:05:03 +03005920 if (err)
5921 goto fail_req;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005922 }
5923
Pavel Begunkov9466f432020-01-25 22:34:01 +03005924 if (unlikely(submitted != nr)) {
5925 int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
5926
5927 percpu_ref_put_many(&ctx->refs, nr - ref_used);
5928 }
Jens Axboe9e645e112019-05-10 16:07:28 -06005929 if (link)
Pavel Begunkov1b4a51b2019-11-21 11:54:28 +03005930 io_queue_link_head(link);
Jens Axboe6c271ce2019-01-10 11:22:30 -07005931 if (statep)
5932 io_submit_state_end(&state);
5933
Pavel Begunkovae9428c2019-11-06 00:22:14 +03005934 /* Commit SQ ring head once we've consumed and submitted all SQEs */
5935 io_commit_sqring(ctx);
5936
Jens Axboe6c271ce2019-01-10 11:22:30 -07005937 return submitted;
5938}
5939
Pavel Begunkovbf9c2f12020-04-12 02:05:02 +03005940static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
5941{
5942 struct mm_struct *mm = current->mm;
5943
5944 if (mm) {
5945 unuse_mm(mm);
5946 mmput(mm);
5947 }
5948}
5949
Jens Axboe6c271ce2019-01-10 11:22:30 -07005950static int io_sq_thread(void *data)
5951{
Jens Axboe6c271ce2019-01-10 11:22:30 -07005952 struct io_ring_ctx *ctx = data;
Jens Axboe181e4482019-11-25 08:52:30 -07005953 const struct cred *old_cred;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005954 mm_segment_t old_fs;
5955 DEFINE_WAIT(wait);
Jens Axboe6c271ce2019-01-10 11:22:30 -07005956 unsigned long timeout;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08005957 int ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005958
Jens Axboe0f158b42020-05-14 17:18:39 -06005959 complete(&ctx->sq_thread_comp);
Jackie Liua4c0b3d2019-07-08 13:41:12 +08005960
Jens Axboe6c271ce2019-01-10 11:22:30 -07005961 old_fs = get_fs();
5962 set_fs(USER_DS);
Jens Axboe181e4482019-11-25 08:52:30 -07005963 old_cred = override_creds(ctx->creds);
Jens Axboe6c271ce2019-01-10 11:22:30 -07005964
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08005965 timeout = jiffies + ctx->sq_thread_idle;
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02005966 while (!kthread_should_park()) {
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03005967 unsigned int to_submit;
Jens Axboe6c271ce2019-01-10 11:22:30 -07005968
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08005969 if (!list_empty(&ctx->poll_list)) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07005970 unsigned nr_events = 0;
5971
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08005972 mutex_lock(&ctx->uring_lock);
5973 if (!list_empty(&ctx->poll_list))
5974 io_iopoll_getevents(ctx, &nr_events, 0);
5975 else
Jens Axboe6c271ce2019-01-10 11:22:30 -07005976 timeout = jiffies + ctx->sq_thread_idle;
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08005977 mutex_unlock(&ctx->uring_lock);
Jens Axboe6c271ce2019-01-10 11:22:30 -07005978 }
5979
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03005980 to_submit = io_sqring_entries(ctx);
Jens Axboec1edbf52019-11-10 16:56:04 -07005981
5982 /*
5983 * If submit got -EBUSY, flag us as needing the application
5984 * to enter the kernel to reap and flush events.
5985 */
5986 if (!to_submit || ret == -EBUSY) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07005987 /*
Stefano Garzarella7143b5a2020-02-21 16:42:16 +01005988 * Drop the mm before scheduling; we can't hold it for
5989 * long periods (or over schedule()). Do this before
5990 * adding ourselves to the waitqueue, as the unuse/drop
5991 * may sleep.
5992 */
Pavel Begunkovbf9c2f12020-04-12 02:05:02 +03005993 io_sq_thread_drop_mm(ctx);
Stefano Garzarella7143b5a2020-02-21 16:42:16 +01005994
5995 /*
Jens Axboe6c271ce2019-01-10 11:22:30 -07005996 * We're polling. If we're within the defined idle
5997 * period, then let us spin without work before going
Jens Axboec1edbf52019-11-10 16:56:04 -07005998 * to sleep. The exception is if we got EBUSY doing
5999			 * more IO; in that case, wait for the application to
6000 * reap events and wake us up.
Jens Axboe6c271ce2019-01-10 11:22:30 -07006001 */
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006002 if (!list_empty(&ctx->poll_list) ||
Jens Axboedf069d82020-02-04 16:48:34 -07006003 (!time_after(jiffies, timeout) && ret != -EBUSY &&
6004 !percpu_ref_is_dying(&ctx->refs))) {
Jens Axboeb41e9852020-02-17 09:52:41 -07006005 if (current->task_works)
6006 task_work_run();
Jens Axboe9831a902019-09-19 09:48:55 -06006007 cond_resched();
Jens Axboe6c271ce2019-01-10 11:22:30 -07006008 continue;
6009 }
6010
Jens Axboe6c271ce2019-01-10 11:22:30 -07006011 prepare_to_wait(&ctx->sqo_wait, &wait,
6012 TASK_INTERRUPTIBLE);
6013
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006014 /*
6015			 * While doing polled IO, before going to sleep we need
6016			 * to check if there are new reqs added to poll_list;
6017			 * reqs may have been punted to an io worker and will be
6018			 * added to poll_list later, hence check the poll_list
6019			 * again.
6020 */
6021 if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6022 !list_empty_careful(&ctx->poll_list)) {
6023 finish_wait(&ctx->sqo_wait, &wait);
6024 continue;
6025 }
6026
Jens Axboe6c271ce2019-01-10 11:22:30 -07006027 /* Tell userspace we may need a wakeup call */
Hristo Venev75b28af2019-08-26 17:23:46 +00006028 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
Stefan Bühler0d7bae62019-04-19 11:57:45 +02006029 /* make sure to read SQ tail after writing flags */
6030 smp_mb();
Jens Axboe6c271ce2019-01-10 11:22:30 -07006031
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03006032 to_submit = io_sqring_entries(ctx);
Jens Axboec1edbf52019-11-10 16:56:04 -07006033 if (!to_submit || ret == -EBUSY) {
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02006034 if (kthread_should_park()) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07006035 finish_wait(&ctx->sqo_wait, &wait);
6036 break;
6037 }
Jens Axboeb41e9852020-02-17 09:52:41 -07006038 if (current->task_works) {
6039 task_work_run();
Hillf Danton10bea962020-04-01 17:19:33 +08006040 finish_wait(&ctx->sqo_wait, &wait);
Jens Axboeb41e9852020-02-17 09:52:41 -07006041 continue;
6042 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07006043 if (signal_pending(current))
6044 flush_signals(current);
6045 schedule();
6046 finish_wait(&ctx->sqo_wait, &wait);
6047
Hristo Venev75b28af2019-08-26 17:23:46 +00006048 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
Xiaoguang Wangd4ae2712020-05-20 21:24:35 +08006049 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006050 continue;
6051 }
6052 finish_wait(&ctx->sqo_wait, &wait);
6053
Hristo Venev75b28af2019-08-26 17:23:46 +00006054 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006055 }
6056
Jens Axboe8a4955f2019-12-09 14:52:35 -07006057 mutex_lock(&ctx->uring_lock);
Xiaoguang Wang6b668c92020-05-20 15:35:03 +08006058 if (likely(!percpu_ref_is_dying(&ctx->refs)))
6059 ret = io_submit_sqes(ctx, to_submit, NULL, -1);
Jens Axboe8a4955f2019-12-09 14:52:35 -07006060 mutex_unlock(&ctx->uring_lock);
Xiaoguang Wangbdcd3ea2020-02-25 22:12:08 +08006061 timeout = jiffies + ctx->sq_thread_idle;
Jens Axboe6c271ce2019-01-10 11:22:30 -07006062 }
6063
Jens Axboeb41e9852020-02-17 09:52:41 -07006064 if (current->task_works)
6065 task_work_run();
6066
Jens Axboe6c271ce2019-01-10 11:22:30 -07006067 set_fs(old_fs);
Pavel Begunkovbf9c2f12020-04-12 02:05:02 +03006068 io_sq_thread_drop_mm(ctx);
Jens Axboe181e4482019-11-25 08:52:30 -07006069 revert_creds(old_cred);
Jens Axboe06058632019-04-13 09:26:03 -06006070
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02006071 kthread_parkme();
Jens Axboe06058632019-04-13 09:26:03 -06006072
Jens Axboe6c271ce2019-01-10 11:22:30 -07006073 return 0;
6074}
6075
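/*
 * Userspace counterpart of the wakeup handshake above (a sketch, not
 * part of this file): with IORING_SETUP_SQPOLL the application normally
 * never enters the kernel, but after writing a new SQ tail it must
 * issue a full barrier and then check for IORING_SQ_NEED_WAKEUP:
 *
 *	__atomic_store_n(sq_tail, tail + 1, __ATOMIC_RELEASE);
 *	__atomic_thread_fence(__ATOMIC_SEQ_CST);
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 */
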
Jens Axboebda52162019-09-24 13:47:15 -06006076struct io_wait_queue {
6077 struct wait_queue_entry wq;
6078 struct io_ring_ctx *ctx;
6079 unsigned to_wait;
6080 unsigned nr_timeouts;
6081};
6082
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07006083static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
Jens Axboebda52162019-09-24 13:47:15 -06006084{
6085 struct io_ring_ctx *ctx = iowq->ctx;
6086
6087 /*
Brian Gianforcarod195a662019-12-13 03:09:50 -08006088 * Wake up if we have enough events, or if a timeout occurred since we
Jens Axboebda52162019-09-24 13:47:15 -06006089 * started waiting. For timeouts, we always want to return to userspace,
6090 * regardless of event count.
6091 */
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07006092 return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
Jens Axboebda52162019-09-24 13:47:15 -06006093 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6094}
6095
6096static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6097 int wake_flags, void *key)
6098{
6099 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6100 wq);
6101
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07006102 /* use noflush == true, as we can't safely rely on locking context */
6103 if (!io_should_wake(iowq, true))
Jens Axboebda52162019-09-24 13:47:15 -06006104 return -1;
6105
6106 return autoremove_wake_function(curr, mode, wake_flags, key);
6107}
6108
Jens Axboe2b188cc2019-01-07 10:46:33 -07006109/*
6110 * Wait until events become available, if we don't already have some. The
6111 * application must reap them itself, as they reside on the shared cq ring.
6112 */
6113static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
6114 const sigset_t __user *sig, size_t sigsz)
6115{
Jens Axboebda52162019-09-24 13:47:15 -06006116 struct io_wait_queue iowq = {
6117 .wq = {
6118 .private = current,
6119 .func = io_wake_function,
6120 .entry = LIST_HEAD_INIT(iowq.wq.entry),
6121 },
6122 .ctx = ctx,
6123 .to_wait = min_events,
6124 };
Hristo Venev75b28af2019-08-26 17:23:46 +00006125 struct io_rings *rings = ctx->rings;
Jackie Liue9ffa5c2019-10-29 11:16:42 +08006126 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006127
Jens Axboeb41e9852020-02-17 09:52:41 -07006128 do {
6129 if (io_cqring_events(ctx, false) >= min_events)
6130 return 0;
6131 if (!current->task_works)
6132 break;
6133 task_work_run();
6134 } while (1);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006135
6136 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006137#ifdef CONFIG_COMPAT
6138 if (in_compat_syscall())
6139 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07006140 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006141 else
6142#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07006143 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01006144
Jens Axboe2b188cc2019-01-07 10:46:33 -07006145 if (ret)
6146 return ret;
6147 }
6148
Jens Axboebda52162019-09-24 13:47:15 -06006149 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02006150 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06006151 do {
6152 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
6153 TASK_INTERRUPTIBLE);
Jens Axboeb41e9852020-02-17 09:52:41 -07006154 if (current->task_works)
6155 task_work_run();
Jens Axboe1d7bb1d2019-11-06 11:31:17 -07006156 if (io_should_wake(&iowq, false))
Jens Axboebda52162019-09-24 13:47:15 -06006157 break;
6158 schedule();
6159 if (signal_pending(current)) {
Jackie Liue9ffa5c2019-10-29 11:16:42 +08006160 ret = -EINTR;
Jens Axboebda52162019-09-24 13:47:15 -06006161 break;
6162 }
6163 } while (1);
6164 finish_wait(&ctx->wait, &iowq.wq);
6165
Jackie Liue9ffa5c2019-10-29 11:16:42 +08006166 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07006167
Hristo Venev75b28af2019-08-26 17:23:46 +00006168 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07006169}
6170
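/*
 * Userspace counterpart (a sketch, not part of this file): min_events
 * here corresponds to the min_complete argument of io_uring_enter()
 * when IORING_ENTER_GETEVENTS is set. Assuming liburing, with
 * handle_cqe() a stand-in for application logic:
 *
 *	struct io_uring_cqe *cqe;
 *
 *	if (!io_uring_wait_cqe(&ring, &cqe)) {
 *		handle_cqe(cqe->user_data, cqe->res);
 *		io_uring_cqe_seen(&ring, cqe);
 *	}
 */
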
Jens Axboe6b063142019-01-10 22:13:58 -07006171static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
6172{
6173#if defined(CONFIG_UNIX)
6174 if (ctx->ring_sock) {
6175 struct sock *sock = ctx->ring_sock->sk;
6176 struct sk_buff *skb;
6177
6178 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
6179 kfree_skb(skb);
6180 }
6181#else
6182 int i;
6183
Jens Axboe65e19f52019-10-26 07:20:21 -06006184 for (i = 0; i < ctx->nr_user_files; i++) {
6185 struct file *file;
6186
6187 file = io_file_from_index(ctx, i);
6188 if (file)
6189 fput(file);
6190 }
Jens Axboe6b063142019-01-10 22:13:58 -07006191#endif
6192}
6193
Jens Axboe05f3fb32019-12-09 11:22:50 -07006194static void io_file_ref_kill(struct percpu_ref *ref)
6195{
6196 struct fixed_file_data *data;
6197
6198 data = container_of(ref, struct fixed_file_data, refs);
6199 complete(&data->done);
6200}
6201
Jens Axboe6b063142019-01-10 22:13:58 -07006202static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
6203{
Jens Axboe05f3fb32019-12-09 11:22:50 -07006204 struct fixed_file_data *data = ctx->file_data;
Xiaoguang Wang05589552020-03-31 14:05:18 +08006205 struct fixed_file_ref_node *ref_node = NULL;
Jens Axboe65e19f52019-10-26 07:20:21 -06006206 unsigned nr_tables, i;
6207
Jens Axboe05f3fb32019-12-09 11:22:50 -07006208 if (!data)
Jens Axboe6b063142019-01-10 22:13:58 -07006209 return -ENXIO;
6210
Jens Axboe6a4d07c2020-05-15 14:30:38 -06006211 spin_lock(&data->lock);
Xiaoguang Wang05589552020-03-31 14:05:18 +08006212 if (!list_empty(&data->ref_list))
6213 ref_node = list_first_entry(&data->ref_list,
6214 struct fixed_file_ref_node, node);
Jens Axboe6a4d07c2020-05-15 14:30:38 -06006215 spin_unlock(&data->lock);
Xiaoguang Wang05589552020-03-31 14:05:18 +08006216 if (ref_node)
6217 percpu_ref_kill(&ref_node->refs);
6218
6219 percpu_ref_kill(&data->refs);
6220
6221 /* wait for all refs nodes to complete */
Jens Axboe4a38aed22020-05-14 17:21:15 -06006222 flush_delayed_work(&ctx->file_put_work);
Jens Axboe2faf8522020-02-04 19:54:55 -07006223 wait_for_completion(&data->done);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006224
Jens Axboe6b063142019-01-10 22:13:58 -07006225 __io_sqe_files_unregister(ctx);
Jens Axboe65e19f52019-10-26 07:20:21 -06006226 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
6227 for (i = 0; i < nr_tables; i++)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006228 kfree(data->table[i].files);
6229 kfree(data->table);
Xiaoguang Wang05589552020-03-31 14:05:18 +08006230 percpu_ref_exit(&data->refs);
6231 kfree(data);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006232 ctx->file_data = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07006233 ctx->nr_user_files = 0;
6234 return 0;
6235}
6236
Jens Axboe6c271ce2019-01-10 11:22:30 -07006237static void io_sq_thread_stop(struct io_ring_ctx *ctx)
6238{
6239 if (ctx->sqo_thread) {
Jens Axboe0f158b42020-05-14 17:18:39 -06006240 wait_for_completion(&ctx->sq_thread_comp);
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02006241 /*
6242 * The park is a bit of a work-around, without it we get
6243 * warning spews on shutdown with SQPOLL set and affinity
6244 * set to a single CPU.
6245 */
Jens Axboe06058632019-04-13 09:26:03 -06006246 kthread_park(ctx->sqo_thread);
Jens Axboe6c271ce2019-01-10 11:22:30 -07006247 kthread_stop(ctx->sqo_thread);
6248 ctx->sqo_thread = NULL;
6249 }
6250}
6251
Jens Axboe6b063142019-01-10 22:13:58 -07006252static void io_finish_async(struct io_ring_ctx *ctx)
6253{
Jens Axboe6c271ce2019-01-10 11:22:30 -07006254 io_sq_thread_stop(ctx);
6255
Jens Axboe561fb042019-10-24 07:25:42 -06006256 if (ctx->io_wq) {
6257 io_wq_destroy(ctx->io_wq);
6258 ctx->io_wq = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07006259 }
6260}
6261
6262#if defined(CONFIG_UNIX)
Jens Axboe6b063142019-01-10 22:13:58 -07006263/*
6264 * Ensure the UNIX gc is aware of our file set, so we are certain that
6265 * the io_uring can be safely unregistered on process exit, even if we have
6266 * loops in the file referencing.
6267 */
6268static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
6269{
6270 struct sock *sk = ctx->ring_sock->sk;
6271 struct scm_fp_list *fpl;
6272 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06006273 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07006274
Jens Axboe6b063142019-01-10 22:13:58 -07006275 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
6276 if (!fpl)
6277 return -ENOMEM;
6278
6279 skb = alloc_skb(0, GFP_KERNEL);
6280 if (!skb) {
6281 kfree(fpl);
6282 return -ENOMEM;
6283 }
6284
6285 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07006286
Jens Axboe08a45172019-10-03 08:11:03 -06006287 nr_files = 0;
Jens Axboe6b063142019-01-10 22:13:58 -07006288 fpl->user = get_uid(ctx->user);
6289 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06006290 struct file *file = io_file_from_index(ctx, i + offset);
6291
6292 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06006293 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06006294 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06006295 unix_inflight(fpl->user, fpl->fp[nr_files]);
6296 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07006297 }
6298
Jens Axboe08a45172019-10-03 08:11:03 -06006299 if (nr_files) {
6300 fpl->max = SCM_MAX_FD;
6301 fpl->count = nr_files;
6302 UNIXCB(skb).fp = fpl;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006303 skb->destructor = unix_destruct_scm;
Jens Axboe08a45172019-10-03 08:11:03 -06006304 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
6305 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07006306
Jens Axboe08a45172019-10-03 08:11:03 -06006307 for (i = 0; i < nr_files; i++)
6308 fput(fpl->fp[i]);
6309 } else {
6310 kfree_skb(skb);
6311 kfree(fpl);
6312 }
Jens Axboe6b063142019-01-10 22:13:58 -07006313
6314 return 0;
6315}
6316
6317/*
6318 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
6319 * causes regular reference counting to break down. We rely on the UNIX
6320 * garbage collection to take care of this problem for us.
6321 */
6322static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6323{
6324 unsigned left, total;
6325 int ret = 0;
6326
6327 total = 0;
6328 left = ctx->nr_user_files;
6329 while (left) {
6330 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07006331
6332 ret = __io_sqe_files_scm(ctx, this_files, total);
6333 if (ret)
6334 break;
6335 left -= this_files;
6336 total += this_files;
6337 }
6338
6339 if (!ret)
6340 return 0;
6341
6342 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06006343 struct file *file = io_file_from_index(ctx, total);
6344
6345 if (file)
6346 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07006347 total++;
6348 }
6349
6350 return ret;
6351}
6352#else
6353static int io_sqe_files_scm(struct io_ring_ctx *ctx)
6354{
6355 return 0;
6356}
6357#endif
6358
Jens Axboe65e19f52019-10-26 07:20:21 -06006359static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
6360 unsigned nr_files)
6361{
6362 int i;
6363
6364 for (i = 0; i < nr_tables; i++) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07006365 struct fixed_file_table *table = &ctx->file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06006366 unsigned this_files;
6367
6368 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
6369 table->files = kcalloc(this_files, sizeof(struct file *),
6370 GFP_KERNEL);
6371 if (!table->files)
6372 break;
6373 nr_files -= this_files;
6374 }
6375
6376 if (i == nr_tables)
6377 return 0;
6378
6379 for (i = 0; i < nr_tables; i++) {
Jens Axboe05f3fb32019-12-09 11:22:50 -07006380 struct fixed_file_table *table = &ctx->file_data->table[i];
Jens Axboe65e19f52019-10-26 07:20:21 -06006381 kfree(table->files);
6382 }
6383 return 1;
6384}
6385
Jens Axboe05f3fb32019-12-09 11:22:50 -07006386static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
Jens Axboec3a31e62019-10-03 13:59:56 -06006387{
6388#if defined(CONFIG_UNIX)
Jens Axboec3a31e62019-10-03 13:59:56 -06006389 struct sock *sock = ctx->ring_sock->sk;
6390 struct sk_buff_head list, *head = &sock->sk_receive_queue;
6391 struct sk_buff *skb;
6392 int i;
6393
6394 __skb_queue_head_init(&list);
6395
6396 /*
6397 * Find the skb that holds this file in its SCM_RIGHTS. When found,
6398 * remove this entry and rearrange the file array.
6399 */
6400 skb = skb_dequeue(head);
6401 while (skb) {
6402 struct scm_fp_list *fp;
6403
6404 fp = UNIXCB(skb).fp;
6405 for (i = 0; i < fp->count; i++) {
6406 int left;
6407
6408 if (fp->fp[i] != file)
6409 continue;
6410
6411 unix_notinflight(fp->user, fp->fp[i]);
6412 left = fp->count - 1 - i;
6413 if (left) {
6414 memmove(&fp->fp[i], &fp->fp[i + 1],
6415 left * sizeof(struct file *));
6416 }
6417 fp->count--;
6418 if (!fp->count) {
6419 kfree_skb(skb);
6420 skb = NULL;
6421 } else {
6422 __skb_queue_tail(&list, skb);
6423 }
6424 fput(file);
6425 file = NULL;
6426 break;
6427 }
6428
6429 if (!file)
6430 break;
6431
6432 __skb_queue_tail(&list, skb);
6433
6434 skb = skb_dequeue(head);
6435 }
6436
6437 if (skb_peek(&list)) {
6438 spin_lock_irq(&head->lock);
6439 while ((skb = __skb_dequeue(&list)) != NULL)
6440 __skb_queue_tail(head, skb);
6441 spin_unlock_irq(&head->lock);
6442 }
6443#else
Jens Axboe05f3fb32019-12-09 11:22:50 -07006444 fput(file);
Jens Axboec3a31e62019-10-03 13:59:56 -06006445#endif
6446}
6447
Jens Axboe05f3fb32019-12-09 11:22:50 -07006448struct io_file_put {
Xiaoguang Wang05589552020-03-31 14:05:18 +08006449 struct list_head list;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006450 struct file *file;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006451};
6452
Jens Axboe4a38aed22020-05-14 17:21:15 -06006453static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
Jens Axboe05f3fb32019-12-09 11:22:50 -07006454{
Jens Axboe4a38aed22020-05-14 17:21:15 -06006455 struct fixed_file_data *file_data = ref_node->file_data;
6456 struct io_ring_ctx *ctx = file_data->ctx;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006457 struct io_file_put *pfile, *tmp;
Xiaoguang Wang05589552020-03-31 14:05:18 +08006458
6459 list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
Jens Axboe6a4d07c2020-05-15 14:30:38 -06006460 list_del(&pfile->list);
Xiaoguang Wang05589552020-03-31 14:05:18 +08006461 io_ring_file_put(ctx, pfile->file);
6462 kfree(pfile);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006463 }
6464
Jens Axboe6a4d07c2020-05-15 14:30:38 -06006465 spin_lock(&file_data->lock);
6466 list_del(&ref_node->node);
6467 spin_unlock(&file_data->lock);
Jens Axboe2faf8522020-02-04 19:54:55 -07006468
Xiaoguang Wang05589552020-03-31 14:05:18 +08006469 percpu_ref_exit(&ref_node->refs);
6470 kfree(ref_node);
6471 percpu_ref_put(&file_data->refs);
Jens Axboe05f3fb32019-12-09 11:22:50 -07006472}
6473
Jens Axboe4a38aed22020-05-14 17:21:15 -06006474static void io_file_put_work(struct work_struct *work)
6475{
6476 struct io_ring_ctx *ctx;
6477 struct llist_node *node;
6478
6479 ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
6480 node = llist_del_all(&ctx->file_put_llist);
6481
6482 while (node) {
6483 struct fixed_file_ref_node *ref_node;
6484 struct llist_node *next = node->next;
6485
6486 ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
6487 __io_file_put_work(ref_node);
6488 node = next;
6489 }
6490}
6491
Jens Axboe05f3fb32019-12-09 11:22:50 -07006492static void io_file_data_ref_zero(struct percpu_ref *ref)
6493{
Xiaoguang Wang05589552020-03-31 14:05:18 +08006494 struct fixed_file_ref_node *ref_node;
Jens Axboe4a38aed22020-05-14 17:21:15 -06006495 struct io_ring_ctx *ctx;
6496 bool first_add;
6497 int delay = HZ;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006498
Xiaoguang Wang05589552020-03-31 14:05:18 +08006499 ref_node = container_of(ref, struct fixed_file_ref_node, refs);
Jens Axboe4a38aed22020-05-14 17:21:15 -06006500 ctx = ref_node->file_data->ctx;
Jens Axboe05f3fb32019-12-09 11:22:50 -07006501
Jens Axboe4a38aed22020-05-14 17:21:15 -06006502 if (percpu_ref_is_dying(&ctx->file_data->refs))
6503 delay = 0;
6504
6505 first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
6506 if (!delay)
6507 mod_delayed_work(system_wq, &ctx->file_put_work, 0);
6508 else if (first_add)
6509 queue_delayed_work(system_wq, &ctx->file_put_work, delay);
Xiaoguang Wang05589552020-03-31 14:05:18 +08006510}

static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
			struct io_ring_ctx *ctx)
{
	struct fixed_file_ref_node *ref_node;

	ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
	if (!ref_node)
		return ERR_PTR(-ENOMEM);

	if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
			    0, GFP_KERNEL)) {
		kfree(ref_node);
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&ref_node->node);
	INIT_LIST_HEAD(&ref_node->file_list);
	ref_node->file_data = ctx->file_data;
	return ref_node;
}

static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
{
	percpu_ref_exit(&ref_node->refs);
	kfree(ref_node);
}

static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
				 unsigned nr_args)
{
	__s32 __user *fds = (__s32 __user *) arg;
	unsigned nr_tables;
	struct file *file;
	int fd, ret = 0;
	unsigned i;
	struct fixed_file_ref_node *ref_node;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;

	ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
	if (!ctx->file_data)
		return -ENOMEM;
	ctx->file_data->ctx = ctx;
	init_completion(&ctx->file_data->done);
	INIT_LIST_HEAD(&ctx->file_data->ref_list);
	spin_lock_init(&ctx->file_data->lock);

	nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
	ctx->file_data->table = kcalloc(nr_tables,
					sizeof(struct fixed_file_table),
					GFP_KERNEL);
	if (!ctx->file_data->table) {
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	if (percpu_ref_init(&ctx->file_data->refs, io_file_ref_kill,
				PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
		kfree(ctx->file_data->table);
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
		percpu_ref_exit(&ctx->file_data->refs);
		kfree(ctx->file_data->table);
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct fixed_file_table *table;
		unsigned index;

		ret = -EFAULT;
		if (copy_from_user(&fd, &fds[i], sizeof(fd)))
			break;
		/* allow sparse sets */
		if (fd == -1) {
			ret = 0;
			continue;
		}

		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
		index = i & IORING_FILE_TABLE_MASK;
		file = fget(fd);

		ret = -EBADF;
		if (!file)
			break;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (file->f_op == &io_uring_fops) {
			fput(file);
			break;
		}
		ret = 0;
		table->files[index] = file;
	}

	if (ret) {
		for (i = 0; i < ctx->nr_user_files; i++) {
			file = io_file_from_index(ctx, i);
			if (file)
				fput(file);
		}
		for (i = 0; i < nr_tables; i++)
			kfree(ctx->file_data->table[i].files);

		kfree(ctx->file_data->table);
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		ctx->nr_user_files = 0;
		return ret;
	}

	ret = io_sqe_files_scm(ctx);
	if (ret) {
		io_sqe_files_unregister(ctx);
		return ret;
	}

	ref_node = alloc_fixed_file_ref_node(ctx);
	if (IS_ERR(ref_node)) {
		io_sqe_files_unregister(ctx);
		return PTR_ERR(ref_node);
	}

	ctx->file_data->cur_refs = &ref_node->refs;
	spin_lock(&ctx->file_data->lock);
	list_add(&ref_node->node, &ctx->file_data->ref_list);
	spin_unlock(&ctx->file_data->lock);
	percpu_ref_get(&ctx->file_data->refs);
	return ret;
}
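
/*
 * Illustrative userspace sketch (assuming the libc exposes
 * __NR_io_uring_register; liburing wraps this as io_uring_register_files()).
 * Registering a two-slot fixed file set, with slot 1 left sparse:
 *
 *	int fds[2] = { fd0, -1 };
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES, fds, 2);
 */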

static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
				int index)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sock->sk_receive_queue;
	struct sk_buff *skb;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb) {
		struct scm_fp_list *fpl = UNIXCB(skb).fp;

		if (fpl->count < SCM_MAX_FD) {
			__skb_unlink(skb, head);
			spin_unlock_irq(&head->lock);
			fpl->fp[fpl->count] = get_file(file);
			unix_inflight(fpl->user, fpl->fp[fpl->count]);
			fpl->count++;
			spin_lock_irq(&head->lock);
			__skb_queue_head(head, skb);
		} else {
			skb = NULL;
		}
	}
	spin_unlock_irq(&head->lock);

	if (skb) {
		fput(file);
		return 0;
	}

	return __io_sqe_files_scm(ctx, 1, index);
#else
	return 0;
#endif
}

static int io_queue_file_removal(struct fixed_file_data *data,
				 struct file *file)
{
	struct io_file_put *pfile;
	struct percpu_ref *refs = data->cur_refs;
	struct fixed_file_ref_node *ref_node;

	pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
	if (!pfile)
		return -ENOMEM;

	ref_node = container_of(refs, struct fixed_file_ref_node, refs);
	pfile->file = file;
	list_add(&pfile->list, &ref_node->file_list);

	return 0;
}

static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_files_update *up,
				 unsigned nr_args)
{
	struct fixed_file_data *data = ctx->file_data;
	struct fixed_file_ref_node *ref_node;
	struct file *file;
	__s32 __user *fds;
	int fd, i, err;
	__u32 done;
	bool needs_switch = false;

	if (check_add_overflow(up->offset, nr_args, &done))
		return -EOVERFLOW;
	if (done > ctx->nr_user_files)
		return -EINVAL;

	ref_node = alloc_fixed_file_ref_node(ctx);
	if (IS_ERR(ref_node))
		return PTR_ERR(ref_node);

	done = 0;
	fds = u64_to_user_ptr(up->fds);
	while (nr_args) {
		struct fixed_file_table *table;
		unsigned index;

		err = 0;
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		i = array_index_nospec(up->offset, ctx->nr_user_files);
		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
		index = i & IORING_FILE_TABLE_MASK;
		if (table->files[index]) {
			file = io_file_from_index(ctx, index);
			err = io_queue_file_removal(data, file);
			if (err)
				break;
			table->files[index] = NULL;
			needs_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (file->f_op == &io_uring_fops) {
				fput(file);
				err = -EBADF;
				break;
			}
			table->files[index] = file;
			err = io_sqe_file_register(ctx, file, i);
			if (err)
				break;
		}
		nr_args--;
		done++;
		up->offset++;
	}

	if (needs_switch) {
		percpu_ref_kill(data->cur_refs);
		spin_lock(&data->lock);
		list_add(&ref_node->node, &data->ref_list);
		data->cur_refs = &ref_node->refs;
		spin_unlock(&data->lock);
		percpu_ref_get(&ctx->file_data->refs);
	} else
		destroy_fixed_file_ref_node(ref_node);

	return done ? done : err;
}

static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
			       unsigned nr_args)
{
	struct io_uring_files_update up;

	if (!ctx->file_data)
		return -ENXIO;
	if (!nr_args)
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (up.resv)
		return -EINVAL;

	return __io_sqe_files_update(ctx, &up, nr_args);
}
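
/*
 * Illustrative userspace sketch (uapi struct per <linux/io_uring.h>):
 * replacing slot 3 of a registered file set via
 * IORING_REGISTER_FILES_UPDATE; an fd of -1 clears the slot instead:
 *
 *	__s32 new_fd = open("data", O_RDONLY);
 *	struct io_uring_files_update up = {
 *		.offset = 3,
 *		.fds = (unsigned long) &new_fd,
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES_UPDATE, &up, 1);
 */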

static void io_free_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	/* Consider that io_steal_work() relies on this ref */
	io_put_req(req);
}

static int io_init_wq_offload(struct io_ring_ctx *ctx,
			      struct io_uring_params *p)
{
	struct io_wq_data data;
	struct fd f;
	struct io_ring_ctx *ctx_attach;
	unsigned int concurrency;
	int ret = 0;

	data.user = ctx->user;
	data.free_work = io_free_work;
	data.do_work = io_wq_submit_work;

	if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
		/* Do QD, or 4 * CPUS, whichever is smaller */
		concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

		ctx->io_wq = io_wq_create(concurrency, &data);
		if (IS_ERR(ctx->io_wq)) {
			ret = PTR_ERR(ctx->io_wq);
			ctx->io_wq = NULL;
		}
		return ret;
	}

	f = fdget(p->wq_fd);
	if (!f.file)
		return -EBADF;

	if (f.file->f_op != &io_uring_fops) {
		ret = -EINVAL;
		goto out_fput;
	}

	ctx_attach = f.file->private_data;
	/* @io_wq is protected by holding the fd */
	if (!io_wq_get(ctx_attach->io_wq, &data)) {
		ret = -EINVAL;
		goto out_fput;
	}

	ctx->io_wq = ctx_attach->io_wq;
out_fput:
	fdput(f);
	return ret;
}
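
/*
 * Illustrative userspace sketch: IORING_SETUP_ATTACH_WQ lets a second ring
 * share the async backend of an existing ring instead of creating its own
 * worker pool (fields per the uapi io_uring_params):
 *
 *	struct io_uring_params p = {
 *		.flags = IORING_SETUP_ATTACH_WQ,
 *		.wq_fd = existing_ring_fd,
 *	};
 *	int fd2 = syscall(__NR_io_uring_setup, 64, &p);
 */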

static int io_sq_offload_start(struct io_ring_ctx *ctx,
			       struct io_uring_params *p)
{
	int ret;

	mmgrab(current->mm);
	ctx->sqo_mm = current->mm;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto err;

		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids)
				goto err;
			if (!cpu_online(cpu))
				goto err;

			ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
							ctx, cpu,
							"io_uring-sq");
		} else {
			ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
							"io_uring-sq");
		}
		if (IS_ERR(ctx->sqo_thread)) {
			ret = PTR_ERR(ctx->sqo_thread);
			ctx->sqo_thread = NULL;
			goto err;
		}
		wake_up_process(ctx->sqo_thread);
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	ret = io_init_wq_offload(ctx, p);
	if (ret)
		goto err;

	return 0;
err:
	io_finish_async(ctx);
	mmdrop(ctx->sqo_mm);
	ctx->sqo_mm = NULL;
	return ret;
}

static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	do {
		cur_pages = atomic_long_read(&user->locked_vm);
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
					new_pages) != cur_pages);

	return 0;
}
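
/*
 * Note on the loop above: the read/check/cmpxchg sequence is a lock-free
 * "add unless over the limit" update of user->locked_vm. If another task
 * updates the counter in between, the cmpxchg fails and the limit check
 * is simply redone with the fresh value.
 */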

static void io_mem_free(void *ptr)
{
	struct page *page;

	if (!ptr)
		return;

	page = virt_to_head_page(ptr);
	if (put_page_testzero(page))
		free_compound_page(page);
}

static void *io_mem_alloc(size_t size)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
				__GFP_NORETRY;

	return (void *) __get_free_pages(gfp_flags, get_order(size));
}

static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
				size_t *sq_offset)
{
	struct io_rings *rings;
	size_t off, sq_array_size;

	off = struct_size(rings, cqes, cq_entries);
	if (off == SIZE_MAX)
		return SIZE_MAX;

#ifdef CONFIG_SMP
	off = ALIGN(off, SMP_CACHE_BYTES);
	if (off == 0)
		return SIZE_MAX;
#endif

	sq_array_size = array_size(sizeof(u32), sq_entries);
	if (sq_array_size == SIZE_MAX)
		return SIZE_MAX;

	if (check_add_overflow(off, sq_array_size, &off))
		return SIZE_MAX;

	if (sq_offset)
		*sq_offset = off;

	return off;
}

static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
{
	size_t pages;

	pages = (size_t)1 << get_order(
		rings_size(sq_entries, cq_entries, NULL));
	pages += (size_t)1 << get_order(
		array_size(sizeof(struct io_uring_sqe), sq_entries));

	return pages;
}
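
/*
 * Layout sketch for the sizes computed above. The rings live in one
 * allocation and the SQE array in a second one:
 *
 *	[struct io_rings][cqes: cq_entries][sq_array: sq_entries * u32]
 *	[sqes: sq_entries * sizeof(struct io_uring_sqe)]
 */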

static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
{
	int i, j;

	if (!ctx->user_bufs)
		return -ENXIO;

	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++)
			unpin_user_page(imu->bvec[j].bv_page);

		if (ctx->account_mem)
			io_unaccount_mem(ctx->user, imu->nr_bvecs);
		kvfree(imu->bvec);
		imu->nr_bvecs = 0;
	}

	kfree(ctx->user_bufs);
	ctx->user_bufs = NULL;
	ctx->nr_user_bufs = 0;
	return 0;
}

static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
				  unsigned nr_args)
{
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, j, got_pages = 0;
	int ret = -EINVAL;

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > UIO_MAXIOV)
		return -EINVAL;

	ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
					GFP_KERNEL);
	if (!ctx->user_bufs)
		return -ENOMEM;

	for (i = 0; i < nr_args; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
		unsigned long off, start, end, ubuf;
		int pret, nr_pages;
		struct iovec iov;
		size_t size;

		ret = io_copy_iov(ctx, &iov, arg, i);
		if (ret)
			goto err;

		/*
		 * Don't impose further limits on the size and buffer
		 * constraints here, we'll -EINVAL later when IO is
		 * submitted if they are wrong.
		 */
		ret = -EFAULT;
		if (!iov.iov_base || !iov.iov_len)
			goto err;

		/* arbitrary limit, but we need something */
		if (iov.iov_len > SZ_1G)
			goto err;

		ubuf = (unsigned long) iov.iov_base;
		end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		start = ubuf >> PAGE_SHIFT;
		nr_pages = end - start;

		if (ctx->account_mem) {
			ret = io_account_mem(ctx->user, nr_pages);
			if (ret)
				goto err;
		}

		ret = 0;
		if (!pages || nr_pages > got_pages) {
			kvfree(vmas);
			kvfree(pages);
			pages = kvmalloc_array(nr_pages, sizeof(struct page *),
						GFP_KERNEL);
			vmas = kvmalloc_array(nr_pages,
					sizeof(struct vm_area_struct *),
					GFP_KERNEL);
			if (!pages || !vmas) {
				ret = -ENOMEM;
				if (ctx->account_mem)
					io_unaccount_mem(ctx->user, nr_pages);
				goto err;
			}
			got_pages = nr_pages;
		}

		imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
						GFP_KERNEL);
		ret = -ENOMEM;
		if (!imu->bvec) {
			if (ctx->account_mem)
				io_unaccount_mem(ctx->user, nr_pages);
			goto err;
		}

		ret = 0;
		down_read(&current->mm->mmap_sem);
		pret = pin_user_pages(ubuf, nr_pages,
				      FOLL_WRITE | FOLL_LONGTERM,
				      pages, vmas);
		if (pret == nr_pages) {
			/* don't support file backed memory */
			for (j = 0; j < nr_pages; j++) {
				struct vm_area_struct *vma = vmas[j];

				if (vma->vm_file &&
				    !is_file_hugepages(vma->vm_file)) {
					ret = -EOPNOTSUPP;
					break;
				}
			}
		} else {
			ret = pret < 0 ? pret : -EFAULT;
		}
		up_read(&current->mm->mmap_sem);
		if (ret) {
			/*
			 * if we did partial map, or found file backed vmas,
			 * release any pages we did get
			 */
			if (pret > 0)
				unpin_user_pages(pages, pret);
			if (ctx->account_mem)
				io_unaccount_mem(ctx->user, nr_pages);
			kvfree(imu->bvec);
			goto err;
		}

		off = ubuf & ~PAGE_MASK;
		size = iov.iov_len;
		for (j = 0; j < nr_pages; j++) {
			size_t vec_len;

			vec_len = min_t(size_t, size, PAGE_SIZE - off);
			imu->bvec[j].bv_page = pages[j];
			imu->bvec[j].bv_len = vec_len;
			imu->bvec[j].bv_offset = off;
			off = 0;
			size -= vec_len;
		}
		/* store original address for later verification */
		imu->ubuf = ubuf;
		imu->len = iov.iov_len;
		imu->nr_bvecs = nr_pages;

		ctx->nr_user_bufs++;
	}
	kvfree(pages);
	kvfree(vmas);
	return 0;
err:
	kvfree(pages);
	kvfree(vmas);
	io_sqe_buffer_unregister(ctx);
	return ret;
}
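
/*
 * Illustrative userspace sketch (the buffer must not be file backed, as
 * enforced above): registering one fixed buffer for use with
 * IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED:
 *
 *	struct iovec iov = {
 *		.iov_base = malloc(65536),
 *		.iov_len  = 65536,
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_BUFFERS, &iov, 1);
 */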

static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
{
	__s32 __user *fds = arg;
	int fd;

	if (ctx->cq_ev_fd)
		return -EBUSY;

	if (copy_from_user(&fd, fds, sizeof(*fds)))
		return -EFAULT;

	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx->cq_ev_fd)) {
		int ret = PTR_ERR(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return ret;
	}

	return 0;
}

static int io_eventfd_unregister(struct io_ring_ctx *ctx)
{
	if (ctx->cq_ev_fd) {
		eventfd_ctx_put(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return 0;
	}

	return -ENXIO;
}
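
/*
 * Illustrative userspace sketch: a registered eventfd is signalled for
 * each posted CQE, so ring completions can be multiplexed with
 * poll/epoll (assuming eventfd(2) is available):
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_EVENTFD, &efd, 1);
 */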

static int __io_destroy_buffers(int id, void *p, void *data)
{
	struct io_ring_ctx *ctx = data;
	struct io_buffer *buf = p;

	__io_remove_buffers(ctx, buf, id, -1U);
	return 0;
}

static void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
	idr_destroy(&ctx->io_buffer_idr);
}

static void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
	io_finish_async(ctx);
	if (ctx->sqo_mm)
		mmdrop(ctx->sqo_mm);

	io_iopoll_reap_events(ctx);
	io_sqe_buffer_unregister(ctx);
	io_sqe_files_unregister(ctx);
	io_eventfd_unregister(ctx);
	io_destroy_buffers(ctx);
	idr_destroy(&ctx->personality_idr);

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		ctx->ring_sock->file = NULL; /* so that iput() is called */
		sock_release(ctx->ring_sock);
	}
#endif

	io_mem_free(ctx->rings);
	io_mem_free(ctx->sq_sqes);

	percpu_ref_exit(&ctx->refs);
	if (ctx->account_mem)
		io_unaccount_mem(ctx->user,
				ring_pages(ctx->sq_entries, ctx->cq_entries));
	free_uid(ctx->user);
	put_cred(ctx->creds);
	kfree(ctx->cancel_hash);
	kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx);
}

static __poll_t io_uring_poll(struct file *file, poll_table *wait)
{
	struct io_ring_ctx *ctx = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &ctx->cq_wait, wait);
	/*
	 * synchronizes with barrier from wq_has_sleeper call in
	 * io_commit_cqring
	 */
	smp_rmb();
	if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
	    ctx->rings->sq_ring_entries)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (io_cqring_events(ctx, false))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

static int io_uring_fasync(int fd, struct file *file, int on)
{
	struct io_ring_ctx *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->cq_fasync);
}

static int io_remove_personalities(int id, void *p, void *data)
{
	struct io_ring_ctx *ctx = data;
	const struct cred *cred;

	cred = idr_remove(&ctx->personality_idr, id);
	if (cred)
		put_cred(cred);
	return 0;
}

static void io_ring_exit_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx;

	ctx = container_of(work, struct io_ring_ctx, exit_work);
	if (ctx->rings)
		io_cqring_overflow_flush(ctx, true);

	wait_for_completion(&ctx->ref_comp);
	io_ring_ctx_free(ctx);
}

static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	percpu_ref_kill(&ctx->refs);
	mutex_unlock(&ctx->uring_lock);

	io_kill_timeouts(ctx);
	io_poll_remove_all(ctx);

	if (ctx->io_wq)
		io_wq_cancel_all(ctx->io_wq);

	io_iopoll_reap_events(ctx);
	/* if we failed setting up the ctx, we might not have any rings */
	if (ctx->rings)
		io_cqring_overflow_flush(ctx, true);
	idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
	queue_work(system_wq, &ctx->exit_work);
}

static int io_uring_release(struct inode *inode, struct file *file)
{
	struct io_ring_ctx *ctx = file->private_data;

	file->private_data = NULL;
	io_ring_ctx_wait_and_kill(ctx);
	return 0;
}

static void io_uring_cancel_files(struct io_ring_ctx *ctx,
				  struct files_struct *files)
{
	while (!list_empty_careful(&ctx->inflight_list)) {
		struct io_kiocb *cancel_req = NULL, *req;
		DEFINE_WAIT(wait);

		spin_lock_irq(&ctx->inflight_lock);
		list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
			if (req->work.files != files)
				continue;
			/* req is being completed, ignore */
			if (!refcount_inc_not_zero(&req->refs))
				continue;
			cancel_req = req;
			break;
		}
		if (cancel_req)
			prepare_to_wait(&ctx->inflight_wait, &wait,
						TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ctx->inflight_lock);

		/* We need to keep going until we don't find a matching req */
		if (!cancel_req)
			break;

		if (cancel_req->flags & REQ_F_OVERFLOW) {
			spin_lock_irq(&ctx->completion_lock);
			list_del(&cancel_req->list);
			cancel_req->flags &= ~REQ_F_OVERFLOW;
			if (list_empty(&ctx->cq_overflow_list)) {
				clear_bit(0, &ctx->sq_check_overflow);
				clear_bit(0, &ctx->cq_check_overflow);
			}
			spin_unlock_irq(&ctx->completion_lock);

			WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));

			/*
			 * Put inflight ref and overflow ref. If that's
			 * all we had, then we're done with this request.
			 */
			if (refcount_sub_and_test(2, &cancel_req->refs)) {
				io_free_req(cancel_req);
				finish_wait(&ctx->inflight_wait, &wait);
				continue;
			}
		} else {
			io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
			io_put_req(cancel_req);
		}

		schedule();
		finish_wait(&ctx->inflight_wait, &wait);
	}
}

static int io_uring_flush(struct file *file, void *data)
{
	struct io_ring_ctx *ctx = file->private_data;

	io_uring_cancel_files(ctx, data);

	/*
	 * If the task is going away, cancel work it may have pending
	 */
	if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
		io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));

	return 0;
}

static void *io_uring_validate_mmap_request(struct file *file,
					    loff_t pgoff, size_t sz)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t offset = pgoff << PAGE_SHIFT;
	struct page *page;
	void *ptr;

	switch (offset) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		ptr = ctx->rings;
		break;
	case IORING_OFF_SQES:
		ptr = ctx->sq_sqes;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	page = virt_to_head_page(ptr);
	if (sz > page_size(page))
		return ERR_PTR(-EINVAL);

	return ptr;
}

#ifdef CONFIG_MMU

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t sz = vma->vm_end - vma->vm_start;
	unsigned long pfn;
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}

#else /* !CONFIG_MMU */

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
}

static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len,
	unsigned long pgoff, unsigned long flags)
{
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, pgoff, len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */
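
/*
 * Illustrative userspace sketch of the offsets handled above (assuming p
 * was filled in by io_uring_setup(2)); the SQ and CQ rings share a single
 * mapping, while the SQE array is mapped separately:
 *
 *	void *sq_ring = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *			     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			     ring_fd, IORING_OFF_SQ_RING);
 *	void *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			  ring_fd, IORING_OFF_SQES);
 */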

SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
		u32, min_complete, u32, flags, const sigset_t __user *, sig,
		size_t, sigsz)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	int submitted = 0;
	struct fd f;

	if (current->task_works)
		task_work_run();

	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
		return -EINVAL;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ret = -ENXIO;
	ctx = f.file->private_data;
	if (!percpu_ref_tryget(&ctx->refs))
		goto out_fput;

	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
	ret = 0;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (!list_empty_careful(&ctx->cq_overflow_list))
			io_cqring_overflow_flush(ctx, false);
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sqo_wait);
		submitted = to_submit;
	} else if (to_submit) {
		mutex_lock(&ctx->uring_lock);
		submitted = io_submit_sqes(ctx, to_submit, f.file, fd);
		mutex_unlock(&ctx->uring_lock);

		if (submitted != to_submit)
			goto out;
	}
	if (flags & IORING_ENTER_GETEVENTS) {
		unsigned nr_events = 0;

		min_complete = min(min_complete, ctx->cq_entries);

		/*
		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
		 * space applications don't need to do io completion events
		 * polling again, they can rely on io_sq_thread to do polling
		 * work, which can reduce cpu usage and uring_lock contention.
		 */
		if (ctx->flags & IORING_SETUP_IOPOLL &&
		    !(ctx->flags & IORING_SETUP_SQPOLL)) {
			ret = io_iopoll_check(ctx, &nr_events, min_complete);
		} else {
			ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
		}
	}

out:
	percpu_ref_put(&ctx->refs);
out_fput:
	fdput(f);
	return submitted ? submitted : ret;
}
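
/*
 * Illustrative userspace sketch (assuming the libc exposes
 * __NR_io_uring_enter): submit one SQE and block until at least one CQE
 * is available:
 *
 *	int n = syscall(__NR_io_uring_enter, ring_fd, 1, 1,
 *			IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * With IORING_SETUP_SQPOLL the submit count is merely echoed back, and
 * the call mainly serves to wake the poll thread via
 * IORING_ENTER_SQ_WAKEUP when it has gone idle.
 */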

#ifdef CONFIG_PROC_FS
static int io_uring_show_cred(int id, void *p, void *data)
{
	const struct cred *cred = p;
	struct seq_file *m = data;
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	unsigned __capi;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
					from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	CAP_FOR_EACH_U32(__capi)
		seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
	seq_putc(m, '\n');
	return 0;
}

static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
	int i;

	mutex_lock(&ctx->uring_lock);
	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
	for (i = 0; i < ctx->nr_user_files; i++) {
		struct fixed_file_table *table;
		struct file *f;

		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
		f = table->files[i & IORING_FILE_TABLE_MASK];
		if (f)
			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *buf = &ctx->user_bufs[i];

		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
						(unsigned int) buf->len);
	}
	if (!idr_is_empty(&ctx->personality_idr)) {
		seq_printf(m, "Personalities:\n");
		idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
	}
	seq_printf(m, "PollList:\n");
	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct hlist_head *list = &ctx->cancel_hash[i];
		struct io_kiocb *req;

		hlist_for_each_entry(req, list, hash_node)
			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
					req->task->task_works != NULL);
	}
	spin_unlock_irq(&ctx->completion_lock);
	mutex_unlock(&ctx->uring_lock);
}

static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct io_ring_ctx *ctx = f->private_data;

	if (percpu_ref_tryget(&ctx->refs)) {
		__io_uring_show_fdinfo(ctx, m);
		percpu_ref_put(&ctx->refs);
	}
}
#endif

static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.flush		= io_uring_flush,
	.mmap		= io_uring_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
	.poll		= io_uring_poll,
	.fasync		= io_uring_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= io_uring_show_fdinfo,
#endif
};

static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
				  struct io_uring_params *p)
{
	struct io_rings *rings;
	size_t size, sq_array_offset;

	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	rings = io_mem_alloc(size);
	if (!rings)
		return -ENOMEM;

	ctx->rings = rings;
	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
	rings->sq_ring_mask = p->sq_entries - 1;
	rings->cq_ring_mask = p->cq_entries - 1;
	rings->sq_ring_entries = p->sq_entries;
	rings->cq_ring_entries = p->cq_entries;
	ctx->sq_mask = rings->sq_ring_mask;
	ctx->cq_mask = rings->cq_ring_mask;
	ctx->sq_entries = rings->sq_ring_entries;
	ctx->cq_entries = rings->cq_ring_entries;

	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -EOVERFLOW;
	}

	ctx->sq_sqes = io_mem_alloc(size);
	if (!ctx->sq_sqes) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -ENOMEM;
	}

	return 0;
}

/*
 * Allocate an anonymous fd, this is what constitutes the application
 * visible backing of an io_uring instance. The application mmaps this
 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
 * we have to tie this fd to a socket for file garbage collection purposes.
 */
static int io_uring_get_fd(struct io_ring_ctx *ctx)
{
	struct file *file;
	int ret;

#if defined(CONFIG_UNIX)
	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
				&ctx->ring_sock);
	if (ret)
		return ret;
#endif

	ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (ret < 0)
		goto err;

	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
					O_RDWR | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
		goto err;
	}

#if defined(CONFIG_UNIX)
	ctx->ring_sock->file = file;
#endif
	fd_install(ret, file);
	return ret;
err:
#if defined(CONFIG_UNIX)
	sock_release(ctx->ring_sock);
	ctx->ring_sock = NULL;
#endif
	return ret;
}

static int io_uring_create(unsigned entries, struct io_uring_params *p,
			   struct io_uring_params __user *params)
{
	struct user_struct *user = NULL;
	struct io_ring_ctx *ctx;
	bool account_mem;
	int ret;

	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}

	user = get_uid(current_user());
	account_mem = !capable(CAP_IPC_LOCK);

	if (account_mem) {
		ret = io_account_mem(user,
				ring_pages(p->sq_entries, p->cq_entries));
		if (ret) {
			free_uid(user);
			return ret;
		}
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx) {
		if (account_mem)
			io_unaccount_mem(user, ring_pages(p->sq_entries,
								p->cq_entries));
		free_uid(user);
		return -ENOMEM;
	}
	ctx->compat = in_compat_syscall();
	ctx->account_mem = account_mem;
	ctx->user = user;
	ctx->creds = get_current_cred();

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_start(ctx, p);
	if (ret)
		goto err;

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);
	p->cq_off.flags = offsetof(struct io_rings, cq_flags);

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL;

	if (copy_to_user(params, p, sizeof(*p))) {
		ret = -EFAULT;
		goto err;
	}
	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_get_fd(ctx);
	if (ret < 0)
		goto err;

	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}

/*
 * Sets up an io_uring context, and returns the fd. The application asks
 * for a ring size; we return the actual sq/cq ring sizes (among other
 * things) in the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
		return -EINVAL;

	return io_uring_create(entries, &p, params);
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}
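
/*
 * Illustrative userspace sketch (assuming the libc exposes
 * __NR_io_uring_setup): create a ring with 8 SQ entries; the kernel
 * fills p with the actual ring sizes and the mmap offsets used above:
 *
 *	struct io_uring_params p = { 0 };
 *	int ring_fd = syscall(__NR_io_uring_setup, 8, &p);
 */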

static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_op_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}
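
/*
 * Illustrative userspace sketch: probing which opcodes this kernel
 * supports (the probe buffer must be zero filled, as checked above):
 *
 *	struct io_uring_probe *pr;
 *
 *	pr = calloc(1, sizeof(*pr) + 256 * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_PROBE, pr, 256);
 *	int have_readv = pr->ops[IORING_OP_READV].flags & IO_URING_OP_SUPPORTED;
 */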

static int io_register_personality(struct io_ring_ctx *ctx)
{
	const struct cred *creds = get_current_cred();
	int id;

	id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
				USHRT_MAX, GFP_KERNEL);
	if (id < 0)
		put_cred(creds);
	return id;
}

static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
	const struct cred *old_creds;

	old_creds = idr_remove(&ctx->personality_idr, id);
	if (old_creds) {
		put_cred(old_creds);
		return 0;
	}

	return -EINVAL;
}

static bool io_register_op_must_quiesce(int op)
{
	switch (op) {
	case IORING_UNREGISTER_FILES:
	case IORING_REGISTER_FILES_UPDATE:
	case IORING_REGISTER_PROBE:
	case IORING_REGISTER_PERSONALITY:
	case IORING_UNREGISTER_PERSONALITY:
		return false;
	default:
		return true;
	}
}

static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex, if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (io_register_op_must_quiesce(opcode)) {
		percpu_ref_kill(&ctx->refs);

		/*
		 * Drop uring mutex before waiting for references to exit. If
		 * another thread is currently inside io_uring_enter() it might
		 * need to grab the uring_lock to make progress. If we hold it
		 * here across the drain wait, then we can deadlock. It's safe
		 * to drop the mutex here, since no new references will come in
		 * after we've killed the percpu ref.
		 */
		mutex_unlock(&ctx->uring_lock);
		ret = wait_for_completion_interruptible(&ctx->ref_comp);
		mutex_lock(&ctx->uring_lock);
		if (ret) {
			percpu_ref_resurrect(&ctx->refs);
			ret = -EINTR;
			goto out;
		}
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffer_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffer_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_sqe_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
out:
		reinit_completion(&ctx->ref_comp);
	}
	return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
							ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}

static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
	return 0;
};
__initcall(io_uring_init);