// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
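/*
 * Illustrative sketch of the application-side counterpart of the ordering
 * rules above (not part of this file's build; liburing provides equivalent
 * helpers). Names such as sq_tail_ptr, cq_tail_ptr, cq_head_ptr, cqes and
 * cq_mask are hypothetical stand-ins for the mmap'ed ring fields:
 *
 *	// publish new SQEs: a store-release on the SQ tail orders the
 *	// preceding SQE stores before the tail update the kernel reads
 *	smp_store_release(sq_tail_ptr, local_sq_tail);
 *
 *	// reap completions: a load-acquire on the CQ tail orders the
 *	// subsequent CQE loads after the kernel's tail update
 *	unsigned tail = smp_load_acquire(cq_tail_ptr);
 *	while (local_cq_head != tail)
 *		handle_cqe(&cqes[local_cq_head++ & cq_mask]);
 *	smp_store_release(cq_head_ptr, local_cq_head);
 */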
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
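/*
 * Illustrative sketch (an assumption for this commentary, not a definition
 * used below): with the two-level fixed file table these constants describe,
 * a registered file index i is expected to resolve roughly as
 *
 *	struct fixed_file_table *table =
 *		&ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
 *	struct file *file = table->files[i & IORING_FILE_TABLE_MASK];
 *
 * i.e. the high bits select one 512-entry (page-sized) table, the low nine
 * bits index within it.
 */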

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
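/*
 * Illustrative userspace sketch (an assumption, not compiled here): how an
 * application is expected to map this structure after io_uring_setup().
 * 'p' is the struct io_uring_params filled in by the setup call, 'fd' the
 * ring file descriptor:
 *
 *	void *sq_ring = mmap(NULL,
 *			     p.sq_off.array + p.sq_entries * sizeof(__u32),
 *			     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			     fd, IORING_OFF_SQ_RING);
 *	void *cq_ring = mmap(NULL,
 *			     p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
 *			     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			     fd, IORING_OFF_CQ_RING);
 *
 * The head/tail/mask/flags fields above are then reached through the offsets
 * published in p.sq_off and p.cq_off.
 */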

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct bio_vec	*bvec;
	unsigned int	nr_bvecs;
};

struct fixed_file_table {
	struct file		**files;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		bool			compat;
		bool			account_mem;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		struct io_uring_sqe	*sq_sqes;

		struct list_head	defer_list;
		struct list_head	timeout_list;

		wait_queue_head_t	inflight_wait;
	} ____cacheline_aligned_in_smp;

	/* IO offload */
	struct io_wq		*io_wq;
	struct task_struct	*sqo_thread;	/* if using sq thread polling */
	struct mm_struct	*sqo_mm;
	wait_queue_head_t	sqo_wait;
	struct completion	sqo_thread_started;

	struct {
		unsigned		cached_cq_tail;
		atomic_t		cached_cq_overflow;
		unsigned		cq_entries;
		unsigned		cq_mask;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
		atomic_t		cq_timeouts;
	} ____cacheline_aligned_in_smp;

	struct io_rings	*rings;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_file_table	*file_table;
	unsigned		nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	struct completion	ctx_done;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;
		bool			poll_multi_file;
		/*
		 * ->poll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	poll_list;
		struct list_head	cancel_list;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif
};

struct sqe_submit {
	const struct io_uring_sqe	*sqe;
	struct file			*ring_file;
	int				ring_fd;
	u32				sequence;
	bool				has_user;
	bool				in_async;
	bool				needs_fixed_file;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_timeout {
	struct file			*file;
	struct hrtimer			timer;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct kiocb		rw;
		struct io_poll_iocb	poll;
		struct io_timeout	timeout;
	};

	struct sqe_submit	submit;

	struct io_ring_ctx	*ctx;
	struct list_head	list;
	struct list_head	link_list;
	unsigned int		flags;
	refcount_t		refs;
#define REQ_F_NOWAIT		1	/* must not punt to workers */
#define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
#define REQ_F_FIXED_FILE	4	/* ctx owns file */
#define REQ_F_SEQ_PREV		8	/* sequential with previous */
#define REQ_F_IO_DRAIN		16	/* drain existing IO first */
#define REQ_F_IO_DRAINED	32	/* drain done */
#define REQ_F_LINK		64	/* linked sqes */
#define REQ_F_LINK_TIMEOUT	128	/* has linked timeout */
#define REQ_F_FAIL_LINK		256	/* fail rest of links */
#define REQ_F_SHADOW_DRAIN	512	/* link-drain shadow req */
#define REQ_F_TIMEOUT		1024	/* timeout request */
#define REQ_F_ISREG		2048	/* regular file */
#define REQ_F_MUST_PUNT		4096	/* must be punted even for NONBLOCK */
#define REQ_F_INFLIGHT		8192	/* on inflight list */
	u64			user_data;
	u32			result;
	u32			sequence;

	struct list_head	inflight_entry;

	struct io_wq_work	work;
};

#define IO_PLUG_THRESHOLD		2
#define IO_IOPOLL_BATCH			8
Jens Axboe2b188cc2019-01-07 10:46:33 -0700350
Jens Axboe9a56a232019-01-09 09:06:50 -0700351struct io_submit_state {
352 struct blk_plug plug;
353
354 /*
Jens Axboe2579f912019-01-09 09:10:43 -0700355 * io_kiocb alloc cache
356 */
357 void *reqs[IO_IOPOLL_BATCH];
358 unsigned int free_reqs;
359 unsigned int cur_req;
360
361 /*
Jens Axboe9a56a232019-01-09 09:06:50 -0700362 * File reference cache
363 */
364 struct file *file;
365 unsigned int fd;
366 unsigned int has_refs;
367 unsigned int used_refs;
368 unsigned int ios_left;
369};
370
Jens Axboe561fb042019-10-24 07:25:42 -0600371static void io_wq_submit_work(struct io_wq_work **workptr);
Jens Axboe78e19bb2019-11-06 15:21:34 -0700372static void io_cqring_fill_event(struct io_kiocb *req, long res);
Jackie Liu4fe2c962019-09-09 20:50:40 +0800373static void __io_free_req(struct io_kiocb *req);
Jens Axboe2665abf2019-11-05 12:40:47 -0700374static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr);
Jens Axboe78e19bb2019-11-06 15:21:34 -0700375static void io_double_put_req(struct io_kiocb *req);
Jens Axboede0617e2019-04-06 21:51:27 -0600376
Jens Axboe2b188cc2019-01-07 10:46:33 -0700377static struct kmem_cache *req_cachep;
378
379static const struct file_operations io_uring_fops;
380
381struct sock *io_uring_get_socket(struct file *file)
382{
383#if defined(CONFIG_UNIX)
384 if (file->f_op == &io_uring_fops) {
385 struct io_ring_ctx *ctx = file->private_data;
386
387 return ctx->ring_sock->sk;
388 }
389#endif
390 return NULL;
391}
392EXPORT_SYMBOL(io_uring_get_socket);
393
394static void io_ring_ctx_ref_free(struct percpu_ref *ref)
395{
396 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
397
398 complete(&ctx->ctx_done);
399}
400
401static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
402{
403 struct io_ring_ctx *ctx;
404
405 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
406 if (!ctx)
407 return NULL;
408
Roman Gushchin21482892019-05-07 10:01:48 -0700409 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
410 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
Jens Axboe2b188cc2019-01-07 10:46:33 -0700411 kfree(ctx);
412 return NULL;
413 }
414
415 ctx->flags = p->flags;
416 init_waitqueue_head(&ctx->cq_wait);
417 init_completion(&ctx->ctx_done);
Jackie Liua4c0b3d2019-07-08 13:41:12 +0800418 init_completion(&ctx->sqo_thread_started);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700419 mutex_init(&ctx->uring_lock);
420 init_waitqueue_head(&ctx->wait);
421 spin_lock_init(&ctx->completion_lock);
Jens Axboedef596e2019-01-09 08:59:42 -0700422 INIT_LIST_HEAD(&ctx->poll_list);
Jens Axboe221c5eb2019-01-17 09:41:58 -0700423 INIT_LIST_HEAD(&ctx->cancel_list);
Jens Axboede0617e2019-04-06 21:51:27 -0600424 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe5262f562019-09-17 12:26:57 -0600425 INIT_LIST_HEAD(&ctx->timeout_list);
Jens Axboefcb323c2019-10-24 12:39:47 -0600426 init_waitqueue_head(&ctx->inflight_wait);
427 spin_lock_init(&ctx->inflight_lock);
428 INIT_LIST_HEAD(&ctx->inflight_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700429 return ctx;
430}

static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
				       struct io_kiocb *req)
{
	return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
				+ atomic_read(&ctx->cached_cq_overflow);
}

static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
		return false;

	return __io_sequence_defer(ctx, req);
}

static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
	if (req && !io_sequence_defer(ctx, req)) {
		list_del_init(&req->list);
		return req;
	}

	return NULL;
}

static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
	if (req && !__io_sequence_defer(ctx, req)) {
		list_del_init(&req->list);
		return req;
	}

	return NULL;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
		/* order cqe stores with ring update */
		smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

		if (wq_has_sleeper(&ctx->cq_wait)) {
			wake_up_interruptible(&ctx->cq_wait);
			kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
		}
	}
}

static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
{
	u8 opcode = READ_ONCE(sqe->opcode);

	return !(opcode == IORING_OP_READ_FIXED ||
		 opcode == IORING_OP_WRITE_FIXED);
}

static inline bool io_prep_async_work(struct io_kiocb *req)
{
	bool do_hashed = false;

	if (req->submit.sqe) {
		switch (req->submit.sqe->opcode) {
		case IORING_OP_WRITEV:
		case IORING_OP_WRITE_FIXED:
			do_hashed = true;
			break;
		}
		if (io_sqe_needs_user(req->submit.sqe))
			req->work.flags |= IO_WQ_WORK_NEEDS_USER;
	}

	return do_hashed;
}

static inline void io_queue_async_work(struct io_ring_ctx *ctx,
				       struct io_kiocb *req)
{
	bool do_hashed = io_prep_async_work(req);

	trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
					req->flags);
	if (!do_hashed) {
		io_wq_enqueue(ctx->io_wq, &req->work);
	} else {
		io_wq_enqueue_hashed(ctx->io_wq, &req->work,
					file_inode(req->file));
	}
}

static void io_kill_timeout(struct io_kiocb *req)
{
	int ret;

	ret = hrtimer_try_to_cancel(&req->timeout.timer);
	if (ret != -1) {
		atomic_inc(&req->ctx->cq_timeouts);
		list_del_init(&req->list);
		io_cqring_fill_event(req, 0);
		io_put_req(req, NULL);
	}
}

static void io_kill_timeouts(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req, *tmp;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
		io_kill_timeout(req);
	spin_unlock_irq(&ctx->completion_lock);
}

static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	while ((req = io_get_timeout_req(ctx)) != NULL)
		io_kill_timeout(req);

	__io_commit_cqring(ctx);

	while ((req = io_get_deferred_req(ctx)) != NULL) {
		if (req->flags & REQ_F_SHADOW_DRAIN) {
			/* Just for drain, free it. */
			__io_free_req(req);
			continue;
		}
		req->flags |= REQ_F_IO_DRAINED;
		io_queue_async_work(ctx, req);
	}
}

static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;

	tail = ctx->cached_cq_tail;
	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
		return NULL;

	ctx->cached_cq_tail++;
	return &rings->cqes[tail & ctx->cq_mask];
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, req->user_data, res);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (cqe) {
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, 0);
	} else {
		WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
	}
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	if (waitqueue_active(&ctx->sqo_wait))
		wake_up(&ctx->sqo_wait);
	if (ctx->cq_ev_fd)
		eventfd_signal(ctx->cq_ev_fd, 1);
}

static void io_cqring_add_event(struct io_kiocb *req, long res)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	io_cqring_fill_event(req, res);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
}

static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
				   struct io_submit_state *state)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct io_kiocb *req;

	if (!percpu_ref_tryget(&ctx->refs))
		return NULL;

	if (!state) {
		req = kmem_cache_alloc(req_cachep, gfp);
		if (unlikely(!req))
			goto out;
	} else if (!state->free_reqs) {
		size_t sz;
		int ret;

		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				goto out;
			ret = 1;
		}
		state->free_reqs = ret - 1;
		state->cur_req = 1;
		req = state->reqs[0];
	} else {
		req = state->reqs[state->cur_req];
		state->free_reqs--;
		state->cur_req++;
	}

	req->file = NULL;
	req->ctx = ctx;
	req->flags = 0;
	/* one is dropped after submission, the other at completion */
	refcount_set(&req->refs, 2);
	req->result = 0;
	INIT_IO_WORK(&req->work, io_wq_submit_work);
	return req;
out:
	percpu_ref_put(&ctx->refs);
	return NULL;
}

static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
{
	if (*nr) {
		kmem_cache_free_bulk(req_cachep, *nr, reqs);
		percpu_ref_put_many(&ctx->refs, *nr);
		*nr = 0;
	}
}

static void __io_free_req(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
		fput(req->file);
	if (req->flags & REQ_F_INFLIGHT) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		list_del(&req->inflight_entry);
		if (waitqueue_active(&ctx->inflight_wait))
			wake_up(&ctx->inflight_wait);
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
	}
	percpu_ref_put(&ctx->refs);
	kmem_cache_free(req_cachep, req);
}

static bool io_link_cancel_timeout(struct io_ring_ctx *ctx,
				   struct io_kiocb *req)
{
	int ret;

	ret = hrtimer_try_to_cancel(&req->timeout.timer);
	if (ret != -1) {
		io_cqring_fill_event(req, -ECANCELED);
		io_commit_cqring(ctx);
		req->flags &= ~REQ_F_LINK;
		io_put_req(req, NULL);
		return true;
	}

	return false;
}

static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *nxt;
	bool wake_ev = false;

	/*
	 * The list should never be empty when we are called here. But could
	 * potentially happen if the chain is messed up, check to be on the
	 * safe side.
	 */
	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
	while (nxt) {
		list_del(&nxt->list);
		if (!list_empty(&req->link_list)) {
			INIT_LIST_HEAD(&nxt->link_list);
			list_splice(&req->link_list, &nxt->link_list);
			nxt->flags |= REQ_F_LINK;
		}

		/*
		 * If we're in async work, we can continue processing the chain
		 * in this context instead of having to queue up new async work.
		 */
		if (req->flags & REQ_F_LINK_TIMEOUT) {
			wake_ev = io_link_cancel_timeout(ctx, nxt);

			/* we dropped this link, get next */
			nxt = list_first_entry_or_null(&req->link_list,
							struct io_kiocb, list);
		} else if (nxtptr && current_work()) {
			*nxtptr = nxt;
			break;
		} else {
			io_queue_async_work(req->ctx, nxt);
			break;
		}
	}

	if (wake_ev)
		io_cqring_ev_posted(ctx);
}

/*
 * Called if REQ_F_LINK is set, and we fail the head request
 */
static void io_fail_links(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);

	while (!list_empty(&req->link_list)) {
		link = list_first_entry(&req->link_list, struct io_kiocb, list);
		list_del_init(&link->list);

		trace_io_uring_fail_link(req, link);

		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
		    link->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT) {
			io_link_cancel_timeout(ctx, link);
		} else {
			io_cqring_fill_event(link, -ECANCELED);
			io_double_put_req(link);
		}
	}

	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);
}

static void io_free_req(struct io_kiocb *req, struct io_kiocb **nxt)
{
	if (likely(!(req->flags & REQ_F_LINK))) {
		__io_free_req(req);
		return;
	}

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (req->flags & REQ_F_FAIL_LINK) {
		io_fail_links(req);
	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;
		unsigned long flags;

		/*
		 * If this is a timeout link, we could be racing with the
		 * timeout timer. Grab the completion lock for this case to
		 * protect against that.
		 */
		spin_lock_irqsave(&ctx->completion_lock, flags);
		io_req_link_next(req, nxt);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
	} else {
		io_req_link_next(req, nxt);
	}

	__io_free_req(req);
}

/*
 * Drop reference to request, return next in chain (if there is one) if this
 * was the last reference to this request.
 */
static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = NULL;

	if (refcount_dec_and_test(&req->refs))
		io_free_req(req, &nxt);

	return nxt;
}

static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
	struct io_kiocb *nxt;

	nxt = io_put_req_find_next(req);
	if (nxt) {
		if (nxtptr)
			*nxtptr = nxt;
		else
			io_queue_async_work(nxt->ctx, nxt);
	}
}

static void io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		__io_free_req(req);
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* See comment at the top of this file */
	smp_rmb();
	return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

/*
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       struct list_head *done)
{
	void *reqs[IO_IOPOLL_BATCH];
	struct io_kiocb *req;
	int to_free;

	to_free = 0;
	while (!list_empty(done)) {
		req = list_first_entry(done, struct io_kiocb, list);
		list_del(&req->list);

		io_cqring_fill_event(req, req->result);
		(*nr_events)++;

		if (refcount_dec_and_test(&req->refs)) {
			/* If we're not using fixed files, we have to pair the
			 * completion part with the file put. Use regular
			 * completions for those, only batch free for fixed
			 * file and non-linked commands.
			 */
			if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
			    REQ_F_FIXED_FILE) {
				reqs[to_free++] = req;
				if (to_free == ARRAY_SIZE(reqs))
					io_free_req_many(ctx, reqs, &to_free);
			} else {
				io_free_req(req, NULL);
			}
		}
	}

	io_commit_cqring(ctx);
	io_free_req_many(ctx, reqs, &to_free);
}

static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
			long min)
{
	struct io_kiocb *req, *tmp;
	LIST_HEAD(done);
	bool spin;
	int ret;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list, and we're under the requested amount.
	 */
	spin = !ctx->poll_multi_file && *nr_events < min;

	ret = 0;
	list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
		struct kiocb *kiocb = &req->rw;

		/*
		 * Move completed entries to our local list. If we find a
		 * request that requires polling, break out and complete
		 * the done list first, if we have entries there.
		 */
		if (req->flags & REQ_F_IOPOLL_COMPLETED) {
			list_move_tail(&req->list, &done);
			continue;
		}
		if (!list_empty(&done))
			break;

		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
		if (ret < 0)
			break;

		if (ret && spin)
			spin = false;
		ret = 0;
	}

	if (!list_empty(&done))
		io_iopoll_complete(ctx, nr_events, &done);

	return ret;
}

/*
 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
 * non-spinning poll check - we'll still enter the driver poll loop, but only
 * as a non-spinning completion check.
 */
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
				long min)
{
	while (!list_empty(&ctx->poll_list) && !need_resched()) {
		int ret;

		ret = io_do_iopoll(ctx, nr_events, min);
		if (ret < 0)
			return ret;
		if (!min || *nr_events >= min)
			return 0;
	}

	return 1;
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->poll_list)) {
		unsigned int nr_events = 0;

		io_iopoll_getevents(ctx, &nr_events, 1);

		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 */
		cond_resched();
	}
	mutex_unlock(&ctx->uring_lock);
}

static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
			     long min)
{
	int iters = 0, ret = 0;

	do {
		int tmin = 0;

		/*
		 * Don't enter poll loop if we already have events pending.
		 * If we do, we can potentially be spinning for commands that
		 * already triggered a CQE (eg in error).
		 */
		if (io_cqring_events(ctx))
			break;

		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (!(++iters & 7)) {
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&ctx->uring_lock);
		}

		if (*nr_events < min)
			tmin = min - *nr_events;

		ret = io_iopoll_getevents(ctx, nr_events, tmin);
		if (ret <= 0)
			break;
		ret = 0;
	} while (min && !*nr_events && !need_resched());

	return ret;
}

static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
			   long min)
{
	int ret;

	/*
	 * We disallow the app entering submit/complete with polling, but we
	 * still need to lock the ring to prevent racing with polled issue
	 * that got punted to a workqueue.
	 */
	mutex_lock(&ctx->uring_lock);
	ret = __io_iopoll_check(ctx, nr_events, min);
	mutex_unlock(&ctx->uring_lock);
	return ret;
}

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct inode *inode = file_inode(req->file);

		__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
	}
	file_end_write(req->file);
}

static void io_complete_rw_common(struct kiocb *kiocb, long res)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if ((req->flags & REQ_F_LINK) && res != req->result)
		req->flags |= REQ_F_FAIL_LINK;
	io_cqring_add_event(req, res);
}

static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	io_complete_rw_common(kiocb, res);
	io_put_req(req, NULL);
}

static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	io_complete_rw_common(kiocb, res);
	return io_put_req_find_next(req);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if ((req->flags & REQ_F_LINK) && res != req->result)
		req->flags |= REQ_F_FAIL_LINK;
	req->result = res;
	if (res != -EAGAIN)
		req->flags |= REQ_F_IOPOLL_COMPLETED;
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from a io_iopoll_getevents() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (list_empty(&ctx->poll_list)) {
		ctx->poll_multi_file = false;
	} else if (!ctx->poll_multi_file) {
		struct io_kiocb *list_req;

		list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
						list);
		if (list_req->rw.ki_filp != req->rw.ki_filp)
			ctx->poll_multi_file = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (req->flags & REQ_F_IOPOLL_COMPLETED)
		list_add(&req->list, &ctx->poll_list);
	else
		list_add_tail(&req->list, &ctx->poll_list);
}

static void io_file_put(struct io_submit_state *state)
{
	if (state->file) {
		int diff = state->has_refs - state->used_refs;

		if (diff)
			fput_many(state->file, diff);
		state->file = NULL;
	}
}

/*
 * Get as many references to a file as we have IOs left in this submission,
 * assuming most submissions are for one file, or at least that each file
 * has more than one submission.
 */
static struct file *io_file_get(struct io_submit_state *state, int fd)
{
	if (!state)
		return fget(fd);

	if (state->file) {
		if (state->fd == fd) {
			state->used_refs++;
			state->ios_left--;
			return state->file;
		}
		io_file_put(state);
	}
	state->file = fget_many(fd, state->ios_left);
	if (!state->file)
		return NULL;

	state->fd = fd;
	state->has_refs = state->ios_left;
	state->used_refs = 1;
	state->ios_left--;
	return state->file;
}

/*
 * If we tracked the file through the SCM inflight mechanism, we could support
 * any file. For now, just ensure that anything potentially problematic is done
 * inline.
 */
static bool io_file_supports_async(struct file *file)
{
	umode_t mode = file_inode(file)->i_mode;

	if (S_ISBLK(mode) || S_ISCHR(mode))
		return true;
	if (S_ISREG(mode) && file->f_op != &io_uring_fops)
		return true;

	return false;
}

static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
{
	const struct io_uring_sqe *sqe = req->submit.sqe;
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw;
	unsigned ioprio;
	int ret;

	if (!req->file)
		return -EBADF;

	if (S_ISREG(file_inode(req->file)->i_mode))
		req->flags |= REQ_F_ISREG;

	/*
	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
	 * we know to async punt it even if it was opened O_NONBLOCK
	 */
	if (force_nonblock && !io_file_supports_async(req->file)) {
		req->flags |= REQ_F_MUST_PUNT;
		return -EAGAIN;
	}

	kiocb->ki_pos = READ_ONCE(sqe->off);
	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		kiocb->ki_ioprio = ioprio;
	} else
		kiocb->ki_ioprio = get_current_ioprio();

	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
	if (unlikely(ret))
		return ret;

	/* don't allow async punt if RWF_NOWAIT was requested */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    (req->file->f_flags & O_NONBLOCK))
		req->flags |= REQ_F_NOWAIT;

	if (force_nonblock)
		kiocb->ki_flags |= IOCB_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
		    !kiocb->ki_filp->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}
	return 0;
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		/* fall through */
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}

static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
		       bool in_async)
{
	if (in_async && ret >= 0 && nxt && kiocb->ki_complete == io_complete_rw)
		*nxt = __io_complete_rw(kiocb, ret);
	else
		io_rw_done(kiocb, ret);
}

static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
			   const struct io_uring_sqe *sqe,
			   struct iov_iter *iter)
{
	size_t len = READ_ONCE(sqe->len);
	struct io_mapped_ubuf *imu;
	unsigned index, buf_index;
	size_t offset;
	u64 buf_addr;

	/* attempt to use fixed buffers without having provided iovecs */
	if (unlikely(!ctx->user_bufs))
		return -EFAULT;

	buf_index = READ_ONCE(sqe->buf_index);
	if (unlikely(buf_index >= ctx->nr_user_bufs))
		return -EFAULT;

	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
	imu = &ctx->user_bufs[index];
	buf_addr = READ_ONCE(sqe->addr);

	/* overflow */
	if (buf_addr + len < buf_addr)
		return -EFAULT;
	/* not inside the mapped region */
	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}

static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
			       const struct sqe_submit *s, struct iovec **iovec,
			       struct iov_iter *iter)
{
	const struct io_uring_sqe *sqe = s->sqe;
	void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	size_t sqe_len = READ_ONCE(sqe->len);
	u8 opcode;

	/*
	 * We're reading ->opcode for the second time, but the first read
	 * doesn't care whether it's _FIXED or not, so it doesn't matter
	 * whether ->opcode changes concurrently. The first read does care
	 * about whether it is a READ or a WRITE, so we don't trust this read
	 * for that purpose and instead let the caller pass in the read/write
	 * flag.
	 */
	opcode = READ_ONCE(sqe->opcode);
	if (opcode == IORING_OP_READ_FIXED ||
	    opcode == IORING_OP_WRITE_FIXED) {
		ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
		*iovec = NULL;
		return ret;
	}

	if (!s->has_user)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (ctx->compat)
		return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
						iovec, iter);
#endif

	return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
}

Jens Axboe32960612019-09-23 11:05:34 -06001435/*
1436 * For files that don't have ->read_iter() and ->write_iter(), handle them
1437 * by looping over ->read() or ->write() manually.
1438 */
1439static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
1440 struct iov_iter *iter)
1441{
1442 ssize_t ret = 0;
1443
1444 /*
1445 * Don't support polled IO through this interface, and we can't
1446 * support non-blocking either. For the latter, this just causes
1447 * the kiocb to be handled from an async context.
1448 */
1449 if (kiocb->ki_flags & IOCB_HIPRI)
1450 return -EOPNOTSUPP;
1451 if (kiocb->ki_flags & IOCB_NOWAIT)
1452 return -EAGAIN;
1453
1454 while (iov_iter_count(iter)) {
1455 struct iovec iovec = iov_iter_iovec(iter);
1456 ssize_t nr;
1457
1458 if (rw == READ) {
1459 nr = file->f_op->read(file, iovec.iov_base,
1460 iovec.iov_len, &kiocb->ki_pos);
1461 } else {
1462 nr = file->f_op->write(file, iovec.iov_base,
1463 iovec.iov_len, &kiocb->ki_pos);
1464 }
1465
1466 if (nr < 0) {
1467 if (!ret)
1468 ret = nr;
1469 break;
1470 }
1471 ret += nr;
1472 if (nr != iovec.iov_len)
1473 break;
1474 iov_iter_advance(iter, nr);
1475 }
1476
1477 return ret;
1478}
1479
Pavel Begunkov267bc902019-11-07 01:41:08 +03001480static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
1481 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001482{
1483 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1484 struct kiocb *kiocb = &req->rw;
1485 struct iov_iter iter;
1486 struct file *file;
Jens Axboe31b51512019-01-18 22:56:34 -07001487 size_t iov_count;
Jens Axboe9d93a3f2019-05-15 13:53:07 -06001488 ssize_t read_size, ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001489
Pavel Begunkov267bc902019-11-07 01:41:08 +03001490 ret = io_prep_rw(req, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001491 if (ret)
1492 return ret;
1493 file = kiocb->ki_filp;
1494
Jens Axboe2b188cc2019-01-07 10:46:33 -07001495 if (unlikely(!(file->f_mode & FMODE_READ)))
Jens Axboe09bb8392019-03-13 12:39:28 -06001496 return -EBADF;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001497
Pavel Begunkov267bc902019-11-07 01:41:08 +03001498 ret = io_import_iovec(req->ctx, READ, &req->submit, &iovec, &iter);
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001499 if (ret < 0)
Jens Axboe09bb8392019-03-13 12:39:28 -06001500 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001501
Jens Axboe9d93a3f2019-05-15 13:53:07 -06001502 read_size = ret;
Jens Axboe9e645e112019-05-10 16:07:28 -06001503 if (req->flags & REQ_F_LINK)
1504 req->result = read_size;
1505
Jens Axboe31b51512019-01-18 22:56:34 -07001506 iov_count = iov_iter_count(&iter);
1507 ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001508 if (!ret) {
1509 ssize_t ret2;
1510
Jens Axboe32960612019-09-23 11:05:34 -06001511 if (file->f_op->read_iter)
1512 ret2 = call_read_iter(file, kiocb, &iter);
1513 else
1514 ret2 = loop_rw_iter(READ, file, kiocb, &iter);
1515
Jens Axboe9d93a3f2019-05-15 13:53:07 -06001516 /*
1517 * In case of a short read, punt to async. This can happen
1518 * if we have data partially cached. Alternatively we can
1519 * return the short read, in which case the application will
1520 * need to issue another SQE and wait for it. That SQE will
1521 * need async punt anyway, so it's more efficient to do it
1522 * here.
1523 */
Jens Axboe491381ce2019-10-17 09:20:46 -06001524 if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
1525 (req->flags & REQ_F_ISREG) &&
1526 ret2 > 0 && ret2 < read_size)
Jens Axboe9d93a3f2019-05-15 13:53:07 -06001527 ret2 = -EAGAIN;
1528 /* Catch -EAGAIN return for forced non-blocking submission */
Jens Axboe561fb042019-10-24 07:25:42 -06001529 if (!force_nonblock || ret2 != -EAGAIN)
Pavel Begunkov267bc902019-11-07 01:41:08 +03001530 kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
Jens Axboe561fb042019-10-24 07:25:42 -06001531 else
Jens Axboe2b188cc2019-01-07 10:46:33 -07001532 ret = -EAGAIN;
1533 }
1534 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001535 return ret;
1536}
1537
Pavel Begunkov267bc902019-11-07 01:41:08 +03001538static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
1539 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001540{
1541 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1542 struct kiocb *kiocb = &req->rw;
1543 struct iov_iter iter;
1544 struct file *file;
Jens Axboe31b51512019-01-18 22:56:34 -07001545 size_t iov_count;
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001546 ssize_t ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001547
Pavel Begunkov267bc902019-11-07 01:41:08 +03001548 ret = io_prep_rw(req, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001549 if (ret)
1550 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001551
Jens Axboe2b188cc2019-01-07 10:46:33 -07001552 file = kiocb->ki_filp;
1553 if (unlikely(!(file->f_mode & FMODE_WRITE)))
Jens Axboe09bb8392019-03-13 12:39:28 -06001554 return -EBADF;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001555
Pavel Begunkov267bc902019-11-07 01:41:08 +03001556 ret = io_import_iovec(req->ctx, WRITE, &req->submit, &iovec, &iter);
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001557 if (ret < 0)
Jens Axboe09bb8392019-03-13 12:39:28 -06001558 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001559
Jens Axboe9e645e112019-05-10 16:07:28 -06001560 if (req->flags & REQ_F_LINK)
1561 req->result = ret;
1562
Jens Axboe31b51512019-01-18 22:56:34 -07001563 iov_count = iov_iter_count(&iter);
1564
1565 ret = -EAGAIN;
Jens Axboe561fb042019-10-24 07:25:42 -06001566 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT))
Jens Axboe31b51512019-01-18 22:56:34 -07001567 goto out_free;
Jens Axboe31b51512019-01-18 22:56:34 -07001568
1569 ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001570 if (!ret) {
Roman Penyaev9bf79332019-03-25 20:09:24 +01001571 ssize_t ret2;
1572
Jens Axboe2b188cc2019-01-07 10:46:33 -07001573 /*
1574 * Open-code file_start_write here to grab freeze protection,
1575 * which will be released by another thread in
1576 * io_complete_rw(). Fool lockdep by telling it the lock got
1577 * released so that it doesn't complain about the held lock when
1578 * we return to userspace.
1579 */
Jens Axboe491381ce2019-10-17 09:20:46 -06001580 if (req->flags & REQ_F_ISREG) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07001581 __sb_start_write(file_inode(file)->i_sb,
1582 SB_FREEZE_WRITE, true);
1583 __sb_writers_release(file_inode(file)->i_sb,
1584 SB_FREEZE_WRITE);
1585 }
1586 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01001587
Jens Axboe32960612019-09-23 11:05:34 -06001588 if (file->f_op->write_iter)
1589 ret2 = call_write_iter(file, kiocb, &iter);
1590 else
1591 ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
Jens Axboe561fb042019-10-24 07:25:42 -06001592 if (!force_nonblock || ret2 != -EAGAIN)
Pavel Begunkov267bc902019-11-07 01:41:08 +03001593 kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
Jens Axboe561fb042019-10-24 07:25:42 -06001594 else
Roman Penyaev9bf79332019-03-25 20:09:24 +01001595 ret = -EAGAIN;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001596 }
Jens Axboe31b51512019-01-18 22:56:34 -07001597out_free:
Jens Axboe2b188cc2019-01-07 10:46:33 -07001598 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001599 return ret;
1600}
1601
1602/*
1603 * IORING_OP_NOP just posts a completion event, nothing else.
1604 */
Jens Axboe78e19bb2019-11-06 15:21:34 -07001605static int io_nop(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001606{
1607 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001608
Jens Axboedef596e2019-01-09 08:59:42 -07001609 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1610 return -EINVAL;
1611
Jens Axboe78e19bb2019-11-06 15:21:34 -07001612 io_cqring_add_event(req, 0);
Jens Axboeba816ad2019-09-28 11:36:45 -06001613 io_put_req(req, NULL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001614 return 0;
1615}
1616
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001617static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1618{
Jens Axboe6b063142019-01-10 22:13:58 -07001619 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001620
Jens Axboe09bb8392019-03-13 12:39:28 -06001621 if (!req->file)
1622 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001623
Jens Axboe6b063142019-01-10 22:13:58 -07001624 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07001625 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07001626 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001627 return -EINVAL;
1628
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001629 return 0;
1630}
1631
1632static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboeba816ad2019-09-28 11:36:45 -06001633 struct io_kiocb **nxt, bool force_nonblock)
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001634{
1635 loff_t sqe_off = READ_ONCE(sqe->off);
1636 loff_t sqe_len = READ_ONCE(sqe->len);
1637 loff_t end = sqe_off + sqe_len;
1638 unsigned fsync_flags;
1639 int ret;
1640
1641 fsync_flags = READ_ONCE(sqe->fsync_flags);
1642 if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
1643 return -EINVAL;
1644
1645 ret = io_prep_fsync(req, sqe);
1646 if (ret)
1647 return ret;
1648
1649 /* fsync always requires a blocking context */
1650 if (force_nonblock)
1651 return -EAGAIN;
1652
1653 ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
1654 end > 0 ? end : LLONG_MAX,
1655 fsync_flags & IORING_FSYNC_DATASYNC);
1656
Jens Axboe9e645e112019-05-10 16:07:28 -06001657 if (ret < 0 && (req->flags & REQ_F_LINK))
1658 req->flags |= REQ_F_FAIL_LINK;
Jens Axboe78e19bb2019-11-06 15:21:34 -07001659 io_cqring_add_event(req, ret);
Jens Axboeba816ad2019-09-28 11:36:45 -06001660 io_put_req(req, nxt);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001661 return 0;
1662}
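
/*
 * Submission sketch for the fsync handler above (illustrative only; field
 * names are from the io_uring_sqe uapi, file_fd and tag are made up):
 *
 *	sqe->opcode	 = IORING_OP_FSYNC;
 *	sqe->fd		 = file_fd;
 *	sqe->fsync_flags = IORING_FSYNC_DATASYNC;	// data-only sync
 *	sqe->off	 = 0;				// optional range start
 *	sqe->len	 = 0;				// 0 == to end of file
 *	sqe->user_data	 = tag;
 *
 * With off and len both zero, end == 0 and the vfs_fsync_range() call
 * above covers the whole file (LLONG_MAX).
 */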
1663
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001664static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1665{
1666 struct io_ring_ctx *ctx = req->ctx;
1667 int ret = 0;
1668
1669 if (!req->file)
1670 return -EBADF;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001671
1672 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1673 return -EINVAL;
1674 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
1675 return -EINVAL;
1676
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001677 return ret;
1678}
1679
1680static int io_sync_file_range(struct io_kiocb *req,
1681 const struct io_uring_sqe *sqe,
Jens Axboeba816ad2019-09-28 11:36:45 -06001682 struct io_kiocb **nxt,
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001683 bool force_nonblock)
1684{
1685 loff_t sqe_off;
1686 loff_t sqe_len;
1687 unsigned flags;
1688 int ret;
1689
1690 ret = io_prep_sfr(req, sqe);
1691 if (ret)
1692 return ret;
1693
1694 /* sync_file_range always requires a blocking context */
1695 if (force_nonblock)
1696 return -EAGAIN;
1697
1698 sqe_off = READ_ONCE(sqe->off);
1699 sqe_len = READ_ONCE(sqe->len);
1700 flags = READ_ONCE(sqe->sync_range_flags);
1701
1702 ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
1703
Jens Axboe9e645e112019-05-10 16:07:28 -06001704 if (ret < 0 && (req->flags & REQ_F_LINK))
1705 req->flags |= REQ_F_FAIL_LINK;
Jens Axboe78e19bb2019-11-06 15:21:34 -07001706 io_cqring_add_event(req, ret);
Jens Axboeba816ad2019-09-28 11:36:45 -06001707 io_put_req(req, nxt);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001708 return 0;
1709}
1710
Jens Axboe0fa03c62019-04-19 13:34:07 -06001711#if defined(CONFIG_NET)
Jens Axboeaa1fa282019-04-19 13:38:09 -06001712static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboeba816ad2019-09-28 11:36:45 -06001713 struct io_kiocb **nxt, bool force_nonblock,
Jens Axboeaa1fa282019-04-19 13:38:09 -06001714 long (*fn)(struct socket *, struct user_msghdr __user *,
1715 unsigned int))
1716{
Jens Axboe0fa03c62019-04-19 13:34:07 -06001717 struct socket *sock;
1718 int ret;
1719
1720 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1721 return -EINVAL;
1722
1723 sock = sock_from_file(req->file, &ret);
1724 if (sock) {
1725 struct user_msghdr __user *msg;
1726 unsigned flags;
1727
1728 flags = READ_ONCE(sqe->msg_flags);
1729 if (flags & MSG_DONTWAIT)
1730 req->flags |= REQ_F_NOWAIT;
1731 else if (force_nonblock)
1732 flags |= MSG_DONTWAIT;
1733
1734 msg = (struct user_msghdr __user *) (unsigned long)
1735 READ_ONCE(sqe->addr);
1736
Jens Axboeaa1fa282019-04-19 13:38:09 -06001737 ret = fn(sock, msg, flags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06001738 if (force_nonblock && ret == -EAGAIN)
1739 return ret;
1740 }
1741
Jens Axboe78e19bb2019-11-06 15:21:34 -07001742 io_cqring_add_event(req, ret);
Jens Axboef1f40852019-11-05 20:33:16 -07001743 if (ret < 0 && (req->flags & REQ_F_LINK))
1744 req->flags |= REQ_F_FAIL_LINK;
Jens Axboeba816ad2019-09-28 11:36:45 -06001745 io_put_req(req, nxt);
Jens Axboe0fa03c62019-04-19 13:34:07 -06001746 return 0;
Jens Axboeaa1fa282019-04-19 13:38:09 -06001747}
1748#endif
1749
1750static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboeba816ad2019-09-28 11:36:45 -06001751 struct io_kiocb **nxt, bool force_nonblock)
Jens Axboeaa1fa282019-04-19 13:38:09 -06001752{
1753#if defined(CONFIG_NET)
Jens Axboeba816ad2019-09-28 11:36:45 -06001754 return io_send_recvmsg(req, sqe, nxt, force_nonblock,
1755 __sys_sendmsg_sock);
Jens Axboeaa1fa282019-04-19 13:38:09 -06001756#else
1757 return -EOPNOTSUPP;
1758#endif
1759}
1760
1761static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
Jens Axboeba816ad2019-09-28 11:36:45 -06001762 struct io_kiocb **nxt, bool force_nonblock)
Jens Axboeaa1fa282019-04-19 13:38:09 -06001763{
1764#if defined(CONFIG_NET)
Jens Axboeba816ad2019-09-28 11:36:45 -06001765 return io_send_recvmsg(req, sqe, nxt, force_nonblock,
1766 __sys_recvmsg_sock);
Jens Axboe0fa03c62019-04-19 13:34:07 -06001767#else
1768 return -EOPNOTSUPP;
1769#endif
1770}
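
/*
 * Submission sketch for the two opcodes above (illustrative only; msg is
 * a struct msghdr prepared by the application, sockfd and tag are made
 * up):
 *
 *	sqe->opcode	= IORING_OP_SENDMSG;	// or IORING_OP_RECVMSG
 *	sqe->fd		= sockfd;
 *	sqe->addr	= (unsigned long) &msg;
 *	sqe->msg_flags	= MSG_NOSIGNAL;
 *	sqe->user_data	= tag;
 *
 * io_send_recvmsg() passes msg_flags through to __sys_sendmsg_sock() /
 * __sys_recvmsg_sock(), adding MSG_DONTWAIT for a non-blocking attempt.
 */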
1771
Jens Axboe17f2fe32019-10-17 14:42:58 -06001772static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1773 struct io_kiocb **nxt, bool force_nonblock)
1774{
1775#if defined(CONFIG_NET)
1776 struct sockaddr __user *addr;
1777 int __user *addr_len;
1778 unsigned file_flags;
1779 int flags, ret;
1780
1781 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
1782 return -EINVAL;
1783 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1784 return -EINVAL;
1785
1786 addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
1787 addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2);
1788 flags = READ_ONCE(sqe->accept_flags);
1789 file_flags = force_nonblock ? O_NONBLOCK : 0;
1790
1791 ret = __sys_accept4_file(req->file, file_flags, addr, addr_len, flags);
1792 if (ret == -EAGAIN && force_nonblock) {
1793 req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
1794 return -EAGAIN;
1795 }
1796 if (ret < 0 && (req->flags & REQ_F_LINK))
1797 req->flags |= REQ_F_FAIL_LINK;
Jens Axboe78e19bb2019-11-06 15:21:34 -07001798 io_cqring_add_event(req, ret);
Jens Axboe17f2fe32019-10-17 14:42:58 -06001799 io_put_req(req, nxt);
1800 return 0;
1801#else
1802 return -EOPNOTSUPP;
1803#endif
1804}
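
/*
 * Submission sketch for IORING_OP_ACCEPT (illustrative only; listen_fd,
 * addr, addrlen and tag are application-side values):
 *
 *	sqe->opcode	  = IORING_OP_ACCEPT;
 *	sqe->fd		  = listen_fd;
 *	sqe->addr	  = (unsigned long) &addr;	// struct sockaddr
 *	sqe->addr2	  = (unsigned long) &addrlen;	// int addrlen
 *	sqe->accept_flags = SOCK_CLOEXEC;		// accept4() flags
 *	sqe->user_data	  = tag;
 *
 * The completion result is the new file descriptor or a negative errno,
 * exactly as __sys_accept4_file() returns it above.
 */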
1805
Jens Axboe221c5eb2019-01-17 09:41:58 -07001806static void io_poll_remove_one(struct io_kiocb *req)
1807{
1808 struct io_poll_iocb *poll = &req->poll;
1809
1810 spin_lock(&poll->head->lock);
1811 WRITE_ONCE(poll->canceled, true);
1812 if (!list_empty(&poll->wait.entry)) {
1813 list_del_init(&poll->wait.entry);
Jens Axboe18d9be12019-09-10 09:13:05 -06001814 io_queue_async_work(req->ctx, req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001815 }
1816 spin_unlock(&poll->head->lock);
1817
1818 list_del_init(&req->list);
1819}
1820
1821static void io_poll_remove_all(struct io_ring_ctx *ctx)
1822{
1823 struct io_kiocb *req;
1824
1825 spin_lock_irq(&ctx->completion_lock);
1826 while (!list_empty(&ctx->cancel_list)) {
1827		req = list_first_entry(&ctx->cancel_list, struct io_kiocb, list);
1828 io_poll_remove_one(req);
1829 }
1830 spin_unlock_irq(&ctx->completion_lock);
1831}
1832
1833/*
1834 * Find a running poll command that matches one specified in sqe->addr,
1835 * and remove it if found.
1836 */
1837static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1838{
1839 struct io_ring_ctx *ctx = req->ctx;
1840 struct io_kiocb *poll_req, *next;
1841 int ret = -ENOENT;
1842
1843 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1844 return -EINVAL;
1845 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
1846 sqe->poll_events)
1847 return -EINVAL;
1848
1849 spin_lock_irq(&ctx->completion_lock);
1850 list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
1851 if (READ_ONCE(sqe->addr) == poll_req->user_data) {
1852 io_poll_remove_one(poll_req);
1853 ret = 0;
1854 break;
1855 }
1856 }
1857 spin_unlock_irq(&ctx->completion_lock);
1858
Jens Axboe78e19bb2019-11-06 15:21:34 -07001859 io_cqring_add_event(req, ret);
Jens Axboef1f40852019-11-05 20:33:16 -07001860 if (ret < 0 && (req->flags & REQ_F_LINK))
1861 req->flags |= REQ_F_FAIL_LINK;
Jens Axboeba816ad2019-09-28 11:36:45 -06001862 io_put_req(req, NULL);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001863 return 0;
1864}
1865
Jens Axboe8c838782019-03-12 15:48:16 -06001866static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
1867 __poll_t mask)
Jens Axboe221c5eb2019-01-17 09:41:58 -07001868{
Jens Axboe8c838782019-03-12 15:48:16 -06001869 req->poll.done = true;
Jens Axboe78e19bb2019-11-06 15:21:34 -07001870 io_cqring_fill_event(req, mangle_poll(mask));
Jens Axboe8c838782019-03-12 15:48:16 -06001871 io_commit_cqring(ctx);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001872}
1873
Jens Axboe561fb042019-10-24 07:25:42 -06001874static void io_poll_complete_work(struct io_wq_work **workptr)
Jens Axboe221c5eb2019-01-17 09:41:58 -07001875{
Jens Axboe561fb042019-10-24 07:25:42 -06001876 struct io_wq_work *work = *workptr;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001877 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1878 struct io_poll_iocb *poll = &req->poll;
1879 struct poll_table_struct pt = { ._key = poll->events };
1880 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe89723d02019-11-05 15:32:58 -07001881 struct io_kiocb *nxt = NULL;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001882 __poll_t mask = 0;
1883
Jens Axboe561fb042019-10-24 07:25:42 -06001884 if (work->flags & IO_WQ_WORK_CANCEL)
1885 WRITE_ONCE(poll->canceled, true);
1886
Jens Axboe221c5eb2019-01-17 09:41:58 -07001887 if (!READ_ONCE(poll->canceled))
1888 mask = vfs_poll(poll->file, &pt) & poll->events;
1889
1890 /*
1891 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1892 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
1893 * synchronize with them. In the cancellation case the list_del_init
1894 * itself is not actually needed, but harmless so we keep it in to
1895 * avoid further branches in the fast path.
1896 */
1897 spin_lock_irq(&ctx->completion_lock);
1898 if (!mask && !READ_ONCE(poll->canceled)) {
1899 add_wait_queue(poll->head, &poll->wait);
1900 spin_unlock_irq(&ctx->completion_lock);
1901 return;
1902 }
1903 list_del_init(&req->list);
Jens Axboe8c838782019-03-12 15:48:16 -06001904 io_poll_complete(ctx, req, mask);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001905 spin_unlock_irq(&ctx->completion_lock);
1906
Jens Axboe8c838782019-03-12 15:48:16 -06001907 io_cqring_ev_posted(ctx);
Jens Axboe89723d02019-11-05 15:32:58 -07001908
1909 io_put_req(req, &nxt);
1910 if (nxt)
1911 *workptr = &nxt->work;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001912}
1913
1914static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1915 void *key)
1916{
1917 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
1918 wait);
1919 struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
1920 struct io_ring_ctx *ctx = req->ctx;
1921 __poll_t mask = key_to_poll(key);
Jens Axboe8c838782019-03-12 15:48:16 -06001922 unsigned long flags;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001923
1924 /* for instances that support it check for an event match first: */
Jens Axboe8c838782019-03-12 15:48:16 -06001925 if (mask && !(mask & poll->events))
1926 return 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001927
1928 list_del_init(&poll->wait.entry);
Jens Axboe8c838782019-03-12 15:48:16 -06001929
1930 if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
1931 list_del(&req->list);
1932 io_poll_complete(ctx, req, mask);
1933 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1934
1935 io_cqring_ev_posted(ctx);
Jens Axboeba816ad2019-09-28 11:36:45 -06001936 io_put_req(req, NULL);
Jens Axboe8c838782019-03-12 15:48:16 -06001937 } else {
Jens Axboe18d9be12019-09-10 09:13:05 -06001938 io_queue_async_work(ctx, req);
Jens Axboe8c838782019-03-12 15:48:16 -06001939 }
1940
Jens Axboe221c5eb2019-01-17 09:41:58 -07001941 return 1;
1942}
1943
1944struct io_poll_table {
1945 struct poll_table_struct pt;
1946 struct io_kiocb *req;
1947 int error;
1948};
1949
1950static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1951 struct poll_table_struct *p)
1952{
1953 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
1954
1955 if (unlikely(pt->req->poll.head)) {
1956 pt->error = -EINVAL;
1957 return;
1958 }
1959
1960 pt->error = 0;
1961 pt->req->poll.head = head;
1962 add_wait_queue(head, &pt->req->poll.wait);
1963}
1964
Jens Axboe89723d02019-11-05 15:32:58 -07001965static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1966 struct io_kiocb **nxt)
Jens Axboe221c5eb2019-01-17 09:41:58 -07001967{
1968 struct io_poll_iocb *poll = &req->poll;
1969 struct io_ring_ctx *ctx = req->ctx;
1970 struct io_poll_table ipt;
Jens Axboe8c838782019-03-12 15:48:16 -06001971 bool cancel = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001972 __poll_t mask;
1973 u16 events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001974
1975 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1976 return -EINVAL;
1977 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1978 return -EINVAL;
Jens Axboe09bb8392019-03-13 12:39:28 -06001979 if (!poll->file)
1980 return -EBADF;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001981
Jens Axboe6cc47d12019-09-18 11:18:23 -06001982 req->submit.sqe = NULL;
Jens Axboe561fb042019-10-24 07:25:42 -06001983 INIT_IO_WORK(&req->work, io_poll_complete_work);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001984 events = READ_ONCE(sqe->poll_events);
1985 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
1986
Jens Axboe221c5eb2019-01-17 09:41:58 -07001987 poll->head = NULL;
Jens Axboe8c838782019-03-12 15:48:16 -06001988 poll->done = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001989 poll->canceled = false;
1990
1991 ipt.pt._qproc = io_poll_queue_proc;
1992 ipt.pt._key = poll->events;
1993 ipt.req = req;
1994 ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1995
1996	/* initialize the list so that we can do list_empty checks */
1997 INIT_LIST_HEAD(&poll->wait.entry);
1998 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
1999
Jens Axboe36703242019-07-25 10:20:18 -06002000 INIT_LIST_HEAD(&req->list);
2001
Jens Axboe221c5eb2019-01-17 09:41:58 -07002002 mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07002003
2004 spin_lock_irq(&ctx->completion_lock);
Jens Axboe8c838782019-03-12 15:48:16 -06002005 if (likely(poll->head)) {
2006 spin_lock(&poll->head->lock);
2007 if (unlikely(list_empty(&poll->wait.entry))) {
2008 if (ipt.error)
2009 cancel = true;
2010 ipt.error = 0;
2011 mask = 0;
2012 }
2013 if (mask || ipt.error)
2014 list_del_init(&poll->wait.entry);
2015 else if (cancel)
2016 WRITE_ONCE(poll->canceled, true);
2017 else if (!poll->done) /* actually waiting for an event */
2018 list_add_tail(&req->list, &ctx->cancel_list);
2019 spin_unlock(&poll->head->lock);
Jens Axboe221c5eb2019-01-17 09:41:58 -07002020 }
Jens Axboe8c838782019-03-12 15:48:16 -06002021 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06002022 ipt.error = 0;
2023 io_poll_complete(ctx, req, mask);
2024 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07002025 spin_unlock_irq(&ctx->completion_lock);
2026
Jens Axboe8c838782019-03-12 15:48:16 -06002027 if (mask) {
2028 io_cqring_ev_posted(ctx);
Jens Axboe89723d02019-11-05 15:32:58 -07002029 io_put_req(req, nxt);
Jens Axboe221c5eb2019-01-17 09:41:58 -07002030 }
Jens Axboe8c838782019-03-12 15:48:16 -06002031 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07002032}
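
/*
 * Submission sketch for IORING_OP_POLL_ADD (illustrative only; sockfd and
 * tag are application-side values). This is a single-shot poll: it
 * completes once with the triggered mask, unlike a persistent epoll
 * registration.
 *
 *	sqe->opcode	 = IORING_OP_POLL_ADD;
 *	sqe->fd		 = sockfd;
 *	sqe->poll_events = POLLIN;
 *	sqe->user_data	 = tag;
 *
 * The CQE result is the mangle_poll()'d event mask; a later
 * IORING_OP_POLL_REMOVE with sqe->addr == tag cancels it.
 */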
2033
Jens Axboe5262f562019-09-17 12:26:57 -06002034static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
2035{
2036 struct io_ring_ctx *ctx;
Jens Axboe11365042019-10-16 09:08:32 -06002037 struct io_kiocb *req;
Jens Axboe5262f562019-09-17 12:26:57 -06002038 unsigned long flags;
2039
2040 req = container_of(timer, struct io_kiocb, timeout.timer);
2041 ctx = req->ctx;
2042 atomic_inc(&ctx->cq_timeouts);
2043
2044 spin_lock_irqsave(&ctx->completion_lock, flags);
zhangyi (F)ef036812019-10-23 15:10:08 +08002045 /*
Jens Axboe11365042019-10-16 09:08:32 -06002046 * We could be racing with timeout deletion. If the list is empty,
2047 * then timeout lookup already found it and will be handling it.
zhangyi (F)ef036812019-10-23 15:10:08 +08002048 */
Jens Axboe842f9612019-10-29 12:34:10 -06002049 if (!list_empty(&req->list)) {
Jens Axboe11365042019-10-16 09:08:32 -06002050 struct io_kiocb *prev;
Jens Axboe5262f562019-09-17 12:26:57 -06002051
Jens Axboe11365042019-10-16 09:08:32 -06002052 /*
2053	 * Adjust the sequence of requests queued before the current one,
2054	 * because this request will consume a slot in the cq_ring and the
2055	 * cq_tail pointer will be advanced; otherwise other timeout reqs
2056	 * may return early without waiting for enough wait_nr completions.
2057 */
2058 prev = req;
2059 list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
2060 prev->sequence++;
Jens Axboe11365042019-10-16 09:08:32 -06002061 list_del_init(&req->list);
Jens Axboe11365042019-10-16 09:08:32 -06002062 }
Jens Axboe842f9612019-10-29 12:34:10 -06002063
Jens Axboe78e19bb2019-11-06 15:21:34 -07002064 io_cqring_fill_event(req, -ETIME);
Jens Axboe842f9612019-10-29 12:34:10 -06002065 io_commit_cqring(ctx);
Jens Axboe5262f562019-09-17 12:26:57 -06002066 spin_unlock_irqrestore(&ctx->completion_lock, flags);
2067
Jens Axboe842f9612019-10-29 12:34:10 -06002068 io_cqring_ev_posted(ctx);
Jens Axboef1f40852019-11-05 20:33:16 -07002069 if (req->flags & REQ_F_LINK)
2070 req->flags |= REQ_F_FAIL_LINK;
Jens Axboe842f9612019-10-29 12:34:10 -06002071 io_put_req(req, NULL);
Jens Axboe11365042019-10-16 09:08:32 -06002072 return HRTIMER_NORESTART;
2073}
2074
2075/*
2076 * Remove or update an existing timeout command
2077 */
2078static int io_timeout_remove(struct io_kiocb *req,
2079 const struct io_uring_sqe *sqe)
2080{
2081 struct io_ring_ctx *ctx = req->ctx;
2082 struct io_kiocb *treq;
2083 int ret = -ENOENT;
2084 __u64 user_data;
2085 unsigned flags;
2086
2087 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2088 return -EINVAL;
2089 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
2090 return -EINVAL;
2091 flags = READ_ONCE(sqe->timeout_flags);
2092 if (flags)
2093 return -EINVAL;
2094
2095 user_data = READ_ONCE(sqe->addr);
2096 spin_lock_irq(&ctx->completion_lock);
2097 list_for_each_entry(treq, &ctx->timeout_list, list) {
2098 if (user_data == treq->user_data) {
2099 list_del_init(&treq->list);
2100 ret = 0;
2101 break;
2102 }
2103 }
2104
2105 /* didn't find timeout */
2106 if (ret) {
2107fill_ev:
Jens Axboe78e19bb2019-11-06 15:21:34 -07002108 io_cqring_fill_event(req, ret);
Jens Axboe11365042019-10-16 09:08:32 -06002109 io_commit_cqring(ctx);
2110 spin_unlock_irq(&ctx->completion_lock);
2111 io_cqring_ev_posted(ctx);
Jens Axboef1f40852019-11-05 20:33:16 -07002112 if (req->flags & REQ_F_LINK)
2113 req->flags |= REQ_F_FAIL_LINK;
Jens Axboe11365042019-10-16 09:08:32 -06002114 io_put_req(req, NULL);
2115 return 0;
2116 }
2117
2118 ret = hrtimer_try_to_cancel(&treq->timeout.timer);
2119 if (ret == -1) {
2120 ret = -EBUSY;
2121 goto fill_ev;
2122 }
2123
Jens Axboe78e19bb2019-11-06 15:21:34 -07002124 io_cqring_fill_event(req, 0);
2125 io_cqring_fill_event(treq, -ECANCELED);
Jens Axboe11365042019-10-16 09:08:32 -06002126 io_commit_cqring(ctx);
2127 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06002128 io_cqring_ev_posted(ctx);
2129
Jens Axboe11365042019-10-16 09:08:32 -06002130 io_put_req(treq, NULL);
Jens Axboeba816ad2019-09-28 11:36:45 -06002131 io_put_req(req, NULL);
Jens Axboe11365042019-10-16 09:08:32 -06002132 return 0;
Jens Axboe5262f562019-09-17 12:26:57 -06002133}
2134
2135static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2136{
yangerkun5da0fb12019-10-15 21:59:29 +08002137 unsigned count;
Jens Axboe5262f562019-09-17 12:26:57 -06002138 struct io_ring_ctx *ctx = req->ctx;
2139 struct list_head *entry;
Jens Axboea41525a2019-10-15 16:48:15 -06002140 enum hrtimer_mode mode;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06002141 struct timespec64 ts;
zhangyi (F)a1f58ba2019-10-23 15:10:09 +08002142 unsigned span = 0;
Jens Axboea41525a2019-10-15 16:48:15 -06002143 unsigned flags;
Jens Axboe5262f562019-09-17 12:26:57 -06002144
2145 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2146 return -EINVAL;
Jens Axboea41525a2019-10-15 16:48:15 -06002147 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len != 1)
2148 return -EINVAL;
2149 flags = READ_ONCE(sqe->timeout_flags);
2150 if (flags & ~IORING_TIMEOUT_ABS)
Jens Axboe5262f562019-09-17 12:26:57 -06002151 return -EINVAL;
Arnd Bergmannbdf20072019-10-01 09:53:29 -06002152
2153 if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
Jens Axboe5262f562019-09-17 12:26:57 -06002154 return -EFAULT;
2155
Jens Axboe11365042019-10-16 09:08:32 -06002156 if (flags & IORING_TIMEOUT_ABS)
2157 mode = HRTIMER_MODE_ABS;
2158 else
2159 mode = HRTIMER_MODE_REL;
2160
2161 hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, mode);
2162
Jens Axboe5262f562019-09-17 12:26:57 -06002163 /*
2164	 * sqe->off holds how many completion events need to occur before
2165	 * this timeout event is satisfied.
2166 */
2167 count = READ_ONCE(sqe->off);
2168 if (!count)
2169 count = 1;
2170
2171 req->sequence = ctx->cached_sq_head + count - 1;
yangerkun5da0fb12019-10-15 21:59:29 +08002172 /* reuse it to store the count */
2173 req->submit.sequence = count;
Jens Axboe5262f562019-09-17 12:26:57 -06002174 req->flags |= REQ_F_TIMEOUT;
2175
2176 /*
2177 * Insertion sort, ensuring the first entry in the list is always
2178 * the one we need first.
2179 */
Jens Axboe5262f562019-09-17 12:26:57 -06002180 spin_lock_irq(&ctx->completion_lock);
2181 list_for_each_prev(entry, &ctx->timeout_list) {
2182 struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
yangerkun5da0fb12019-10-15 21:59:29 +08002183 unsigned nxt_sq_head;
2184 long long tmp, tmp_nxt;
Jens Axboe5262f562019-09-17 12:26:57 -06002185
yangerkun5da0fb12019-10-15 21:59:29 +08002186 /*
2187 * Since cached_sq_head + count - 1 can overflow, use type long
2188 * long to store it.
2189 */
2190 tmp = (long long)ctx->cached_sq_head + count - 1;
2191 nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
2192 tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
2193
2194 /*
2195	 * cached_sq_head may overflow, but it can never overflow twice
2196	 * while any queued timeout req is still valid.
2197 */
2198 if (ctx->cached_sq_head < nxt_sq_head)
yangerkun8b07a652019-10-17 12:12:35 +08002199 tmp += UINT_MAX;
yangerkun5da0fb12019-10-15 21:59:29 +08002200
zhangyi (F)a1f58ba2019-10-23 15:10:09 +08002201 if (tmp > tmp_nxt)
Jens Axboe5262f562019-09-17 12:26:57 -06002202 break;
zhangyi (F)a1f58ba2019-10-23 15:10:09 +08002203
2204 /*
2205	 * The sequence of the requests after the inserted one, and of the
2206	 * inserted req itself, must be adjusted because each timeout req
2207	 * consumes a slot.
2207 */
2208 span++;
2209 nxt->sequence++;
Jens Axboe5262f562019-09-17 12:26:57 -06002210 }
zhangyi (F)a1f58ba2019-10-23 15:10:09 +08002211 req->sequence -= span;
Jens Axboe5262f562019-09-17 12:26:57 -06002212 list_add(&req->list, entry);
Jens Axboe5262f562019-09-17 12:26:57 -06002213 req->timeout.timer.function = io_timeout_fn;
Jens Axboea41525a2019-10-15 16:48:15 -06002214 hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts), mode);
Jens Axboe842f9612019-10-29 12:34:10 -06002215 spin_unlock_irq(&ctx->completion_lock);
Jens Axboe5262f562019-09-17 12:26:57 -06002216 return 0;
2217}
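
/*
 * Submission sketch for IORING_OP_TIMEOUT (illustrative only; ts and tag
 * are application-side values):
 *
 *	struct __kernel_timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *
 *	sqe->opcode	   = IORING_OP_TIMEOUT;
 *	sqe->addr	   = (unsigned long) &ts;
 *	sqe->len	   = 1;		// must be 1
 *	sqe->off	   = 8;		// ...or after 8 completions, if sooner
 *	sqe->timeout_flags = 0;		// relative; IORING_TIMEOUT_ABS for absolute
 *	sqe->user_data	   = tag;
 *
 * A zero sqe->off is treated as 1 above. If the timer fires first the
 * timeout completes with -ETIME; IORING_OP_TIMEOUT_REMOVE with
 * sqe->addr == tag cancels it with -ECANCELED instead.
 */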
2218
Jens Axboe62755e32019-10-28 21:49:21 -06002219static bool io_cancel_cb(struct io_wq_work *work, void *data)
2220{
2221 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2222
2223 return req->user_data == (unsigned long) data;
2224}
2225
Jens Axboee977d6d2019-11-05 12:39:45 -07002226static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
Jens Axboe62755e32019-10-28 21:49:21 -06002227{
Jens Axboe62755e32019-10-28 21:49:21 -06002228 enum io_wq_cancel cancel_ret;
Jens Axboe62755e32019-10-28 21:49:21 -06002229 int ret = 0;
2230
Jens Axboe62755e32019-10-28 21:49:21 -06002231 cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
2232 switch (cancel_ret) {
2233 case IO_WQ_CANCEL_OK:
2234 ret = 0;
2235 break;
2236 case IO_WQ_CANCEL_RUNNING:
2237 ret = -EALREADY;
2238 break;
2239 case IO_WQ_CANCEL_NOTFOUND:
2240 ret = -ENOENT;
2241 break;
2242 }
2243
Jens Axboee977d6d2019-11-05 12:39:45 -07002244 return ret;
2245}
2246
2247static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2248 struct io_kiocb **nxt)
2249{
2250 struct io_ring_ctx *ctx = req->ctx;
2251 void *sqe_addr;
2252 int ret;
2253
2254 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
2255 return -EINVAL;
2256 if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
2257 sqe->cancel_flags)
2258 return -EINVAL;
2259
2260 sqe_addr = (void *) (unsigned long) READ_ONCE(sqe->addr);
2261 ret = io_async_cancel_one(ctx, sqe_addr);
2262
Jens Axboe62755e32019-10-28 21:49:21 -06002263 if (ret < 0 && (req->flags & REQ_F_LINK))
2264 req->flags |= REQ_F_FAIL_LINK;
Jens Axboe78e19bb2019-11-06 15:21:34 -07002265 io_cqring_add_event(req, ret);
Jens Axboe62755e32019-10-28 21:49:21 -06002266 io_put_req(req, nxt);
2267 return 0;
2268}
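
/*
 * Submission sketch for IORING_OP_ASYNC_CANCEL (illustrative only). The
 * key field is sqe->addr: it must equal the user_data of the request to
 * cancel.
 *
 *	sqe->opcode	= IORING_OP_ASYNC_CANCEL;
 *	sqe->addr	= tag_of_request_to_cancel;
 *	sqe->user_data	= cancel_tag;
 *
 * The cancel request itself completes with 0 (target cancelled before it
 * ran), -EALREADY (target already running) or -ENOENT (no match),
 * mirroring the io_wq_cancel_cb() switch above.
 */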
2269
Pavel Begunkov267bc902019-11-07 01:41:08 +03002270static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req)
Jens Axboede0617e2019-04-06 21:51:27 -06002271{
Pavel Begunkov267bc902019-11-07 01:41:08 +03002272 const struct io_uring_sqe *sqe = req->submit.sqe;
Jens Axboede0617e2019-04-06 21:51:27 -06002273 struct io_uring_sqe *sqe_copy;
2274
2275 if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
2276 return 0;
2277
2278 sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
2279 if (!sqe_copy)
2280 return -EAGAIN;
2281
2282 spin_lock_irq(&ctx->completion_lock);
2283 if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
2284 spin_unlock_irq(&ctx->completion_lock);
2285 kfree(sqe_copy);
2286 return 0;
2287 }
2288
2289 memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
2290 req->submit.sqe = sqe_copy;
2291
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02002292 trace_io_uring_defer(ctx, req, false);
Jens Axboede0617e2019-04-06 21:51:27 -06002293 list_add_tail(&req->list, &ctx->defer_list);
2294 spin_unlock_irq(&ctx->completion_lock);
2295 return -EIOCBQUEUED;
2296}
2297
Jens Axboe2b188cc2019-01-07 10:46:33 -07002298static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov267bc902019-11-07 01:41:08 +03002299 struct io_kiocb **nxt, bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002300{
Jens Axboee0c5c572019-03-12 10:18:47 -06002301 int ret, opcode;
Pavel Begunkov267bc902019-11-07 01:41:08 +03002302 struct sqe_submit *s = &req->submit;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002303
Jens Axboe2b188cc2019-01-07 10:46:33 -07002304 opcode = READ_ONCE(s->sqe->opcode);
2305 switch (opcode) {
2306 case IORING_OP_NOP:
Jens Axboe78e19bb2019-11-06 15:21:34 -07002307 ret = io_nop(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002308 break;
2309 case IORING_OP_READV:
Jens Axboeedafcce2019-01-09 09:16:05 -07002310 if (unlikely(s->sqe->buf_index))
2311 return -EINVAL;
Pavel Begunkov267bc902019-11-07 01:41:08 +03002312 ret = io_read(req, nxt, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002313 break;
2314 case IORING_OP_WRITEV:
Jens Axboeedafcce2019-01-09 09:16:05 -07002315 if (unlikely(s->sqe->buf_index))
2316 return -EINVAL;
Pavel Begunkov267bc902019-11-07 01:41:08 +03002317 ret = io_write(req, nxt, force_nonblock);
Jens Axboeedafcce2019-01-09 09:16:05 -07002318 break;
2319 case IORING_OP_READ_FIXED:
Pavel Begunkov267bc902019-11-07 01:41:08 +03002320 ret = io_read(req, nxt, force_nonblock);
Jens Axboeedafcce2019-01-09 09:16:05 -07002321 break;
2322 case IORING_OP_WRITE_FIXED:
Pavel Begunkov267bc902019-11-07 01:41:08 +03002323 ret = io_write(req, nxt, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002324 break;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07002325 case IORING_OP_FSYNC:
Jens Axboeba816ad2019-09-28 11:36:45 -06002326 ret = io_fsync(req, s->sqe, nxt, force_nonblock);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07002327 break;
Jens Axboe221c5eb2019-01-17 09:41:58 -07002328 case IORING_OP_POLL_ADD:
Jens Axboe89723d02019-11-05 15:32:58 -07002329 ret = io_poll_add(req, s->sqe, nxt);
Jens Axboe221c5eb2019-01-17 09:41:58 -07002330 break;
2331 case IORING_OP_POLL_REMOVE:
2332 ret = io_poll_remove(req, s->sqe);
2333 break;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06002334 case IORING_OP_SYNC_FILE_RANGE:
Jens Axboeba816ad2019-09-28 11:36:45 -06002335 ret = io_sync_file_range(req, s->sqe, nxt, force_nonblock);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06002336 break;
Jens Axboe0fa03c62019-04-19 13:34:07 -06002337 case IORING_OP_SENDMSG:
Jens Axboeba816ad2019-09-28 11:36:45 -06002338 ret = io_sendmsg(req, s->sqe, nxt, force_nonblock);
Jens Axboe0fa03c62019-04-19 13:34:07 -06002339 break;
Jens Axboeaa1fa282019-04-19 13:38:09 -06002340 case IORING_OP_RECVMSG:
Jens Axboeba816ad2019-09-28 11:36:45 -06002341 ret = io_recvmsg(req, s->sqe, nxt, force_nonblock);
Jens Axboeaa1fa282019-04-19 13:38:09 -06002342 break;
Jens Axboe5262f562019-09-17 12:26:57 -06002343 case IORING_OP_TIMEOUT:
2344 ret = io_timeout(req, s->sqe);
2345 break;
Jens Axboe11365042019-10-16 09:08:32 -06002346 case IORING_OP_TIMEOUT_REMOVE:
2347 ret = io_timeout_remove(req, s->sqe);
2348 break;
Jens Axboe17f2fe32019-10-17 14:42:58 -06002349 case IORING_OP_ACCEPT:
2350 ret = io_accept(req, s->sqe, nxt, force_nonblock);
2351 break;
Jens Axboe62755e32019-10-28 21:49:21 -06002352 case IORING_OP_ASYNC_CANCEL:
2353 ret = io_async_cancel(req, s->sqe, nxt);
2354 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002355 default:
2356 ret = -EINVAL;
2357 break;
2358 }
2359
Jens Axboedef596e2019-01-09 08:59:42 -07002360 if (ret)
2361 return ret;
2362
2363 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboe9e645e112019-05-10 16:07:28 -06002364 if (req->result == -EAGAIN)
Jens Axboedef596e2019-01-09 08:59:42 -07002365 return -EAGAIN;
2366
2367 /* workqueue context doesn't hold uring_lock, grab it now */
Jackie Liuba5290c2019-10-09 09:19:59 +08002368 if (s->in_async)
Jens Axboedef596e2019-01-09 08:59:42 -07002369 mutex_lock(&ctx->uring_lock);
2370 io_iopoll_req_issued(req);
Jackie Liuba5290c2019-10-09 09:19:59 +08002371 if (s->in_async)
Jens Axboedef596e2019-01-09 08:59:42 -07002372 mutex_unlock(&ctx->uring_lock);
2373 }
2374
2375 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002376}
2377
Jens Axboe561fb042019-10-24 07:25:42 -06002378static void io_wq_submit_work(struct io_wq_work **workptr)
Jens Axboe31b51512019-01-18 22:56:34 -07002379{
Jens Axboe561fb042019-10-24 07:25:42 -06002380 struct io_wq_work *work = *workptr;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002381 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002382 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe561fb042019-10-24 07:25:42 -06002383 struct sqe_submit *s = &req->submit;
2384 const struct io_uring_sqe *sqe = s->sqe;
2385 struct io_kiocb *nxt = NULL;
2386 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002387
Jens Axboe561fb042019-10-24 07:25:42 -06002388 /* Ensure we clear previously set non-block flag */
2389 req->rw.ki_flags &= ~IOCB_NOWAIT;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002390
Jens Axboe561fb042019-10-24 07:25:42 -06002391 if (work->flags & IO_WQ_WORK_CANCEL)
2392 ret = -ECANCELED;
Jens Axboe31b51512019-01-18 22:56:34 -07002393
Jens Axboe561fb042019-10-24 07:25:42 -06002394 if (!ret) {
2395 s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
2396 s->in_async = true;
2397 do {
Pavel Begunkov267bc902019-11-07 01:41:08 +03002398 ret = __io_submit_sqe(ctx, req, &nxt, false);
Jens Axboe561fb042019-10-24 07:25:42 -06002399 /*
2400 * We can get EAGAIN for polled IO even though we're
2401 * forcing a sync submission from here, since we can't
2402 * wait for request slots on the block side.
2403 */
2404 if (ret != -EAGAIN)
2405 break;
2406 cond_resched();
2407 } while (1);
2408 }
Jens Axboe31b51512019-01-18 22:56:34 -07002409
Jens Axboe561fb042019-10-24 07:25:42 -06002410 /* drop submission reference */
2411 io_put_req(req, NULL);
Jens Axboe817869d2019-04-30 14:44:05 -06002412
Jens Axboe561fb042019-10-24 07:25:42 -06002413 if (ret) {
Jens Axboef1f40852019-11-05 20:33:16 -07002414 if (req->flags & REQ_F_LINK)
2415 req->flags |= REQ_F_FAIL_LINK;
Jens Axboe78e19bb2019-11-06 15:21:34 -07002416 io_cqring_add_event(req, ret);
Jens Axboeba816ad2019-09-28 11:36:45 -06002417 io_put_req(req, NULL);
Jens Axboeedafcce2019-01-09 09:16:05 -07002418 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002419
Jens Axboe561fb042019-10-24 07:25:42 -06002420	/* async context always uses a copy of the sqe */
2421 kfree(sqe);
2422
2423 /* if a dependent link is ready, pass it back */
2424 if (!ret && nxt) {
2425 io_prep_async_work(nxt);
2426 *workptr = &nxt->work;
Jens Axboeedafcce2019-01-09 09:16:05 -07002427 }
Jens Axboe31b51512019-01-18 22:56:34 -07002428}
Jens Axboe2b188cc2019-01-07 10:46:33 -07002429
Jens Axboe09bb8392019-03-13 12:39:28 -06002430static bool io_op_needs_file(const struct io_uring_sqe *sqe)
2431{
2432 int op = READ_ONCE(sqe->opcode);
2433
2434 switch (op) {
2435 case IORING_OP_NOP:
2436 case IORING_OP_POLL_REMOVE:
2437 return false;
2438 default:
2439 return true;
2440 }
2441}
2442
Jens Axboe65e19f52019-10-26 07:20:21 -06002443static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
2444 int index)
2445{
2446 struct fixed_file_table *table;
2447
2448 table = &ctx->file_table[index >> IORING_FILE_TABLE_SHIFT];
2449 return table->files[index & IORING_FILE_TABLE_MASK];
2450}
2451
Pavel Begunkov267bc902019-11-07 01:41:08 +03002452static int io_req_set_file(struct io_ring_ctx *ctx,
Jens Axboe09bb8392019-03-13 12:39:28 -06002453 struct io_submit_state *state, struct io_kiocb *req)
2454{
Pavel Begunkov267bc902019-11-07 01:41:08 +03002455 struct sqe_submit *s = &req->submit;
Jens Axboe09bb8392019-03-13 12:39:28 -06002456 unsigned flags;
2457 int fd;
2458
2459 flags = READ_ONCE(s->sqe->flags);
2460 fd = READ_ONCE(s->sqe->fd);
2461
Jackie Liu4fe2c962019-09-09 20:50:40 +08002462 if (flags & IOSQE_IO_DRAIN)
Jens Axboede0617e2019-04-06 21:51:27 -06002463 req->flags |= REQ_F_IO_DRAIN;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002464 /*
2465	 * All io needs to record the previous position (its sequence);
2466	 * for LINK or DRAIN it is used to mark the position of the first
2467	 * IO in the link list.
2468 */
2469 req->sequence = s->sequence;
Jens Axboede0617e2019-04-06 21:51:27 -06002470
Jens Axboe60c112b2019-06-21 10:20:18 -06002471 if (!io_op_needs_file(s->sqe))
Jens Axboe09bb8392019-03-13 12:39:28 -06002472 return 0;
Jens Axboe09bb8392019-03-13 12:39:28 -06002473
2474 if (flags & IOSQE_FIXED_FILE) {
Jens Axboe65e19f52019-10-26 07:20:21 -06002475 if (unlikely(!ctx->file_table ||
Jens Axboe09bb8392019-03-13 12:39:28 -06002476 (unsigned) fd >= ctx->nr_user_files))
2477 return -EBADF;
Jens Axboeb7620122019-10-26 07:22:55 -06002478 fd = array_index_nospec(fd, ctx->nr_user_files);
Jens Axboe65e19f52019-10-26 07:20:21 -06002479 req->file = io_file_from_index(ctx, fd);
2480 if (!req->file)
Jens Axboe08a45172019-10-03 08:11:03 -06002481 return -EBADF;
Jens Axboe09bb8392019-03-13 12:39:28 -06002482 req->flags |= REQ_F_FIXED_FILE;
2483 } else {
2484 if (s->needs_fixed_file)
2485 return -EBADF;
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02002486 trace_io_uring_file_get(ctx, fd);
Jens Axboe09bb8392019-03-13 12:39:28 -06002487 req->file = io_file_get(state, fd);
2488 if (unlikely(!req->file))
2489 return -EBADF;
2490 }
2491
2492 return 0;
2493}
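
/*
 * Submission sketch for IOSQE_FIXED_FILE (illustrative only; assumes the
 * file set was registered earlier with
 * io_uring_register(ring_fd, IORING_REGISTER_FILES, fds, nr_fds)):
 *
 *	sqe->flags |= IOSQE_FIXED_FILE;
 *	sqe->fd	    = 3;	// index into the registered file table,
 *				// not a regular file descriptor
 *
 * io_req_set_file() above then resolves the index through
 * io_file_from_index() instead of going through io_file_get().
 */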
2494
Jens Axboefcb323c2019-10-24 12:39:47 -06002495static int io_grab_files(struct io_ring_ctx *ctx, struct io_kiocb *req)
2496{
2497 int ret = -EBADF;
2498
2499 rcu_read_lock();
2500 spin_lock_irq(&ctx->inflight_lock);
2501 /*
2502 * We use the f_ops->flush() handler to ensure that we can flush
2503 * out work accessing these files if the fd is closed. Check if
2504 * the fd has changed since we started down this path, and disallow
2505 * this operation if it has.
2506 */
2507 if (fcheck(req->submit.ring_fd) == req->submit.ring_file) {
2508 list_add(&req->inflight_entry, &ctx->inflight_list);
2509 req->flags |= REQ_F_INFLIGHT;
2510 req->work.files = current->files;
2511 ret = 0;
2512 }
2513 spin_unlock_irq(&ctx->inflight_lock);
2514 rcu_read_unlock();
2515
2516 return ret;
2517}
2518
Jens Axboe2665abf2019-11-05 12:40:47 -07002519static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
2520{
2521 struct io_kiocb *req = container_of(timer, struct io_kiocb,
2522 timeout.timer);
2523 struct io_ring_ctx *ctx = req->ctx;
2524 struct io_kiocb *prev = NULL;
2525 unsigned long flags;
2526 int ret = -ETIME;
2527
2528 spin_lock_irqsave(&ctx->completion_lock, flags);
2529
2530 /*
2531 * We don't expect the list to be empty, that will only happen if we
2532 * race with the completion of the linked work.
2533 */
2534 if (!list_empty(&req->list)) {
2535 prev = list_entry(req->list.prev, struct io_kiocb, link_list);
2536 list_del_init(&req->list);
2537 }
2538
2539 spin_unlock_irqrestore(&ctx->completion_lock, flags);
2540
2541 if (prev) {
2542 void *user_data = (void *) (unsigned long) prev->user_data;
2543 ret = io_async_cancel_one(ctx, user_data);
2544 }
2545
Jens Axboe78e19bb2019-11-06 15:21:34 -07002546 io_cqring_add_event(req, ret);
Jens Axboe2665abf2019-11-05 12:40:47 -07002547 io_put_req(req, NULL);
2548 return HRTIMER_NORESTART;
2549}
2550
2551static int io_queue_linked_timeout(struct io_kiocb *req, struct io_kiocb *nxt)
2552{
2553 const struct io_uring_sqe *sqe = nxt->submit.sqe;
2554 enum hrtimer_mode mode;
2555 struct timespec64 ts;
2556 int ret = -EINVAL;
2557
2558 if (sqe->ioprio || sqe->buf_index || sqe->len != 1 || sqe->off)
2559 goto err;
2560 if (sqe->timeout_flags & ~IORING_TIMEOUT_ABS)
2561 goto err;
2562 if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr))) {
2563 ret = -EFAULT;
2564 goto err;
2565 }
2566
2567 req->flags |= REQ_F_LINK_TIMEOUT;
2568
2569 if (sqe->timeout_flags & IORING_TIMEOUT_ABS)
2570 mode = HRTIMER_MODE_ABS;
2571 else
2572 mode = HRTIMER_MODE_REL;
2573 hrtimer_init(&nxt->timeout.timer, CLOCK_MONOTONIC, mode);
2574 nxt->timeout.timer.function = io_link_timeout_fn;
2575 hrtimer_start(&nxt->timeout.timer, timespec64_to_ktime(ts), mode);
2576 ret = 0;
2577err:
2578 /* drop submission reference */
2579 io_put_req(nxt, NULL);
2580
2581 if (ret) {
2582 struct io_ring_ctx *ctx = req->ctx;
2583
2584 /*
2585	 * Break the link and fail the linked timeout; the parent will
2586	 * get failed by the regular submission path.
2587 */
2588 list_del(&nxt->list);
Jens Axboe78e19bb2019-11-06 15:21:34 -07002589 io_cqring_fill_event(nxt, ret);
Jens Axboe2665abf2019-11-05 12:40:47 -07002590 trace_io_uring_fail_link(req, nxt);
2591 io_commit_cqring(ctx);
2592 io_put_req(nxt, NULL);
2593 ret = -ECANCELED;
2594 }
2595
2596 return ret;
2597}
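
/*
 * Submission sketch for IORING_OP_LINK_TIMEOUT (illustrative only). The
 * timeout must be the SQE linked directly after the request it guards;
 * ts is an application-side struct __kernel_timespec:
 *
 *	// first SQE: the request being guarded
 *	sqe->opcode	 = IORING_OP_READV;
 *	sqe->flags	|= IOSQE_IO_LINK;
 *
 *	// second SQE: the linked timeout itself
 *	sqe->opcode	= IORING_OP_LINK_TIMEOUT;
 *	sqe->addr	= (unsigned long) &ts;
 *	sqe->len	= 1;
 *
 * If the timer fires first, io_link_timeout_fn() cancels the parent via
 * its user_data; an IORING_OP_LINK_TIMEOUT that is not linked is
 * rejected with -EINVAL in io_submit_sqe().
 */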
2598
2599static inline struct io_kiocb *io_get_linked_timeout(struct io_kiocb *req)
2600{
2601 struct io_kiocb *nxt;
2602
2603 if (!(req->flags & REQ_F_LINK))
2604 return NULL;
2605
2606 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
2607 if (nxt && nxt->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT)
2608 return nxt;
2609
2610 return NULL;
2611}
2612
Pavel Begunkov267bc902019-11-07 01:41:08 +03002613static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002614{
Jens Axboe2665abf2019-11-05 12:40:47 -07002615 struct io_kiocb *nxt;
Jens Axboee0c5c572019-03-12 10:18:47 -06002616 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002617
Jens Axboe2665abf2019-11-05 12:40:47 -07002618 nxt = io_get_linked_timeout(req);
2619 if (unlikely(nxt)) {
2620 ret = io_queue_linked_timeout(req, nxt);
2621 if (ret)
2622 goto err;
2623 }
2624
Pavel Begunkov267bc902019-11-07 01:41:08 +03002625 ret = __io_submit_sqe(ctx, req, NULL, true);
Jens Axboe491381ce2019-10-17 09:20:46 -06002626
2627 /*
2628 * We async punt it if the file wasn't marked NOWAIT, or if the file
2629 * doesn't support non-blocking read/write attempts
2630 */
2631 if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
2632 (req->flags & REQ_F_MUST_PUNT))) {
Pavel Begunkov267bc902019-11-07 01:41:08 +03002633 struct sqe_submit *s = &req->submit;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002634 struct io_uring_sqe *sqe_copy;
2635
Jackie Liu954dab12019-09-18 10:37:52 +08002636 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002637 if (sqe_copy) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07002638 s->sqe = sqe_copy;
Jens Axboefcb323c2019-10-24 12:39:47 -06002639 if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
2640 ret = io_grab_files(ctx, req);
2641 if (ret) {
2642 kfree(sqe_copy);
2643 goto err;
2644 }
2645 }
Jens Axboee65ef562019-03-12 10:16:44 -06002646
2647 /*
2648 * Queued up for async execution, worker will release
Jens Axboe9e645e112019-05-10 16:07:28 -06002649 * submit reference when the iocb is actually submitted.
Jens Axboee65ef562019-03-12 10:16:44 -06002650 */
Jens Axboefcb323c2019-10-24 12:39:47 -06002651 io_queue_async_work(ctx, req);
Jens Axboee65ef562019-03-12 10:16:44 -06002652 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002653 }
2654 }
Jens Axboee65ef562019-03-12 10:16:44 -06002655
2656 /* drop submission reference */
Jens Axboefcb323c2019-10-24 12:39:47 -06002657err:
Jens Axboeba816ad2019-09-28 11:36:45 -06002658 io_put_req(req, NULL);
Jens Axboee65ef562019-03-12 10:16:44 -06002659
2660 /* and drop final reference, if we failed */
Jens Axboe9e645e112019-05-10 16:07:28 -06002661 if (ret) {
Jens Axboe78e19bb2019-11-06 15:21:34 -07002662 io_cqring_add_event(req, ret);
Jens Axboe9e645e112019-05-10 16:07:28 -06002663 if (req->flags & REQ_F_LINK)
2664 req->flags |= REQ_F_FAIL_LINK;
Jens Axboeba816ad2019-09-28 11:36:45 -06002665 io_put_req(req, NULL);
Jens Axboe9e645e112019-05-10 16:07:28 -06002666 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002667
2668 return ret;
2669}
2670
Pavel Begunkov267bc902019-11-07 01:41:08 +03002671static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
Jackie Liu4fe2c962019-09-09 20:50:40 +08002672{
2673 int ret;
2674
Pavel Begunkov267bc902019-11-07 01:41:08 +03002675 ret = io_req_defer(ctx, req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002676 if (ret) {
2677 if (ret != -EIOCBQUEUED) {
Jens Axboe78e19bb2019-11-06 15:21:34 -07002678 io_cqring_add_event(req, ret);
2679 io_double_put_req(req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002680 }
2681 return 0;
2682 }
2683
Pavel Begunkov267bc902019-11-07 01:41:08 +03002684 return __io_queue_sqe(ctx, req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002685}
2686
2687static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov267bc902019-11-07 01:41:08 +03002688 struct io_kiocb *shadow)
Jackie Liu4fe2c962019-09-09 20:50:40 +08002689{
2690 int ret;
2691 int need_submit = false;
2692
2693 if (!shadow)
Pavel Begunkov267bc902019-11-07 01:41:08 +03002694 return io_queue_sqe(ctx, req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002695
2696 /*
2697	 * Mark the first IO in the link list as DRAIN and let all the
2698	 * following IOs enter the defer list; all earlier IO must
2699	 * complete before the link list runs.
2700 */
2701 req->flags |= REQ_F_IO_DRAIN;
Pavel Begunkov267bc902019-11-07 01:41:08 +03002702 ret = io_req_defer(ctx, req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002703 if (ret) {
2704 if (ret != -EIOCBQUEUED) {
Jens Axboe78e19bb2019-11-06 15:21:34 -07002705 io_cqring_add_event(req, ret);
2706 io_double_put_req(req);
Pavel Begunkov7b202382019-10-27 22:10:36 +03002707 __io_free_req(shadow);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002708 return 0;
2709 }
2710 } else {
2711 /*
2712	 * ret == 0 means that all IOs in front of the link io have
2713	 * finished running, so queue the link head now.
2714 */
2715 need_submit = true;
2716 }
2717
2718 /* Insert shadow req to defer_list, blocking next IOs */
2719 spin_lock_irq(&ctx->completion_lock);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02002720 trace_io_uring_defer(ctx, shadow, true);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002721 list_add_tail(&shadow->list, &ctx->defer_list);
2722 spin_unlock_irq(&ctx->completion_lock);
2723
2724 if (need_submit)
Pavel Begunkov267bc902019-11-07 01:41:08 +03002725 return __io_queue_sqe(ctx, req);
Jackie Liu4fe2c962019-09-09 20:50:40 +08002726
2727 return 0;
2728}
2729
Jens Axboe9e645e112019-05-10 16:07:28 -06002730#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
2731
Pavel Begunkov196be952019-11-07 01:41:06 +03002732static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Pavel Begunkov267bc902019-11-07 01:41:08 +03002733 struct io_submit_state *state, struct io_kiocb **link)
Jens Axboe9e645e112019-05-10 16:07:28 -06002734{
2735 struct io_uring_sqe *sqe_copy;
Pavel Begunkov267bc902019-11-07 01:41:08 +03002736 struct sqe_submit *s = &req->submit;
Jens Axboe9e645e112019-05-10 16:07:28 -06002737 int ret;
2738
Jens Axboe78e19bb2019-11-06 15:21:34 -07002739 req->user_data = s->sqe->user_data;
2740
Jens Axboe9e645e112019-05-10 16:07:28 -06002741 /* enforce forwards compatibility on users */
2742 if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
2743 ret = -EINVAL;
Pavel Begunkov196be952019-11-07 01:41:06 +03002744 goto err_req;
Jens Axboe9e645e112019-05-10 16:07:28 -06002745 }
2746
Pavel Begunkov267bc902019-11-07 01:41:08 +03002747 ret = io_req_set_file(ctx, state, req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002748 if (unlikely(ret)) {
2749err_req:
Jens Axboe78e19bb2019-11-06 15:21:34 -07002750 io_cqring_add_event(req, ret);
2751 io_double_put_req(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002752 return;
2753 }
2754
Jens Axboe9e645e112019-05-10 16:07:28 -06002755 /*
2756 * If we already have a head request, queue this one for async
2757 * submittal once the head completes. If we don't have a head but
2758 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2759 * submitted sync once the chain is complete. If none of those
2760 * conditions are true (normal request), then just queue it.
2761 */
2762 if (*link) {
2763 struct io_kiocb *prev = *link;
2764
2765 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2766 if (!sqe_copy) {
2767 ret = -EAGAIN;
2768 goto err_req;
2769 }
2770
2771 s->sqe = sqe_copy;
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02002772 trace_io_uring_link(ctx, req, prev);
Jens Axboe9e645e112019-05-10 16:07:28 -06002773 list_add_tail(&req->list, &prev->link_list);
2774 } else if (s->sqe->flags & IOSQE_IO_LINK) {
2775 req->flags |= REQ_F_LINK;
2776
Jens Axboe9e645e112019-05-10 16:07:28 -06002777 INIT_LIST_HEAD(&req->link_list);
2778 *link = req;
Jens Axboe2665abf2019-11-05 12:40:47 -07002779 } else if (READ_ONCE(s->sqe->opcode) == IORING_OP_LINK_TIMEOUT) {
2780 /* Only valid as a linked SQE */
2781 ret = -EINVAL;
2782 goto err_req;
Jens Axboe9e645e112019-05-10 16:07:28 -06002783 } else {
Pavel Begunkov267bc902019-11-07 01:41:08 +03002784 io_queue_sqe(ctx, req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002785 }
2786}
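/*
 * Illustrative userspace-side sketch (not kernel code): two SQEs chained
 * with IOSQE_IO_LINK, so the fsync only starts once the write completes.
 * get_free_sqe() is a hypothetical helper returning the next free SQE
 * slot; fd and iov are placeholders.
 *
 *	struct io_uring_sqe *sqe;
 *
 *	sqe = get_free_sqe(ring);
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_WRITEV;
 *	sqe->fd = fd;
 *	sqe->addr = (unsigned long) &iov;
 *	sqe->len = 1;
 *	sqe->flags = IOSQE_IO_LINK;	// next SQE waits for this one
 *	sqe->user_data = 1;
 *
 *	sqe = get_free_sqe(ring);
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_FSYNC;
 *	sqe->fd = fd;
 *	sqe->user_data = 2;		// no IOSQE_IO_LINK: chain ends here
 */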
2787
Jens Axboe9a56a232019-01-09 09:06:50 -07002788/*
2789 * Batched submission is done, ensure local IO is flushed out.
2790 */
2791static void io_submit_state_end(struct io_submit_state *state)
2792{
2793 blk_finish_plug(&state->plug);
Jens Axboe3d6770f2019-04-13 11:50:54 -06002794 io_file_put(state);
Jens Axboe2579f912019-01-09 09:10:43 -07002795 if (state->free_reqs)
2796 kmem_cache_free_bulk(req_cachep, state->free_reqs,
2797 &state->reqs[state->cur_req]);
Jens Axboe9a56a232019-01-09 09:06:50 -07002798}
2799
2800/*
2801 * Start submission side cache.
2802 */
2803static void io_submit_state_start(struct io_submit_state *state,
2804 struct io_ring_ctx *ctx, unsigned max_ios)
2805{
2806 blk_start_plug(&state->plug);
Jens Axboe2579f912019-01-09 09:10:43 -07002807 state->free_reqs = 0;
Jens Axboe9a56a232019-01-09 09:06:50 -07002808 state->file = NULL;
2809 state->ios_left = max_ios;
2810}
2811
Jens Axboe2b188cc2019-01-07 10:46:33 -07002812static void io_commit_sqring(struct io_ring_ctx *ctx)
2813{
Hristo Venev75b28af2019-08-26 17:23:46 +00002814 struct io_rings *rings = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002815
Hristo Venev75b28af2019-08-26 17:23:46 +00002816 if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07002817 /*
2818 * Ensure any loads from the SQEs are done at this point,
2819 * since once we write the new head, the application could
2820 * write new data to them.
2821 */
Hristo Venev75b28af2019-08-26 17:23:46 +00002822 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002823 }
2824}
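/*
 * Illustrative userspace-side sketch (not kernel code): the
 * smp_store_release() above pairs with an acquire load of sq.head in the
 * application before it reuses SQE slots. The sq->k* names are
 * placeholders for the mmap'ed ring pointers:
 *
 *	unsigned head = __atomic_load_n(sq->khead, __ATOMIC_ACQUIRE);
 *	unsigned avail = *sq->kring_entries - (sq->sqe_tail - head);
 *
 * Only 'avail' SQE slots may be refilled; anything past the acquired head
 * may still be read by the kernel.
 */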
2825
2826/*
Jens Axboe2b188cc2019-01-07 10:46:33 -07002827 * Fetch an sqe, if one is available. Note that s->sqe will point to memory
2828 * that is mapped by userspace. This means that care needs to be taken to
2829 * ensure that reads are stable, as we cannot rely on userspace always
2830 * being a good citizen. If members of the sqe are validated and then later
2831 * used, it's important that those reads are done through READ_ONCE() to
2832 * prevent a re-load down the line.
2833 */
2834static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
2835{
Hristo Venev75b28af2019-08-26 17:23:46 +00002836 struct io_rings *rings = ctx->rings;
2837 u32 *sq_array = ctx->sq_array;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002838 unsigned head;
2839
2840 /*
2841 * The cached sq head (or cq tail) serves two purposes:
2842 *
2843 * 1) allows us to batch the cost of updating the user visible
2844	 *    head.
2845 * 2) allows the kernel side to track the head on its own, even
2846 * though the application is the one updating it.
2847 */
2848 head = ctx->cached_sq_head;
Stefan Bühlere523a292019-04-19 11:57:44 +02002849 /* make sure SQ entry isn't read before tail */
Hristo Venev75b28af2019-08-26 17:23:46 +00002850 if (head == smp_load_acquire(&rings->sq.tail))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002851 return false;
2852
Hristo Venev75b28af2019-08-26 17:23:46 +00002853 head = READ_ONCE(sq_array[head & ctx->sq_mask]);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002854 if (head < ctx->sq_entries) {
Jens Axboefcb323c2019-10-24 12:39:47 -06002855 s->ring_file = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002856 s->sqe = &ctx->sq_sqes[head];
Jackie Liu8776f3f2019-09-09 20:50:39 +08002857 s->sequence = ctx->cached_sq_head;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002858 ctx->cached_sq_head++;
2859 return true;
2860 }
2861
2862 /* drop invalid entries */
2863 ctx->cached_sq_head++;
Jens Axboe498ccd92019-10-25 10:04:25 -06002864 ctx->cached_sq_dropped++;
2865 WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002866 return false;
2867}
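/*
 * Illustrative userspace-side sketch (not kernel code): the
 * smp_load_acquire() of sq.tail above pairs with a release store of the
 * tail by the application once the SQE and its sq_array slot are filled
 * in. fill_sqe() is a hypothetical helper and the sq->k* names stand in
 * for the mmap'ed ring pointers:
 *
 *	unsigned tail = sq->sqe_tail;
 *	unsigned index = tail & *sq->kring_mask;
 *
 *	fill_sqe(&sq->sqes[index]);
 *	sq->array[index] = index;
 *	sq->sqe_tail = tail + 1;
 *	__atomic_store_n(sq->ktail, sq->sqe_tail, __ATOMIC_RELEASE);
 */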
2868
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002869static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
Pavel Begunkovae9428c2019-11-06 00:22:14 +03002870 struct file *ring_file, int ring_fd,
2871 struct mm_struct **mm, bool async)
Jens Axboe6c271ce2019-01-10 11:22:30 -07002872{
2873 struct io_submit_state state, *statep = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002874 struct io_kiocb *link = NULL;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002875 struct io_kiocb *shadow_req = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002876 int i, submitted = 0;
Pavel Begunkov95a1b3ff2019-10-27 23:15:41 +03002877 bool mm_fault = false;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002878
2879 if (nr > IO_PLUG_THRESHOLD) {
2880 io_submit_state_start(&state, ctx, nr);
2881 statep = &state;
2882 }
2883
2884 for (i = 0; i < nr; i++) {
Pavel Begunkov196be952019-11-07 01:41:06 +03002885 struct io_kiocb *req;
Pavel Begunkov50585b92019-11-07 01:41:07 +03002886 unsigned int sqe_flags;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002887
Pavel Begunkov196be952019-11-07 01:41:06 +03002888 req = io_get_req(ctx, statep);
2889 if (unlikely(!req)) {
2890 if (!submitted)
2891 submitted = -EAGAIN;
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002892 break;
Pavel Begunkov196be952019-11-07 01:41:06 +03002893 }
Pavel Begunkov50585b92019-11-07 01:41:07 +03002894 if (!io_get_sqring(ctx, &req->submit)) {
Pavel Begunkov196be952019-11-07 01:41:06 +03002895 __io_free_req(req);
2896 break;
2897 }
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002898
Pavel Begunkov50585b92019-11-07 01:41:07 +03002899 if (io_sqe_needs_user(req->submit.sqe) && !*mm) {
Pavel Begunkov95a1b3ff2019-10-27 23:15:41 +03002900 mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
2901 if (!mm_fault) {
2902 use_mm(ctx->sqo_mm);
2903 *mm = ctx->sqo_mm;
2904 }
2905 }
2906
Pavel Begunkov50585b92019-11-07 01:41:07 +03002907 sqe_flags = req->submit.sqe->flags;
2908
2909 if (link && (sqe_flags & IOSQE_IO_DRAIN)) {
Jackie Liu4fe2c962019-09-09 20:50:40 +08002910 if (!shadow_req) {
2911 shadow_req = io_get_req(ctx, NULL);
Jackie Liua1041c22019-09-18 17:25:52 +08002912 if (unlikely(!shadow_req))
2913 goto out;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002914 shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2915 refcount_dec(&shadow_req->refs);
2916 }
Pavel Begunkov50585b92019-11-07 01:41:07 +03002917 shadow_req->sequence = req->submit.sequence;
Jackie Liu4fe2c962019-09-09 20:50:40 +08002918 }
2919
Jackie Liua1041c22019-09-18 17:25:52 +08002920out:
Pavel Begunkov50585b92019-11-07 01:41:07 +03002921 req->submit.ring_file = ring_file;
2922 req->submit.ring_fd = ring_fd;
2923 req->submit.has_user = *mm != NULL;
2924 req->submit.in_async = async;
2925 req->submit.needs_fixed_file = async;
2926 trace_io_uring_submit_sqe(ctx, req->submit.sqe->user_data,
2927 true, async);
Pavel Begunkov267bc902019-11-07 01:41:08 +03002928 io_submit_sqe(ctx, req, statep, &link);
Pavel Begunkov95a1b3ff2019-10-27 23:15:41 +03002929 submitted++;
Pavel Begunkove5eb6362019-11-06 00:22:15 +03002930
2931 /*
2932		 * If the last request isn't linked and a chain is in
2933		 * progress, that's the end of the chain. Submit the link head.
2934 */
Pavel Begunkov50585b92019-11-07 01:41:07 +03002935 if (!(sqe_flags & IOSQE_IO_LINK) && link) {
Pavel Begunkov267bc902019-11-07 01:41:08 +03002936 io_queue_link_head(ctx, link, shadow_req);
Pavel Begunkove5eb6362019-11-06 00:22:15 +03002937 link = NULL;
2938 shadow_req = NULL;
2939 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07002940 }
2941
Jens Axboe9e645e112019-05-10 16:07:28 -06002942 if (link)
Pavel Begunkov267bc902019-11-07 01:41:08 +03002943 io_queue_link_head(ctx, link, shadow_req);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002944 if (statep)
2945 io_submit_state_end(&state);
2946
Pavel Begunkovae9428c2019-11-06 00:22:14 +03002947 /* Commit SQ ring head once we've consumed and submitted all SQEs */
2948 io_commit_sqring(ctx);
2949
Jens Axboe6c271ce2019-01-10 11:22:30 -07002950 return submitted;
2951}
2952
2953static int io_sq_thread(void *data)
2954{
Jens Axboe6c271ce2019-01-10 11:22:30 -07002955 struct io_ring_ctx *ctx = data;
2956 struct mm_struct *cur_mm = NULL;
2957 mm_segment_t old_fs;
2958 DEFINE_WAIT(wait);
2959 unsigned inflight;
2960 unsigned long timeout;
2961
Jackie Liua4c0b3d2019-07-08 13:41:12 +08002962 complete(&ctx->sqo_thread_started);
2963
Jens Axboe6c271ce2019-01-10 11:22:30 -07002964 old_fs = get_fs();
2965 set_fs(USER_DS);
2966
2967 timeout = inflight = 0;
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002968 while (!kthread_should_park()) {
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03002969 unsigned int to_submit;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002970
2971 if (inflight) {
2972 unsigned nr_events = 0;
2973
2974 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboe2b2ed972019-10-25 10:06:15 -06002975 /*
2976 * inflight is the count of the maximum possible
2977 * entries we submitted, but it can be smaller
2978 * if we dropped some of them. If we don't have
2979 * poll entries available, then we know that we
2980 * have nothing left to poll for. Reset the
2981 * inflight count to zero in that case.
2982 */
2983 mutex_lock(&ctx->uring_lock);
2984 if (!list_empty(&ctx->poll_list))
2985 __io_iopoll_check(ctx, &nr_events, 0);
2986 else
2987 inflight = 0;
2988 mutex_unlock(&ctx->uring_lock);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002989 } else {
2990 /*
2991 * Normal IO, just pretend everything completed.
2992 * We don't have to poll completions for that.
2993 */
2994 nr_events = inflight;
2995 }
2996
2997 inflight -= nr_events;
2998 if (!inflight)
2999 timeout = jiffies + ctx->sq_thread_idle;
3000 }
3001
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03003002 to_submit = io_sqring_entries(ctx);
3003 if (!to_submit) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07003004 /*
3005 * We're polling. If we're within the defined idle
3006 * period, then let us spin without work before going
3007 * to sleep.
3008 */
3009 if (inflight || !time_after(jiffies, timeout)) {
Jens Axboe9831a902019-09-19 09:48:55 -06003010 cond_resched();
Jens Axboe6c271ce2019-01-10 11:22:30 -07003011 continue;
3012 }
3013
3014 /*
3015 * Drop cur_mm before scheduling, we can't hold it for
3016 * long periods (or over schedule()). Do this before
3017 * adding ourselves to the waitqueue, as the unuse/drop
3018 * may sleep.
3019 */
3020 if (cur_mm) {
3021 unuse_mm(cur_mm);
3022 mmput(cur_mm);
3023 cur_mm = NULL;
3024 }
3025
3026 prepare_to_wait(&ctx->sqo_wait, &wait,
3027 TASK_INTERRUPTIBLE);
3028
3029 /* Tell userspace we may need a wakeup call */
Hristo Venev75b28af2019-08-26 17:23:46 +00003030 ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
Stefan Bühler0d7bae62019-04-19 11:57:45 +02003031 /* make sure to read SQ tail after writing flags */
3032 smp_mb();
Jens Axboe6c271ce2019-01-10 11:22:30 -07003033
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03003034 to_submit = io_sqring_entries(ctx);
3035 if (!to_submit) {
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02003036 if (kthread_should_park()) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07003037 finish_wait(&ctx->sqo_wait, &wait);
3038 break;
3039 }
3040 if (signal_pending(current))
3041 flush_signals(current);
3042 schedule();
3043 finish_wait(&ctx->sqo_wait, &wait);
3044
Hristo Venev75b28af2019-08-26 17:23:46 +00003045 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
Jens Axboe6c271ce2019-01-10 11:22:30 -07003046 continue;
3047 }
3048 finish_wait(&ctx->sqo_wait, &wait);
3049
Hristo Venev75b28af2019-08-26 17:23:46 +00003050 ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
Jens Axboe6c271ce2019-01-10 11:22:30 -07003051 }
3052
Pavel Begunkovfb5ccc92019-10-25 12:31:30 +03003053 to_submit = min(to_submit, ctx->sq_entries);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03003054 inflight += io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm,
3055 true);
Jens Axboe6c271ce2019-01-10 11:22:30 -07003056 }
3057
3058 set_fs(old_fs);
3059 if (cur_mm) {
3060 unuse_mm(cur_mm);
3061 mmput(cur_mm);
3062 }
Jens Axboe06058632019-04-13 09:26:03 -06003063
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02003064 kthread_parkme();
Jens Axboe06058632019-04-13 09:26:03 -06003065
Jens Axboe6c271ce2019-01-10 11:22:30 -07003066 return 0;
3067}
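/*
 * Illustrative userspace-side sketch (not kernel code): with
 * IORING_SETUP_SQPOLL the application normally skips io_uring_enter()
 * entirely. After publishing a new SQ tail it needs a full barrier before
 * checking the flags, and only enters the kernel if this thread has set
 * IORING_SQ_NEED_WAKEUP. ring_fd and sq->kflags are placeholders:
 *
 *	__sync_synchronize();
 *	if (*sq->kflags & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 */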
3068
Jens Axboebda52162019-09-24 13:47:15 -06003069struct io_wait_queue {
3070 struct wait_queue_entry wq;
3071 struct io_ring_ctx *ctx;
3072 unsigned to_wait;
3073 unsigned nr_timeouts;
3074};
3075
3076static inline bool io_should_wake(struct io_wait_queue *iowq)
3077{
3078 struct io_ring_ctx *ctx = iowq->ctx;
3079
3080 /*
3081	 * Wake up if we have enough events, or if a timeout occurred since we
3082 * started waiting. For timeouts, we always want to return to userspace,
3083 * regardless of event count.
3084 */
Jens Axboe84f97dc2019-11-06 11:27:53 -07003085 return io_cqring_events(ctx) >= iowq->to_wait ||
Jens Axboebda52162019-09-24 13:47:15 -06003086 atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
3087}
3088
3089static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
3090 int wake_flags, void *key)
3091{
3092 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
3093 wq);
3094
3095 if (!io_should_wake(iowq))
3096 return -1;
3097
3098 return autoremove_wake_function(curr, mode, wake_flags, key);
3099}
3100
Jens Axboe2b188cc2019-01-07 10:46:33 -07003101/*
3102 * Wait until events become available, if we don't already have some. The
3103 * application must reap them itself, as they reside on the shared cq ring.
3104 */
3105static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
3106 const sigset_t __user *sig, size_t sigsz)
3107{
Jens Axboebda52162019-09-24 13:47:15 -06003108 struct io_wait_queue iowq = {
3109 .wq = {
3110 .private = current,
3111 .func = io_wake_function,
3112 .entry = LIST_HEAD_INIT(iowq.wq.entry),
3113 },
3114 .ctx = ctx,
3115 .to_wait = min_events,
3116 };
Hristo Venev75b28af2019-08-26 17:23:46 +00003117 struct io_rings *rings = ctx->rings;
Jackie Liue9ffa5c2019-10-29 11:16:42 +08003118 int ret = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003119
Jens Axboe84f97dc2019-11-06 11:27:53 -07003120 if (io_cqring_events(ctx) >= min_events)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003121 return 0;
3122
3123 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01003124#ifdef CONFIG_COMPAT
3125 if (in_compat_syscall())
3126 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
Oleg Nesterovb7724342019-07-16 16:29:53 -07003127 sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01003128 else
3129#endif
Oleg Nesterovb7724342019-07-16 16:29:53 -07003130 ret = set_user_sigmask(sig, sigsz);
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01003131
Jens Axboe2b188cc2019-01-07 10:46:33 -07003132 if (ret)
3133 return ret;
3134 }
3135
Jens Axboebda52162019-09-24 13:47:15 -06003136 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02003137 trace_io_uring_cqring_wait(ctx, min_events);
Jens Axboebda52162019-09-24 13:47:15 -06003138 do {
3139 prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
3140 TASK_INTERRUPTIBLE);
3141 if (io_should_wake(&iowq))
3142 break;
3143 schedule();
3144 if (signal_pending(current)) {
Jackie Liue9ffa5c2019-10-29 11:16:42 +08003145 ret = -EINTR;
Jens Axboebda52162019-09-24 13:47:15 -06003146 break;
3147 }
3148 } while (1);
3149 finish_wait(&ctx->wait, &iowq.wq);
3150
Jackie Liue9ffa5c2019-10-29 11:16:42 +08003151 restore_saved_sigmask_unless(ret == -EINTR);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003152
Hristo Venev75b28af2019-08-26 17:23:46 +00003153 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003154}
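/*
 * Illustrative userspace-side sketch (not kernel code): the application
 * reaps CQEs straight from the mmap'ed CQ ring, pairing an acquire load of
 * cq.tail with a release store of cq.head; when it needs to block, it
 * calls io_uring_enter() with IORING_ENTER_GETEVENTS and a min_complete
 * count, which lands in the function above. The cq->k* names and
 * handle_completion() are placeholders:
 *
 *	unsigned head = *cq->khead;
 *	unsigned tail = __atomic_load_n(cq->ktail, __ATOMIC_ACQUIRE);
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cq->cqes[head & *cq->kring_mask];
 *
 *		handle_completion(cqe->user_data, cqe->res);
 *		head++;
 *	}
 *	__atomic_store_n(cq->khead, head, __ATOMIC_RELEASE);
 */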
3155
Jens Axboe6b063142019-01-10 22:13:58 -07003156static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
3157{
3158#if defined(CONFIG_UNIX)
3159 if (ctx->ring_sock) {
3160 struct sock *sock = ctx->ring_sock->sk;
3161 struct sk_buff *skb;
3162
3163 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
3164 kfree_skb(skb);
3165 }
3166#else
3167 int i;
3168
Jens Axboe65e19f52019-10-26 07:20:21 -06003169 for (i = 0; i < ctx->nr_user_files; i++) {
3170 struct file *file;
3171
3172 file = io_file_from_index(ctx, i);
3173 if (file)
3174 fput(file);
3175 }
Jens Axboe6b063142019-01-10 22:13:58 -07003176#endif
3177}
3178
3179static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
3180{
Jens Axboe65e19f52019-10-26 07:20:21 -06003181 unsigned nr_tables, i;
3182
3183 if (!ctx->file_table)
Jens Axboe6b063142019-01-10 22:13:58 -07003184 return -ENXIO;
3185
3186 __io_sqe_files_unregister(ctx);
Jens Axboe65e19f52019-10-26 07:20:21 -06003187 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
3188 for (i = 0; i < nr_tables; i++)
3189 kfree(ctx->file_table[i].files);
3190 kfree(ctx->file_table);
3191 ctx->file_table = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07003192 ctx->nr_user_files = 0;
3193 return 0;
3194}
3195
Jens Axboe6c271ce2019-01-10 11:22:30 -07003196static void io_sq_thread_stop(struct io_ring_ctx *ctx)
3197{
3198 if (ctx->sqo_thread) {
Jackie Liua4c0b3d2019-07-08 13:41:12 +08003199 wait_for_completion(&ctx->sqo_thread_started);
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02003200 /*
3201 * The park is a bit of a work-around, without it we get
3202 * warning spews on shutdown with SQPOLL set and affinity
3203 * set to a single CPU.
3204 */
Jens Axboe06058632019-04-13 09:26:03 -06003205 kthread_park(ctx->sqo_thread);
Jens Axboe6c271ce2019-01-10 11:22:30 -07003206 kthread_stop(ctx->sqo_thread);
3207 ctx->sqo_thread = NULL;
3208 }
3209}
3210
Jens Axboe6b063142019-01-10 22:13:58 -07003211static void io_finish_async(struct io_ring_ctx *ctx)
3212{
Jens Axboe6c271ce2019-01-10 11:22:30 -07003213 io_sq_thread_stop(ctx);
3214
Jens Axboe561fb042019-10-24 07:25:42 -06003215 if (ctx->io_wq) {
3216 io_wq_destroy(ctx->io_wq);
3217 ctx->io_wq = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07003218 }
3219}
3220
3221#if defined(CONFIG_UNIX)
3222static void io_destruct_skb(struct sk_buff *skb)
3223{
3224 struct io_ring_ctx *ctx = skb->sk->sk_user_data;
3225
Jens Axboe561fb042019-10-24 07:25:42 -06003226 if (ctx->io_wq)
3227 io_wq_flush(ctx->io_wq);
Jens Axboe8a997342019-10-09 14:40:13 -06003228
Jens Axboe6b063142019-01-10 22:13:58 -07003229 unix_destruct_scm(skb);
3230}
3231
3232/*
3233 * Ensure the UNIX gc is aware of our file set, so we are certain that
3234 * the io_uring can be safely unregistered on process exit, even if we have
3235 * loops in the file referencing.
3236 */
3237static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
3238{
3239 struct sock *sk = ctx->ring_sock->sk;
3240 struct scm_fp_list *fpl;
3241 struct sk_buff *skb;
Jens Axboe08a45172019-10-03 08:11:03 -06003242 int i, nr_files;
Jens Axboe6b063142019-01-10 22:13:58 -07003243
3244 if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
3245 unsigned long inflight = ctx->user->unix_inflight + nr;
3246
3247 if (inflight > task_rlimit(current, RLIMIT_NOFILE))
3248 return -EMFILE;
3249 }
3250
3251 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
3252 if (!fpl)
3253 return -ENOMEM;
3254
3255 skb = alloc_skb(0, GFP_KERNEL);
3256 if (!skb) {
3257 kfree(fpl);
3258 return -ENOMEM;
3259 }
3260
3261 skb->sk = sk;
Jens Axboe6b063142019-01-10 22:13:58 -07003262
Jens Axboe08a45172019-10-03 08:11:03 -06003263 nr_files = 0;
Jens Axboe6b063142019-01-10 22:13:58 -07003264 fpl->user = get_uid(ctx->user);
3265 for (i = 0; i < nr; i++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06003266 struct file *file = io_file_from_index(ctx, i + offset);
3267
3268 if (!file)
Jens Axboe08a45172019-10-03 08:11:03 -06003269 continue;
Jens Axboe65e19f52019-10-26 07:20:21 -06003270 fpl->fp[nr_files] = get_file(file);
Jens Axboe08a45172019-10-03 08:11:03 -06003271 unix_inflight(fpl->user, fpl->fp[nr_files]);
3272 nr_files++;
Jens Axboe6b063142019-01-10 22:13:58 -07003273 }
3274
Jens Axboe08a45172019-10-03 08:11:03 -06003275 if (nr_files) {
3276 fpl->max = SCM_MAX_FD;
3277 fpl->count = nr_files;
3278 UNIXCB(skb).fp = fpl;
3279 skb->destructor = io_destruct_skb;
3280 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
3281 skb_queue_head(&sk->sk_receive_queue, skb);
Jens Axboe6b063142019-01-10 22:13:58 -07003282
Jens Axboe08a45172019-10-03 08:11:03 -06003283 for (i = 0; i < nr_files; i++)
3284 fput(fpl->fp[i]);
3285 } else {
3286 kfree_skb(skb);
3287 kfree(fpl);
3288 }
Jens Axboe6b063142019-01-10 22:13:58 -07003289
3290 return 0;
3291}
3292
3293/*
3294 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
3295 * causes regular reference counting to break down. We rely on the UNIX
3296 * garbage collection to take care of this problem for us.
3297 */
3298static int io_sqe_files_scm(struct io_ring_ctx *ctx)
3299{
3300 unsigned left, total;
3301 int ret = 0;
3302
3303 total = 0;
3304 left = ctx->nr_user_files;
3305 while (left) {
3306 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07003307
3308 ret = __io_sqe_files_scm(ctx, this_files, total);
3309 if (ret)
3310 break;
3311 left -= this_files;
3312 total += this_files;
3313 }
3314
3315 if (!ret)
3316 return 0;
3317
3318 while (total < ctx->nr_user_files) {
Jens Axboe65e19f52019-10-26 07:20:21 -06003319 struct file *file = io_file_from_index(ctx, total);
3320
3321 if (file)
3322 fput(file);
Jens Axboe6b063142019-01-10 22:13:58 -07003323 total++;
3324 }
3325
3326 return ret;
3327}
3328#else
3329static int io_sqe_files_scm(struct io_ring_ctx *ctx)
3330{
3331 return 0;
3332}
3333#endif
3334
Jens Axboe65e19f52019-10-26 07:20:21 -06003335static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
3336 unsigned nr_files)
3337{
3338 int i;
3339
3340 for (i = 0; i < nr_tables; i++) {
3341 struct fixed_file_table *table = &ctx->file_table[i];
3342 unsigned this_files;
3343
3344 this_files = min(nr_files, IORING_MAX_FILES_TABLE);
3345 table->files = kcalloc(this_files, sizeof(struct file *),
3346 GFP_KERNEL);
3347 if (!table->files)
3348 break;
3349 nr_files -= this_files;
3350 }
3351
3352 if (i == nr_tables)
3353 return 0;
3354
3355 for (i = 0; i < nr_tables; i++) {
3356 struct fixed_file_table *table = &ctx->file_table[i];
3357 kfree(table->files);
3358 }
3359 return 1;
3360}
3361
Jens Axboe6b063142019-01-10 22:13:58 -07003362static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
3363 unsigned nr_args)
3364{
3365 __s32 __user *fds = (__s32 __user *) arg;
Jens Axboe65e19f52019-10-26 07:20:21 -06003366 unsigned nr_tables;
Jens Axboe6b063142019-01-10 22:13:58 -07003367 int fd, ret = 0;
3368 unsigned i;
3369
Jens Axboe65e19f52019-10-26 07:20:21 -06003370 if (ctx->file_table)
Jens Axboe6b063142019-01-10 22:13:58 -07003371 return -EBUSY;
3372 if (!nr_args)
3373 return -EINVAL;
3374 if (nr_args > IORING_MAX_FIXED_FILES)
3375 return -EMFILE;
3376
Jens Axboe65e19f52019-10-26 07:20:21 -06003377 nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
3378 ctx->file_table = kcalloc(nr_tables, sizeof(struct fixed_file_table),
3379 GFP_KERNEL);
3380 if (!ctx->file_table)
Jens Axboe6b063142019-01-10 22:13:58 -07003381 return -ENOMEM;
3382
Jens Axboe65e19f52019-10-26 07:20:21 -06003383 if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
3384 kfree(ctx->file_table);
3385 return -ENOMEM;
3386 }
3387
Jens Axboe08a45172019-10-03 08:11:03 -06003388 for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
Jens Axboe65e19f52019-10-26 07:20:21 -06003389 struct fixed_file_table *table;
3390 unsigned index;
3391
Jens Axboe6b063142019-01-10 22:13:58 -07003392 ret = -EFAULT;
3393 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
3394 break;
Jens Axboe08a45172019-10-03 08:11:03 -06003395 /* allow sparse sets */
3396 if (fd == -1) {
3397 ret = 0;
3398 continue;
3399 }
Jens Axboe6b063142019-01-10 22:13:58 -07003400
Jens Axboe65e19f52019-10-26 07:20:21 -06003401 table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
3402 index = i & IORING_FILE_TABLE_MASK;
3403 table->files[index] = fget(fd);
Jens Axboe6b063142019-01-10 22:13:58 -07003404
3405 ret = -EBADF;
Jens Axboe65e19f52019-10-26 07:20:21 -06003406 if (!table->files[index])
Jens Axboe6b063142019-01-10 22:13:58 -07003407 break;
3408 /*
3409 * Don't allow io_uring instances to be registered. If UNIX
3410 * isn't enabled, then this causes a reference cycle and this
3411 * instance can never get freed. If UNIX is enabled we'll
3412 * handle it just fine, but there's still no point in allowing
3413 * a ring fd as it doesn't support regular read/write anyway.
3414 */
Jens Axboe65e19f52019-10-26 07:20:21 -06003415 if (table->files[index]->f_op == &io_uring_fops) {
3416 fput(table->files[index]);
Jens Axboe6b063142019-01-10 22:13:58 -07003417 break;
3418 }
Jens Axboe6b063142019-01-10 22:13:58 -07003419 ret = 0;
3420 }
3421
3422 if (ret) {
Jens Axboe65e19f52019-10-26 07:20:21 -06003423 for (i = 0; i < ctx->nr_user_files; i++) {
3424 struct file *file;
Jens Axboe6b063142019-01-10 22:13:58 -07003425
Jens Axboe65e19f52019-10-26 07:20:21 -06003426 file = io_file_from_index(ctx, i);
3427 if (file)
3428 fput(file);
3429 }
3430 for (i = 0; i < nr_tables; i++)
3431 kfree(ctx->file_table[i].files);
3432
3433 kfree(ctx->file_table);
3434 ctx->file_table = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07003435 ctx->nr_user_files = 0;
3436 return ret;
3437 }
3438
3439 ret = io_sqe_files_scm(ctx);
3440 if (ret)
3441 io_sqe_files_unregister(ctx);
3442
3443 return ret;
3444}
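/*
 * Illustrative userspace-side sketch (not kernel code): registering a
 * fixed file set with io_uring_register(2). A slot of -1 creates a sparse
 * entry, matching the handling above; SQEs then select a slot by index
 * with IOSQE_FIXED_FILE. ring_fd is a placeholder:
 *
 *	int fds[3] = { open("a", O_RDONLY), -1, open("b", O_RDONLY) };
 *
 *	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES,
 *		    fds, 3) < 0)
 *		perror("IORING_REGISTER_FILES");
 */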
3445
Jens Axboec3a31e62019-10-03 13:59:56 -06003446static void io_sqe_file_unregister(struct io_ring_ctx *ctx, int index)
3447{
3448#if defined(CONFIG_UNIX)
Jens Axboe65e19f52019-10-26 07:20:21 -06003449 struct file *file = io_file_from_index(ctx, index);
Jens Axboec3a31e62019-10-03 13:59:56 -06003450 struct sock *sock = ctx->ring_sock->sk;
3451 struct sk_buff_head list, *head = &sock->sk_receive_queue;
3452 struct sk_buff *skb;
3453 int i;
3454
3455 __skb_queue_head_init(&list);
3456
3457 /*
3458 * Find the skb that holds this file in its SCM_RIGHTS. When found,
3459 * remove this entry and rearrange the file array.
3460 */
3461 skb = skb_dequeue(head);
3462 while (skb) {
3463 struct scm_fp_list *fp;
3464
3465 fp = UNIXCB(skb).fp;
3466 for (i = 0; i < fp->count; i++) {
3467 int left;
3468
3469 if (fp->fp[i] != file)
3470 continue;
3471
3472 unix_notinflight(fp->user, fp->fp[i]);
3473 left = fp->count - 1 - i;
3474 if (left) {
3475 memmove(&fp->fp[i], &fp->fp[i + 1],
3476 left * sizeof(struct file *));
3477 }
3478 fp->count--;
3479 if (!fp->count) {
3480 kfree_skb(skb);
3481 skb = NULL;
3482 } else {
3483 __skb_queue_tail(&list, skb);
3484 }
3485 fput(file);
3486 file = NULL;
3487 break;
3488 }
3489
3490 if (!file)
3491 break;
3492
3493 __skb_queue_tail(&list, skb);
3494
3495 skb = skb_dequeue(head);
3496 }
3497
3498 if (skb_peek(&list)) {
3499 spin_lock_irq(&head->lock);
3500 while ((skb = __skb_dequeue(&list)) != NULL)
3501 __skb_queue_tail(head, skb);
3502 spin_unlock_irq(&head->lock);
3503 }
3504#else
Jens Axboe65e19f52019-10-26 07:20:21 -06003505 fput(io_file_from_index(ctx, index));
Jens Axboec3a31e62019-10-03 13:59:56 -06003506#endif
3507}
3508
3509static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
3510 int index)
3511{
3512#if defined(CONFIG_UNIX)
3513 struct sock *sock = ctx->ring_sock->sk;
3514 struct sk_buff_head *head = &sock->sk_receive_queue;
3515 struct sk_buff *skb;
3516
3517 /*
3518 * See if we can merge this file into an existing skb SCM_RIGHTS
3519 * file set. If there's no room, fall back to allocating a new skb
3520 * and filling it in.
3521 */
3522 spin_lock_irq(&head->lock);
3523 skb = skb_peek(head);
3524 if (skb) {
3525 struct scm_fp_list *fpl = UNIXCB(skb).fp;
3526
3527 if (fpl->count < SCM_MAX_FD) {
3528 __skb_unlink(skb, head);
3529 spin_unlock_irq(&head->lock);
3530 fpl->fp[fpl->count] = get_file(file);
3531 unix_inflight(fpl->user, fpl->fp[fpl->count]);
3532 fpl->count++;
3533 spin_lock_irq(&head->lock);
3534 __skb_queue_head(head, skb);
3535 } else {
3536 skb = NULL;
3537 }
3538 }
3539 spin_unlock_irq(&head->lock);
3540
3541 if (skb) {
3542 fput(file);
3543 return 0;
3544 }
3545
3546 return __io_sqe_files_scm(ctx, 1, index);
3547#else
3548 return 0;
3549#endif
3550}
3551
3552static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
3553 unsigned nr_args)
3554{
3555 struct io_uring_files_update up;
3556 __s32 __user *fds;
3557 int fd, i, err;
3558 __u32 done;
3559
Jens Axboe65e19f52019-10-26 07:20:21 -06003560 if (!ctx->file_table)
Jens Axboec3a31e62019-10-03 13:59:56 -06003561 return -ENXIO;
3562 if (!nr_args)
3563 return -EINVAL;
3564 if (copy_from_user(&up, arg, sizeof(up)))
3565 return -EFAULT;
3566 if (check_add_overflow(up.offset, nr_args, &done))
3567 return -EOVERFLOW;
3568 if (done > ctx->nr_user_files)
3569 return -EINVAL;
3570
3571 done = 0;
3572 fds = (__s32 __user *) up.fds;
3573 while (nr_args) {
Jens Axboe65e19f52019-10-26 07:20:21 -06003574 struct fixed_file_table *table;
3575 unsigned index;
3576
Jens Axboec3a31e62019-10-03 13:59:56 -06003577 err = 0;
3578 if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
3579 err = -EFAULT;
3580 break;
3581 }
3582 i = array_index_nospec(up.offset, ctx->nr_user_files);
Jens Axboe65e19f52019-10-26 07:20:21 -06003583 table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
3584 index = i & IORING_FILE_TABLE_MASK;
3585 if (table->files[index]) {
Jens Axboec3a31e62019-10-03 13:59:56 -06003586 io_sqe_file_unregister(ctx, i);
Jens Axboe65e19f52019-10-26 07:20:21 -06003587 table->files[index] = NULL;
Jens Axboec3a31e62019-10-03 13:59:56 -06003588 }
3589 if (fd != -1) {
3590 struct file *file;
3591
3592 file = fget(fd);
3593 if (!file) {
3594 err = -EBADF;
3595 break;
3596 }
3597 /*
3598 * Don't allow io_uring instances to be registered. If
3599 * UNIX isn't enabled, then this causes a reference
3600 * cycle and this instance can never get freed. If UNIX
3601 * is enabled we'll handle it just fine, but there's
3602 * still no point in allowing a ring fd as it doesn't
3603 * support regular read/write anyway.
3604 */
3605 if (file->f_op == &io_uring_fops) {
3606 fput(file);
3607 err = -EBADF;
3608 break;
3609 }
Jens Axboe65e19f52019-10-26 07:20:21 -06003610 table->files[index] = file;
Jens Axboec3a31e62019-10-03 13:59:56 -06003611 err = io_sqe_file_register(ctx, file, i);
3612 if (err)
3613 break;
3614 }
3615 nr_args--;
3616 done++;
3617 up.offset++;
3618 }
3619
3620 return done ? done : err;
3621}
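/*
 * Illustrative userspace-side sketch (not kernel code): swapping a single
 * slot in an already registered file set. The fields mirror the 'up' usage
 * above (an offset plus an array of fds, where -1 clears a slot); exact
 * types come from the uapi header. ring_fd and new_fd are placeholders:
 *
 *	__s32 fd = new_fd;
 *	struct io_uring_files_update up = {
 *		.offset = 1,		// replace slot 1
 *		.fds = &fd,
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES_UPDATE,
 *		&up, 1);
 */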
3622
Jens Axboe6c271ce2019-01-10 11:22:30 -07003623static int io_sq_offload_start(struct io_ring_ctx *ctx,
3624 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003625{
Jens Axboe561fb042019-10-24 07:25:42 -06003626 unsigned concurrency;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003627 int ret;
3628
Jens Axboe6c271ce2019-01-10 11:22:30 -07003629 init_waitqueue_head(&ctx->sqo_wait);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003630 mmgrab(current->mm);
3631 ctx->sqo_mm = current->mm;
3632
Jens Axboe6c271ce2019-01-10 11:22:30 -07003633 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe3ec482d2019-04-08 10:51:01 -06003634 ret = -EPERM;
3635 if (!capable(CAP_SYS_ADMIN))
3636 goto err;
3637
Jens Axboe917257d2019-04-13 09:28:55 -06003638 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
3639 if (!ctx->sq_thread_idle)
3640 ctx->sq_thread_idle = HZ;
3641
Jens Axboe6c271ce2019-01-10 11:22:30 -07003642 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06003643 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07003644
Jens Axboe917257d2019-04-13 09:28:55 -06003645 ret = -EINVAL;
Jens Axboe44a9bd12019-05-14 20:00:30 -06003646 if (cpu >= nr_cpu_ids)
3647 goto err;
Shenghui Wang7889f442019-05-07 16:03:19 +08003648 if (!cpu_online(cpu))
Jens Axboe917257d2019-04-13 09:28:55 -06003649 goto err;
3650
Jens Axboe6c271ce2019-01-10 11:22:30 -07003651 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
3652 ctx, cpu,
3653 "io_uring-sq");
3654 } else {
3655 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
3656 "io_uring-sq");
3657 }
3658 if (IS_ERR(ctx->sqo_thread)) {
3659 ret = PTR_ERR(ctx->sqo_thread);
3660 ctx->sqo_thread = NULL;
3661 goto err;
3662 }
3663 wake_up_process(ctx->sqo_thread);
3664 } else if (p->flags & IORING_SETUP_SQ_AFF) {
3665 /* Can't have SQ_AFF without SQPOLL */
3666 ret = -EINVAL;
3667 goto err;
3668 }
3669
Jens Axboe561fb042019-10-24 07:25:42 -06003670 /* Do QD, or 4 * CPUS, whatever is smallest */
3671 concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
3672 ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm);
Jens Axboe975c99a52019-10-30 08:42:56 -06003673 if (IS_ERR(ctx->io_wq)) {
3674 ret = PTR_ERR(ctx->io_wq);
3675 ctx->io_wq = NULL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003676 goto err;
3677 }
3678
3679 return 0;
3680err:
Jens Axboe54a91f32019-09-10 09:15:04 -06003681 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003682 mmdrop(ctx->sqo_mm);
3683 ctx->sqo_mm = NULL;
3684 return ret;
3685}
3686
3687static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
3688{
3689 atomic_long_sub(nr_pages, &user->locked_vm);
3690}
3691
3692static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
3693{
3694 unsigned long page_limit, cur_pages, new_pages;
3695
3696 /* Don't allow more pages than we can safely lock */
3697 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
3698
3699 do {
3700 cur_pages = atomic_long_read(&user->locked_vm);
3701 new_pages = cur_pages + nr_pages;
3702 if (new_pages > page_limit)
3703 return -ENOMEM;
3704 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
3705 new_pages) != cur_pages);
3706
3707 return 0;
3708}
3709
3710static void io_mem_free(void *ptr)
3711{
Mark Rutland52e04ef2019-04-30 17:30:21 +01003712 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003713
Mark Rutland52e04ef2019-04-30 17:30:21 +01003714 if (!ptr)
3715 return;
3716
3717 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003718 if (put_page_testzero(page))
3719 free_compound_page(page);
3720}
3721
3722static void *io_mem_alloc(size_t size)
3723{
3724 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
3725 __GFP_NORETRY;
3726
3727 return (void *) __get_free_pages(gfp_flags, get_order(size));
3728}
3729
Hristo Venev75b28af2019-08-26 17:23:46 +00003730static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
3731 size_t *sq_offset)
3732{
3733 struct io_rings *rings;
3734 size_t off, sq_array_size;
3735
3736 off = struct_size(rings, cqes, cq_entries);
3737 if (off == SIZE_MAX)
3738 return SIZE_MAX;
3739
3740#ifdef CONFIG_SMP
3741 off = ALIGN(off, SMP_CACHE_BYTES);
3742 if (off == 0)
3743 return SIZE_MAX;
3744#endif
3745
3746 sq_array_size = array_size(sizeof(u32), sq_entries);
3747 if (sq_array_size == SIZE_MAX)
3748 return SIZE_MAX;
3749
3750 if (check_add_overflow(off, sq_array_size, &off))
3751 return SIZE_MAX;
3752
3753 if (sq_offset)
3754 *sq_offset = off;
3755
3756 return off;
3757}
3758
Jens Axboe2b188cc2019-01-07 10:46:33 -07003759static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
3760{
Hristo Venev75b28af2019-08-26 17:23:46 +00003761 size_t pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003762
Hristo Venev75b28af2019-08-26 17:23:46 +00003763 pages = (size_t)1 << get_order(
3764 rings_size(sq_entries, cq_entries, NULL));
3765 pages += (size_t)1 << get_order(
3766 array_size(sizeof(struct io_uring_sqe), sq_entries));
Jens Axboe2b188cc2019-01-07 10:46:33 -07003767
Hristo Venev75b28af2019-08-26 17:23:46 +00003768 return pages;
Jens Axboe2b188cc2019-01-07 10:46:33 -07003769}
3770
Jens Axboeedafcce2019-01-09 09:16:05 -07003771static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
3772{
3773 int i, j;
3774
3775 if (!ctx->user_bufs)
3776 return -ENXIO;
3777
3778 for (i = 0; i < ctx->nr_user_bufs; i++) {
3779 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3780
3781 for (j = 0; j < imu->nr_bvecs; j++)
John Hubbard27c4d3a2019-08-04 19:32:06 -07003782 put_user_page(imu->bvec[j].bv_page);
Jens Axboeedafcce2019-01-09 09:16:05 -07003783
3784 if (ctx->account_mem)
3785 io_unaccount_mem(ctx->user, imu->nr_bvecs);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003786 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07003787 imu->nr_bvecs = 0;
3788 }
3789
3790 kfree(ctx->user_bufs);
3791 ctx->user_bufs = NULL;
3792 ctx->nr_user_bufs = 0;
3793 return 0;
3794}
3795
3796static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
3797 void __user *arg, unsigned index)
3798{
3799 struct iovec __user *src;
3800
3801#ifdef CONFIG_COMPAT
3802 if (ctx->compat) {
3803 struct compat_iovec __user *ciovs;
3804 struct compat_iovec ciov;
3805
3806 ciovs = (struct compat_iovec __user *) arg;
3807 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
3808 return -EFAULT;
3809
3810 dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
3811 dst->iov_len = ciov.iov_len;
3812 return 0;
3813 }
3814#endif
3815 src = (struct iovec __user *) arg;
3816 if (copy_from_user(dst, &src[index], sizeof(*dst)))
3817 return -EFAULT;
3818 return 0;
3819}
3820
3821static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
3822 unsigned nr_args)
3823{
3824 struct vm_area_struct **vmas = NULL;
3825 struct page **pages = NULL;
3826 int i, j, got_pages = 0;
3827 int ret = -EINVAL;
3828
3829 if (ctx->user_bufs)
3830 return -EBUSY;
3831 if (!nr_args || nr_args > UIO_MAXIOV)
3832 return -EINVAL;
3833
3834 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
3835 GFP_KERNEL);
3836 if (!ctx->user_bufs)
3837 return -ENOMEM;
3838
3839 for (i = 0; i < nr_args; i++) {
3840 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3841 unsigned long off, start, end, ubuf;
3842 int pret, nr_pages;
3843 struct iovec iov;
3844 size_t size;
3845
3846 ret = io_copy_iov(ctx, &iov, arg, i);
3847 if (ret)
Pavel Begunkova2786822019-05-26 12:35:47 +03003848 goto err;
Jens Axboeedafcce2019-01-09 09:16:05 -07003849
3850 /*
3851 * Don't impose further limits on the size and buffer
3852 * constraints here, we'll -EINVAL later when IO is
3853 * submitted if they are wrong.
3854 */
3855 ret = -EFAULT;
3856 if (!iov.iov_base || !iov.iov_len)
3857 goto err;
3858
3859 /* arbitrary limit, but we need something */
3860 if (iov.iov_len > SZ_1G)
3861 goto err;
3862
3863 ubuf = (unsigned long) iov.iov_base;
3864 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3865 start = ubuf >> PAGE_SHIFT;
3866 nr_pages = end - start;
3867
3868 if (ctx->account_mem) {
3869 ret = io_account_mem(ctx->user, nr_pages);
3870 if (ret)
3871 goto err;
3872 }
3873
3874 ret = 0;
3875 if (!pages || nr_pages > got_pages) {
3876 kfree(vmas);
3877 kfree(pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003878 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
Jens Axboeedafcce2019-01-09 09:16:05 -07003879 GFP_KERNEL);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003880 vmas = kvmalloc_array(nr_pages,
Jens Axboeedafcce2019-01-09 09:16:05 -07003881 sizeof(struct vm_area_struct *),
3882 GFP_KERNEL);
3883 if (!pages || !vmas) {
3884 ret = -ENOMEM;
3885 if (ctx->account_mem)
3886 io_unaccount_mem(ctx->user, nr_pages);
3887 goto err;
3888 }
3889 got_pages = nr_pages;
3890 }
3891
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003892 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
Jens Axboeedafcce2019-01-09 09:16:05 -07003893 GFP_KERNEL);
3894 ret = -ENOMEM;
3895 if (!imu->bvec) {
3896 if (ctx->account_mem)
3897 io_unaccount_mem(ctx->user, nr_pages);
3898 goto err;
3899 }
3900
3901 ret = 0;
3902 down_read(&current->mm->mmap_sem);
Ira Weiny932f4a62019-05-13 17:17:03 -07003903 pret = get_user_pages(ubuf, nr_pages,
3904 FOLL_WRITE | FOLL_LONGTERM,
3905 pages, vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07003906 if (pret == nr_pages) {
3907 /* don't support file backed memory */
3908 for (j = 0; j < nr_pages; j++) {
3909 struct vm_area_struct *vma = vmas[j];
3910
3911 if (vma->vm_file &&
3912 !is_file_hugepages(vma->vm_file)) {
3913 ret = -EOPNOTSUPP;
3914 break;
3915 }
3916 }
3917 } else {
3918 ret = pret < 0 ? pret : -EFAULT;
3919 }
3920 up_read(&current->mm->mmap_sem);
3921 if (ret) {
3922 /*
3923 * if we did partial map, or found file backed vmas,
3924 * release any pages we did get
3925 */
John Hubbard27c4d3a2019-08-04 19:32:06 -07003926 if (pret > 0)
3927 put_user_pages(pages, pret);
Jens Axboeedafcce2019-01-09 09:16:05 -07003928 if (ctx->account_mem)
3929 io_unaccount_mem(ctx->user, nr_pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003930 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07003931 goto err;
3932 }
3933
3934 off = ubuf & ~PAGE_MASK;
3935 size = iov.iov_len;
3936 for (j = 0; j < nr_pages; j++) {
3937 size_t vec_len;
3938
3939 vec_len = min_t(size_t, size, PAGE_SIZE - off);
3940 imu->bvec[j].bv_page = pages[j];
3941 imu->bvec[j].bv_len = vec_len;
3942 imu->bvec[j].bv_offset = off;
3943 off = 0;
3944 size -= vec_len;
3945 }
3946 /* store original address for later verification */
3947 imu->ubuf = ubuf;
3948 imu->len = iov.iov_len;
3949 imu->nr_bvecs = nr_pages;
3950
3951 ctx->nr_user_bufs++;
3952 }
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003953 kvfree(pages);
3954 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07003955 return 0;
3956err:
Mark Rutlandd4ef6472019-05-01 16:59:16 +01003957 kvfree(pages);
3958 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07003959 io_sqe_buffer_unregister(ctx);
3960 return ret;
3961}
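/*
 * Illustrative userspace-side sketch (not kernel code): registering fixed
 * buffers. Each iovec is pinned by the code above; reads and writes can
 * then use IORING_OP_READ_FIXED / IORING_OP_WRITE_FIXED with
 * sqe->buf_index selecting the registered slot. ring_fd is a placeholder:
 *
 *	static char buf[65536];
 *	struct iovec iov = {
 *		.iov_base = buf,
 *		.iov_len = sizeof(buf),
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_BUFFERS,
 *		&iov, 1);
 */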
3962
Jens Axboe9b402842019-04-11 11:45:41 -06003963static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
3964{
3965 __s32 __user *fds = arg;
3966 int fd;
3967
3968 if (ctx->cq_ev_fd)
3969 return -EBUSY;
3970
3971 if (copy_from_user(&fd, fds, sizeof(*fds)))
3972 return -EFAULT;
3973
3974 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
3975 if (IS_ERR(ctx->cq_ev_fd)) {
3976 int ret = PTR_ERR(ctx->cq_ev_fd);
3977 ctx->cq_ev_fd = NULL;
3978 return ret;
3979 }
3980
3981 return 0;
3982}
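/*
 * Illustrative userspace-side sketch (not kernel code): registering an
 * eventfd that is signalled when completions are posted, so the ring can
 * be wired into a poll/epoll loop. ring_fd is a placeholder and
 * <sys/eventfd.h> is assumed:
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_EVENTFD,
 *		&efd, 1);
 */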
3983
3984static int io_eventfd_unregister(struct io_ring_ctx *ctx)
3985{
3986 if (ctx->cq_ev_fd) {
3987 eventfd_ctx_put(ctx->cq_ev_fd);
3988 ctx->cq_ev_fd = NULL;
3989 return 0;
3990 }
3991
3992 return -ENXIO;
3993}
3994
Jens Axboe2b188cc2019-01-07 10:46:33 -07003995static void io_ring_ctx_free(struct io_ring_ctx *ctx)
3996{
Jens Axboe6b063142019-01-10 22:13:58 -07003997 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003998 if (ctx->sqo_mm)
3999 mmdrop(ctx->sqo_mm);
Jens Axboedef596e2019-01-09 08:59:42 -07004000
4001 io_iopoll_reap_events(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -07004002 io_sqe_buffer_unregister(ctx);
Jens Axboe6b063142019-01-10 22:13:58 -07004003 io_sqe_files_unregister(ctx);
Jens Axboe9b402842019-04-11 11:45:41 -06004004 io_eventfd_unregister(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -07004005
Jens Axboe2b188cc2019-01-07 10:46:33 -07004006#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07004007 if (ctx->ring_sock) {
4008 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07004009 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07004010 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07004011#endif
4012
Hristo Venev75b28af2019-08-26 17:23:46 +00004013 io_mem_free(ctx->rings);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004014 io_mem_free(ctx->sq_sqes);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004015
4016 percpu_ref_exit(&ctx->refs);
4017 if (ctx->account_mem)
4018 io_unaccount_mem(ctx->user,
4019 ring_pages(ctx->sq_entries, ctx->cq_entries));
4020 free_uid(ctx->user);
4021 kfree(ctx);
4022}
4023
4024static __poll_t io_uring_poll(struct file *file, poll_table *wait)
4025{
4026 struct io_ring_ctx *ctx = file->private_data;
4027 __poll_t mask = 0;
4028
4029 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02004030 /*
4031 * synchronizes with barrier from wq_has_sleeper call in
4032 * io_commit_cqring
4033 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07004034 smp_rmb();
Hristo Venev75b28af2019-08-26 17:23:46 +00004035 if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
4036 ctx->rings->sq_ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07004037 mask |= EPOLLOUT | EPOLLWRNORM;
yangerkundaa5de52019-09-24 20:53:34 +08004038 if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
Jens Axboe2b188cc2019-01-07 10:46:33 -07004039 mask |= EPOLLIN | EPOLLRDNORM;
4040
4041 return mask;
4042}
4043
4044static int io_uring_fasync(int fd, struct file *file, int on)
4045{
4046 struct io_ring_ctx *ctx = file->private_data;
4047
4048 return fasync_helper(fd, file, on, &ctx->cq_fasync);
4049}
4050
4051static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
4052{
4053 mutex_lock(&ctx->uring_lock);
4054 percpu_ref_kill(&ctx->refs);
4055 mutex_unlock(&ctx->uring_lock);
4056
Jens Axboe5262f562019-09-17 12:26:57 -06004057 io_kill_timeouts(ctx);
Jens Axboe221c5eb2019-01-17 09:41:58 -07004058 io_poll_remove_all(ctx);
Jens Axboe561fb042019-10-24 07:25:42 -06004059
4060 if (ctx->io_wq)
4061 io_wq_cancel_all(ctx->io_wq);
4062
Jens Axboedef596e2019-01-09 08:59:42 -07004063 io_iopoll_reap_events(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004064 wait_for_completion(&ctx->ctx_done);
4065 io_ring_ctx_free(ctx);
4066}
4067
4068static int io_uring_release(struct inode *inode, struct file *file)
4069{
4070 struct io_ring_ctx *ctx = file->private_data;
4071
4072 file->private_data = NULL;
4073 io_ring_ctx_wait_and_kill(ctx);
4074 return 0;
4075}
4076
Jens Axboefcb323c2019-10-24 12:39:47 -06004077static void io_uring_cancel_files(struct io_ring_ctx *ctx,
4078 struct files_struct *files)
4079{
4080 struct io_kiocb *req;
4081 DEFINE_WAIT(wait);
4082
4083 while (!list_empty_careful(&ctx->inflight_list)) {
4084 enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
4085
4086 spin_lock_irq(&ctx->inflight_lock);
4087 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
4088 if (req->work.files == files) {
4089 ret = io_wq_cancel_work(ctx->io_wq, &req->work);
4090 break;
4091 }
4092 }
4093 if (ret == IO_WQ_CANCEL_RUNNING)
4094 prepare_to_wait(&ctx->inflight_wait, &wait,
4095 TASK_UNINTERRUPTIBLE);
4096
4097 spin_unlock_irq(&ctx->inflight_lock);
4098
4099 /*
4100 * We need to keep going until we get NOTFOUND. We only cancel
4101		 * one work item at a time.
4102		 *
4103		 * If we get CANCEL_RUNNING, then wait for that work item to
4104		 * complete before continuing.
4105 */
4106 if (ret == IO_WQ_CANCEL_OK)
4107 continue;
4108 else if (ret != IO_WQ_CANCEL_RUNNING)
4109 break;
4110 schedule();
4111 }
4112}
4113
4114static int io_uring_flush(struct file *file, void *data)
4115{
4116 struct io_ring_ctx *ctx = file->private_data;
4117
4118 io_uring_cancel_files(ctx, data);
4119 if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
4120 io_wq_cancel_all(ctx->io_wq);
4121 return 0;
4122}
4123
Jens Axboe2b188cc2019-01-07 10:46:33 -07004124static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
4125{
4126 loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
4127 unsigned long sz = vma->vm_end - vma->vm_start;
4128 struct io_ring_ctx *ctx = file->private_data;
4129 unsigned long pfn;
4130 struct page *page;
4131 void *ptr;
4132
4133 switch (offset) {
4134 case IORING_OFF_SQ_RING:
Hristo Venev75b28af2019-08-26 17:23:46 +00004135 case IORING_OFF_CQ_RING:
4136 ptr = ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004137 break;
4138 case IORING_OFF_SQES:
4139 ptr = ctx->sq_sqes;
4140 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004141 default:
4142 return -EINVAL;
4143 }
4144
4145 page = virt_to_head_page(ptr);
Matthew Wilcox (Oracle)a50b8542019-09-23 15:34:25 -07004146 if (sz > page_size(page))
Jens Axboe2b188cc2019-01-07 10:46:33 -07004147 return -EINVAL;
4148
4149 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
4150 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
4151}
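/*
 * Illustrative userspace-side sketch (not kernel code): these are the
 * offsets the application passes to mmap(2) after io_uring_setup(). The
 * sizes come from the io_uring_params filled in by setup; ring_fd and p
 * are placeholders:
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t sqes_sz = p.sq_entries * sizeof(struct io_uring_sqe);
 *
 *	void *sq_ptr = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED | MAP_POPULATE, ring_fd,
 *			    IORING_OFF_SQ_RING);
 *	void *sqes = mmap(NULL, sqes_sz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED | MAP_POPULATE, ring_fd,
 *			  IORING_OFF_SQES);
 *
 * With IORING_FEAT_SINGLE_MMAP the CQ ring lives in the same mapping as
 * the SQ ring; otherwise IORING_OFF_CQ_RING is mapped separately with a
 * size of p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe).
 */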
4152
4153SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
4154 u32, min_complete, u32, flags, const sigset_t __user *, sig,
4155 size_t, sigsz)
4156{
4157 struct io_ring_ctx *ctx;
4158 long ret = -EBADF;
4159 int submitted = 0;
4160 struct fd f;
4161
Jens Axboe6c271ce2019-01-10 11:22:30 -07004162 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
Jens Axboe2b188cc2019-01-07 10:46:33 -07004163 return -EINVAL;
4164
4165 f = fdget(fd);
4166 if (!f.file)
4167 return -EBADF;
4168
4169 ret = -EOPNOTSUPP;
4170 if (f.file->f_op != &io_uring_fops)
4171 goto out_fput;
4172
4173 ret = -ENXIO;
4174 ctx = f.file->private_data;
4175 if (!percpu_ref_tryget(&ctx->refs))
4176 goto out_fput;
4177
Jens Axboe6c271ce2019-01-10 11:22:30 -07004178 /*
4179 * For SQ polling, the thread will do all submissions and completions.
4180 * Just return the requested submit count, and wake the thread if
4181 * we were asked to.
4182 */
Jens Axboeb2a9ead2019-09-12 14:19:16 -06004183 ret = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07004184 if (ctx->flags & IORING_SETUP_SQPOLL) {
4185 if (flags & IORING_ENTER_SQ_WAKEUP)
4186 wake_up(&ctx->sqo_wait);
4187 submitted = to_submit;
Jens Axboeb2a9ead2019-09-12 14:19:16 -06004188 } else if (to_submit) {
Pavel Begunkovae9428c2019-11-06 00:22:14 +03004189 struct mm_struct *cur_mm;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004190
Pavel Begunkovae9428c2019-11-06 00:22:14 +03004191 to_submit = min(to_submit, ctx->sq_entries);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004192 mutex_lock(&ctx->uring_lock);
Pavel Begunkovae9428c2019-11-06 00:22:14 +03004193 /* already have mm, so io_submit_sqes() won't try to grab it */
4194 cur_mm = ctx->sqo_mm;
4195 submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
4196 &cur_mm, false);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004197 mutex_unlock(&ctx->uring_lock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004198 }
4199 if (flags & IORING_ENTER_GETEVENTS) {
Jens Axboedef596e2019-01-09 08:59:42 -07004200 unsigned nr_events = 0;
4201
Jens Axboe2b188cc2019-01-07 10:46:33 -07004202 min_complete = min(min_complete, ctx->cq_entries);
4203
Jens Axboedef596e2019-01-09 08:59:42 -07004204 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -07004205 ret = io_iopoll_check(ctx, &nr_events, min_complete);
Jens Axboedef596e2019-01-09 08:59:42 -07004206 } else {
4207 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
4208 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07004209 }
4210
Pavel Begunkov6805b322019-10-08 02:18:42 +03004211 percpu_ref_put(&ctx->refs);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004212out_fput:
4213 fdput(f);
4214 return submitted ? submitted : ret;
4215}
4216
4217static const struct file_operations io_uring_fops = {
4218 .release = io_uring_release,
Jens Axboefcb323c2019-10-24 12:39:47 -06004219 .flush = io_uring_flush,
Jens Axboe2b188cc2019-01-07 10:46:33 -07004220 .mmap = io_uring_mmap,
4221 .poll = io_uring_poll,
4222 .fasync = io_uring_fasync,
4223};
4224
4225static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
4226 struct io_uring_params *p)
4227{
Hristo Venev75b28af2019-08-26 17:23:46 +00004228 struct io_rings *rings;
4229 size_t size, sq_array_offset;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004230
Hristo Venev75b28af2019-08-26 17:23:46 +00004231 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
4232 if (size == SIZE_MAX)
4233 return -EOVERFLOW;
4234
4235 rings = io_mem_alloc(size);
4236 if (!rings)
Jens Axboe2b188cc2019-01-07 10:46:33 -07004237 return -ENOMEM;
4238
Hristo Venev75b28af2019-08-26 17:23:46 +00004239 ctx->rings = rings;
4240 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
4241 rings->sq_ring_mask = p->sq_entries - 1;
4242 rings->cq_ring_mask = p->cq_entries - 1;
4243 rings->sq_ring_entries = p->sq_entries;
4244 rings->cq_ring_entries = p->cq_entries;
4245 ctx->sq_mask = rings->sq_ring_mask;
4246 ctx->cq_mask = rings->cq_ring_mask;
4247 ctx->sq_entries = rings->sq_ring_entries;
4248 ctx->cq_entries = rings->cq_ring_entries;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004249
4250 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
4251 if (size == SIZE_MAX)
4252 return -EOVERFLOW;
4253
4254 ctx->sq_sqes = io_mem_alloc(size);
Mark Rutland52e04ef2019-04-30 17:30:21 +01004255 if (!ctx->sq_sqes)
Jens Axboe2b188cc2019-01-07 10:46:33 -07004256 return -ENOMEM;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004257
Jens Axboe2b188cc2019-01-07 10:46:33 -07004258 return 0;
4259}
4260
4261/*
4262 * Allocate an anonymous fd, this is what constitutes the application
4263 * visible backing of an io_uring instance. The application mmaps this
4264 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
4265 * we have to tie this fd to a socket for file garbage collection purposes.
4266 */
4267static int io_uring_get_fd(struct io_ring_ctx *ctx)
4268{
4269 struct file *file;
4270 int ret;
4271
4272#if defined(CONFIG_UNIX)
4273 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
4274 &ctx->ring_sock);
4275 if (ret)
4276 return ret;
4277#endif
4278
4279 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
4280 if (ret < 0)
4281 goto err;
4282
4283 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
4284 O_RDWR | O_CLOEXEC);
4285 if (IS_ERR(file)) {
4286 put_unused_fd(ret);
4287 ret = PTR_ERR(file);
4288 goto err;
4289 }
4290
4291#if defined(CONFIG_UNIX)
4292 ctx->ring_sock->file = file;
Jens Axboe6b063142019-01-10 22:13:58 -07004293 ctx->ring_sock->sk->sk_user_data = ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004294#endif
4295 fd_install(ret, file);
4296 return ret;
4297err:
4298#if defined(CONFIG_UNIX)
4299 sock_release(ctx->ring_sock);
4300 ctx->ring_sock = NULL;
4301#endif
4302 return ret;
4303}
4304
4305static int io_uring_create(unsigned entries, struct io_uring_params *p)
4306{
4307 struct user_struct *user = NULL;
4308 struct io_ring_ctx *ctx;
4309 bool account_mem;
4310 int ret;
4311
4312 if (!entries || entries > IORING_MAX_ENTRIES)
4313 return -EINVAL;
4314
4315 /*
4316 * Use twice as many entries for the CQ ring. It's possible for the
4317 * application to drive a higher depth than the size of the SQ ring,
4318 * since the sqes are only used at submission time. This allows for
Jens Axboe33a107f2019-10-04 12:10:03 -06004319 * some flexibility in overcommitting a bit. If the application has
4320 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
4321 * of CQ ring entries manually.
Jens Axboe2b188cc2019-01-07 10:46:33 -07004322 */
4323 p->sq_entries = roundup_pow_of_two(entries);
Jens Axboe33a107f2019-10-04 12:10:03 -06004324 if (p->flags & IORING_SETUP_CQSIZE) {
4325 /*
4326 * If IORING_SETUP_CQSIZE is set, we do the same roundup
4327 * to a power-of-two, if it isn't already. We do NOT impose
4328 * any cq vs sq ring sizing.
4329 */
4330 if (p->cq_entries < p->sq_entries || p->cq_entries > IORING_MAX_CQ_ENTRIES)
4331 return -EINVAL;
4332 p->cq_entries = roundup_pow_of_two(p->cq_entries);
4333 } else {
4334 p->cq_entries = 2 * p->sq_entries;
4335 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07004336
4337 user = get_uid(current_user());
4338 account_mem = !capable(CAP_IPC_LOCK);
4339
4340 if (account_mem) {
4341 ret = io_account_mem(user,
4342 ring_pages(p->sq_entries, p->cq_entries));
4343 if (ret) {
4344 free_uid(user);
4345 return ret;
4346 }
4347 }
4348
4349 ctx = io_ring_ctx_alloc(p);
4350 if (!ctx) {
4351 if (account_mem)
4352 io_unaccount_mem(user, ring_pages(p->sq_entries,
4353 p->cq_entries));
4354 free_uid(user);
4355 return -ENOMEM;
4356 }
4357 ctx->compat = in_compat_syscall();
4358 ctx->account_mem = account_mem;
4359 ctx->user = user;
4360
4361 ret = io_allocate_scq_urings(ctx, p);
4362 if (ret)
4363 goto err;
4364
Jens Axboe6c271ce2019-01-10 11:22:30 -07004365 ret = io_sq_offload_start(ctx, p);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004366 if (ret)
4367 goto err;
4368
Jens Axboe2b188cc2019-01-07 10:46:33 -07004369 memset(&p->sq_off, 0, sizeof(p->sq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00004370 p->sq_off.head = offsetof(struct io_rings, sq.head);
4371 p->sq_off.tail = offsetof(struct io_rings, sq.tail);
4372 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
4373 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
4374 p->sq_off.flags = offsetof(struct io_rings, sq_flags);
4375 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
4376 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
Jens Axboe2b188cc2019-01-07 10:46:33 -07004377
4378 memset(&p->cq_off, 0, sizeof(p->cq_off));
Hristo Venev75b28af2019-08-26 17:23:46 +00004379 p->cq_off.head = offsetof(struct io_rings, cq.head);
4380 p->cq_off.tail = offsetof(struct io_rings, cq.tail);
4381 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
4382 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
4383 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
4384 p->cq_off.cqes = offsetof(struct io_rings, cqes);
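
	/*
	 * A sketch of how userspace is expected to consume these offsets
	 * (variable names are illustrative; IORING_OFF_SQ_RING and
	 * IORING_OFF_SQES are the mmap offsets from the uapi header):
	 *
	 *	sq_ring = mmap(NULL, ring_sz, PROT_READ | PROT_WRITE,
	 *		       MAP_SHARED | MAP_POPULATE, ring_fd,
	 *		       IORING_OFF_SQ_RING);
	 *	sq_tail = sq_ring + p.sq_off.tail;
	 *	cq_head = sq_ring + p.cq_off.head;
	 *
	 * where ring_sz is the larger of the SQ ring size
	 * (p.sq_off.array + p.sq_entries * sizeof(__u32)) and the CQ ring
	 * size (p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe)),
	 * since IORING_FEAT_SINGLE_MMAP (advertised below) lets both rings
	 * share one mapping. The sqe array itself is mapped separately at
	 * IORING_OFF_SQES.
	 */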
Jens Axboeac90f242019-09-06 10:26:21 -06004385
Jens Axboe044c1ab2019-10-28 09:15:33 -06004386 /*
4387 * Install ring fd as the very last thing, so we don't risk someone
4388 * having closed it before we finish setup
4389 */
4390 ret = io_uring_get_fd(ctx);
4391 if (ret < 0)
4392 goto err;
4393
Jens Axboeac90f242019-09-06 10:26:21 -06004394 p->features = IORING_FEAT_SINGLE_MMAP;
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02004395 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
Jens Axboe2b188cc2019-01-07 10:46:33 -07004396 return ret;
4397err:
4398 io_ring_ctx_wait_and_kill(ctx);
4399 return ret;
4400}
4401
4402/*
4403 * Sets up an io_uring context, and returns the fd. The application asks for a
4404 * ring size; we return the actual sq/cq ring sizes (among other things) in the
4405 * params structure passed in.
4406 */
4407static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
4408{
4409 struct io_uring_params p;
4410 long ret;
4411 int i;
4412
4413 if (copy_from_user(&p, params, sizeof(p)))
4414 return -EFAULT;
4415 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
4416 if (p.resv[i])
4417 return -EINVAL;
4418 }
4419
Jens Axboe6c271ce2019-01-10 11:22:30 -07004420 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
Jens Axboe33a107f2019-10-04 12:10:03 -06004421 IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE))
Jens Axboe2b188cc2019-01-07 10:46:33 -07004422 return -EINVAL;
4423
4424 ret = io_uring_create(entries, &p);
4425 if (ret < 0)
4426 return ret;
4427
4428 if (copy_to_user(params, &p, sizeof(p)))
4429 return -EFAULT;
4430
4431 return ret;
4432}
4433
4434SYSCALL_DEFINE2(io_uring_setup, u32, entries,
4435 struct io_uring_params __user *, params)
4436{
4437 return io_uring_setup(entries, params);
4438}
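
/*
 * For reference, a minimal raw caller (no liburing, illustrative names)
 * looks roughly like this:
 *
 *	struct io_uring_params p;
 *	int ring_fd;
 *
 *	memset(&p, 0, sizeof(p));
 *	ring_fd = syscall(__NR_io_uring_setup, 256, &p);
 *
 * On success, p.sq_entries/p.cq_entries hold the rounded-up ring sizes,
 * and p.sq_off/p.cq_off describe the ring layout filled in above.
 */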
4439
Jens Axboeedafcce2019-01-09 09:16:05 -07004440static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
4441 void __user *arg, unsigned nr_args)
Jens Axboeb19062a2019-04-15 10:49:38 -06004442 __releases(ctx->uring_lock)
4443 __acquires(ctx->uring_lock)
Jens Axboeedafcce2019-01-09 09:16:05 -07004444{
4445 int ret;
4446
Jens Axboe35fa71a2019-04-22 10:23:23 -06004447 /*
4448	 * We're inside the ring mutex; if the ref is already dying, then
4449 * someone else killed the ctx or is already going through
4450 * io_uring_register().
4451 */
4452 if (percpu_ref_is_dying(&ctx->refs))
4453 return -ENXIO;
4454
Jens Axboeedafcce2019-01-09 09:16:05 -07004455 percpu_ref_kill(&ctx->refs);
Jens Axboeb19062a2019-04-15 10:49:38 -06004456
4457 /*
4458 * Drop uring mutex before waiting for references to exit. If another
4459 * thread is currently inside io_uring_enter() it might need to grab
4460 * the uring_lock to make progress. If we hold it here across the drain
4461 * wait, then we can deadlock. It's safe to drop the mutex here, since
4462 * no new references will come in after we've killed the percpu ref.
4463 */
4464 mutex_unlock(&ctx->uring_lock);
Jens Axboeedafcce2019-01-09 09:16:05 -07004465 wait_for_completion(&ctx->ctx_done);
Jens Axboeb19062a2019-04-15 10:49:38 -06004466 mutex_lock(&ctx->uring_lock);
Jens Axboeedafcce2019-01-09 09:16:05 -07004467
4468 switch (opcode) {
4469 case IORING_REGISTER_BUFFERS:
4470 ret = io_sqe_buffer_register(ctx, arg, nr_args);
4471 break;
4472 case IORING_UNREGISTER_BUFFERS:
4473 ret = -EINVAL;
4474 if (arg || nr_args)
4475 break;
4476 ret = io_sqe_buffer_unregister(ctx);
4477 break;
Jens Axboe6b063142019-01-10 22:13:58 -07004478 case IORING_REGISTER_FILES:
4479 ret = io_sqe_files_register(ctx, arg, nr_args);
4480 break;
4481 case IORING_UNREGISTER_FILES:
4482 ret = -EINVAL;
4483 if (arg || nr_args)
4484 break;
4485 ret = io_sqe_files_unregister(ctx);
4486 break;
Jens Axboec3a31e62019-10-03 13:59:56 -06004487 case IORING_REGISTER_FILES_UPDATE:
4488 ret = io_sqe_files_update(ctx, arg, nr_args);
4489 break;
Jens Axboe9b402842019-04-11 11:45:41 -06004490 case IORING_REGISTER_EVENTFD:
4491 ret = -EINVAL;
4492 if (nr_args != 1)
4493 break;
4494 ret = io_eventfd_register(ctx, arg);
4495 break;
4496 case IORING_UNREGISTER_EVENTFD:
4497 ret = -EINVAL;
4498 if (arg || nr_args)
4499 break;
4500 ret = io_eventfd_unregister(ctx);
4501 break;
Jens Axboeedafcce2019-01-09 09:16:05 -07004502 default:
4503 ret = -EINVAL;
4504 break;
4505 }
4506
4507 /* bring the ctx back to life */
4508 reinit_completion(&ctx->ctx_done);
4509 percpu_ref_reinit(&ctx->refs);
4510 return ret;
4511}
4512
4513SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
4514 void __user *, arg, unsigned int, nr_args)
4515{
4516 struct io_ring_ctx *ctx;
4517 long ret = -EBADF;
4518 struct fd f;
4519
4520 f = fdget(fd);
4521 if (!f.file)
4522 return -EBADF;
4523
4524 ret = -EOPNOTSUPP;
4525 if (f.file->f_op != &io_uring_fops)
4526 goto out_fput;
4527
4528 ctx = f.file->private_data;
4529
4530 mutex_lock(&ctx->uring_lock);
4531 ret = __io_uring_register(ctx, opcode, arg, nr_args);
4532 mutex_unlock(&ctx->uring_lock);
Dmitrii Dolgovc826bd72019-10-15 19:02:01 +02004533 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
4534 ctx->cq_ev_fd != NULL, ret);
Jens Axboeedafcce2019-01-09 09:16:05 -07004535out_fput:
4536 fdput(f);
4537 return ret;
4538}
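
/*
 * A rough userspace sketch of registering fixed buffers (illustrative
 * names; the opcode and syscall number are the uapi values):
 *
 *	struct iovec iov = {
 *		.iov_base = buf,
 *		.iov_len  = buf_len,
 *	};
 *
 *	ret = syscall(__NR_io_uring_register, ring_fd,
 *		      IORING_REGISTER_BUFFERS, &iov, 1);
 *
 * This path quiesces the ring (see the percpu_ref handling in
 * __io_uring_register() above), so it is intended for setup time rather
 * than the submission fast path.
 */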
4539
Jens Axboe2b188cc2019-01-07 10:46:33 -07004540static int __init io_uring_init(void)
4541{
4542 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
4543 return 0;
4544}
4545__initcall(io_uring_init);