Jens Axboe2b188cc2019-01-07 10:46:33 -07001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
Stefan Bühler1e84b972019-04-24 23:54:16 +02007 * the application and kernel side.
8 *
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
16 * CQ entries.
17 *
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
23 * head will do).
24 *
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
28 * between.
Jens Axboe2b188cc2019-01-07 10:46:33 -070029 *
30 * Also see the examples in the liburing library:
31 *
32 * git://git.kernel.dk/liburing
33 *
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes and to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
38 *
39 * Copyright (C) 2018-2019 Jens Axboe
Christoph Hellwigc992fe22019-01-11 09:43:02 -070040 * Copyright (c) 2018-2019 Christoph Hellwig
Jens Axboe2b188cc2019-01-07 10:46:33 -070041 */
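/*
 * Illustrative userspace-side sketch of the pairing described above (not
 * part of this file; 'sq_tail', 'sq_flags' and 'ring_fd' are assumed to
 * come from the ring mmaps and setup, smp_store_release()/smp_mb()/
 * READ_ONCE() stand for the application's equivalents as provided by
 * liburing, and io_uring_enter() is the liburing syscall wrapper):
 *
 *	// publish new SQ entries: the release store orders the sqe/array
 *	// writes before the new tail, pairing with the kernel's acquire load
 *	smp_store_release(sq_tail, local_tail);
 *
 *	// with IORING_SETUP_SQPOLL, a full barrier is needed before testing
 *	// the wakeup flag, so the flag read cannot pass the tail update
 *	smp_mb();
 *	if (READ_ONCE(*sq_flags) & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 */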
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/errno.h>
45#include <linux/syscalls.h>
46#include <linux/compat.h>
47#include <linux/refcount.h>
48#include <linux/uio.h>
49
50#include <linux/sched/signal.h>
51#include <linux/fs.h>
52#include <linux/file.h>
53#include <linux/fdtable.h>
54#include <linux/mm.h>
55#include <linux/mman.h>
56#include <linux/mmu_context.h>
57#include <linux/percpu.h>
58#include <linux/slab.h>
59#include <linux/workqueue.h>
Jens Axboe6c271ce2019-01-10 11:22:30 -070060#include <linux/kthread.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070061#include <linux/blkdev.h>
Jens Axboeedafcce2019-01-09 09:16:05 -070062#include <linux/bvec.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070063#include <linux/net.h>
64#include <net/sock.h>
65#include <net/af_unix.h>
Jens Axboe6b063142019-01-10 22:13:58 -070066#include <net/scm.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070067#include <linux/anon_inodes.h>
68#include <linux/sched/mm.h>
69#include <linux/uaccess.h>
70#include <linux/nospec.h>
Jens Axboeedafcce2019-01-09 09:16:05 -070071#include <linux/sizes.h>
72#include <linux/hugetlb.h>
Jens Axboe2b188cc2019-01-07 10:46:33 -070073
74#include <uapi/linux/io_uring.h>
75
76#include "internal.h"
77
78#define IORING_MAX_ENTRIES 4096
Jens Axboe6b063142019-01-10 22:13:58 -070079#define IORING_MAX_FIXED_FILES 1024
Jens Axboe2b188cc2019-01-07 10:46:33 -070080
81struct io_uring {
82 u32 head ____cacheline_aligned_in_smp;
83 u32 tail ____cacheline_aligned_in_smp;
84};
85
Stefan Bühler1e84b972019-04-24 23:54:16 +020086/*
87 * This data is shared with the application through the mmap at offset
88 * IORING_OFF_SQ_RING.
89 *
90 * The offsets to the member fields are published through struct
91 * io_sqring_offsets when calling io_uring_setup.
92 */
Jens Axboe2b188cc2019-01-07 10:46:33 -070093struct io_sq_ring {
Stefan Bühler1e84b972019-04-24 23:54:16 +020094 /*
95 * Head and tail offsets into the ring; the offsets need to be
96 * masked to get valid indices.
97 *
98 * The kernel controls head and the application controls tail.
99 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700100 struct io_uring r;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200101 /*
102 * Bitmask to apply to head and tail offsets (constant, equals
103 * ring_entries - 1)
104 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700105 u32 ring_mask;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200106 /* Ring size (constant, power of 2) */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700107 u32 ring_entries;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200108 /*
 109 * Number of invalid entries dropped by the kernel due to an
 110 * invalid index stored in the array
111 *
112 * Written by the kernel, shouldn't be modified by the
113 * application (i.e. get number of "new events" by comparing to
114 * cached value).
115 *
 116 * After a new SQ head value has been read by the application,
 117 * this counter includes all submissions that were dropped up to
 118 * the new SQ head (and possibly more).
119 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700120 u32 dropped;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200121 /*
122 * Runtime flags
123 *
124 * Written by the kernel, shouldn't be modified by the
125 * application.
126 *
127 * The application needs a full memory barrier before checking
128 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
129 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700130 u32 flags;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200131 /*
132 * Ring buffer of indices into array of io_uring_sqe, which is
133 * mmapped by the application using the IORING_OFF_SQES offset.
134 *
135 * This indirection could e.g. be used to assign fixed
136 * io_uring_sqe entries to operations and only submit them to
137 * the queue when needed.
138 *
139 * The kernel modifies neither the indices array nor the entries
140 * array.
141 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700142 u32 array[];
143};
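/*
 * Illustrative userspace-side sketch of the indirection described above (a
 * minimal example, not part of this file; 'sqes', 'sq_array', 'sq_tail' and
 * 'sq_mask' are assumed to point into the IORING_OFF_SQES and
 * IORING_OFF_SQ_RING mmaps, and 'fd'/'iov' are an open file and a prepared
 * iovec):
 *
 *	struct io_uring_sqe *sqe = &sqes[0];	// sqe slot picked by the app
 *	unsigned tail;
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_READV;
 *	sqe->fd = fd;
 *	sqe->addr = (unsigned long) &iov;
 *	sqe->len = 1;
 *	sqe->user_data = 0xcafe;
 *
 *	tail = *sq_tail;
 *	sq_array[tail & *sq_mask] = 0;		// publish slot 0
 *	smp_store_release(sq_tail, tail + 1);	// see the ordering notes above
 */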
144
Stefan Bühler1e84b972019-04-24 23:54:16 +0200145/*
146 * This data is shared with the application through the mmap at offset
147 * IORING_OFF_CQ_RING.
148 *
149 * The offsets to the member fields are published through struct
150 * io_cqring_offsets when calling io_uring_setup.
151 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700152struct io_cq_ring {
Stefan Bühler1e84b972019-04-24 23:54:16 +0200153 /*
154 * Head and tail offsets into the ring; the offsets need to be
155 * masked to get valid indices.
156 *
157 * The application controls head and the kernel tail.
158 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700159 struct io_uring r;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200160 /*
161 * Bitmask to apply to head and tail offsets (constant, equals
162 * ring_entries - 1)
163 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700164 u32 ring_mask;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200165 /* Ring size (constant, power of 2) */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700166 u32 ring_entries;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200167 /*
168 * Number of completion events lost because the queue was full;
169 * this should be avoided by the application by making sure
 170 * there are not more requests pending than there is space in
171 * the completion queue.
172 *
173 * Written by the kernel, shouldn't be modified by the
174 * application (i.e. get number of "new events" by comparing to
175 * cached value).
176 *
177 * As completion events come in out of order this counter is not
178 * ordered with any other data.
179 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700180 u32 overflow;
Stefan Bühler1e84b972019-04-24 23:54:16 +0200181 /*
182 * Ring buffer of completion events.
183 *
184 * The kernel writes completion events fresh every time they are
185 * produced, so the application is allowed to modify pending
186 * entries.
187 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700188 struct io_uring_cqe cqes[];
189};
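/*
 * Illustrative userspace-side sketch of reaping entries from the ring above
 * (not part of this file; 'cq_head', 'cq_tail', 'cq_mask' and 'cqes' are
 * assumed to point into the IORING_OFF_CQ_RING mmap, and handle_cqe() is a
 * hypothetical application callback):
 *
 *	unsigned head = *cq_head;
 *
 *	// the acquire load pairs with the kernel's release store of the tail
 *	while (head != smp_load_acquire(cq_tail)) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *
 *		handle_cqe(cqe->user_data, cqe->res);
 *		head++;
 *	}
 *	// the release store lets the kernel reuse the consumed entries
 *	smp_store_release(cq_head, head);
 */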
190
Jens Axboeedafcce2019-01-09 09:16:05 -0700191struct io_mapped_ubuf {
192 u64 ubuf;
193 size_t len;
194 struct bio_vec *bvec;
195 unsigned int nr_bvecs;
196};
197
Jens Axboe31b51512019-01-18 22:56:34 -0700198struct async_list {
199 spinlock_t lock;
200 atomic_t cnt;
201 struct list_head list;
202
203 struct file *file;
204 off_t io_end;
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +0800205 size_t io_len;
Jens Axboe31b51512019-01-18 22:56:34 -0700206};
207
Jens Axboe2b188cc2019-01-07 10:46:33 -0700208struct io_ring_ctx {
209 struct {
210 struct percpu_ref refs;
211 } ____cacheline_aligned_in_smp;
212
213 struct {
214 unsigned int flags;
215 bool compat;
216 bool account_mem;
217
218 /* SQ ring */
219 struct io_sq_ring *sq_ring;
220 unsigned cached_sq_head;
221 unsigned sq_entries;
222 unsigned sq_mask;
Jens Axboe6c271ce2019-01-10 11:22:30 -0700223 unsigned sq_thread_idle;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700224 struct io_uring_sqe *sq_sqes;
Jens Axboede0617e2019-04-06 21:51:27 -0600225
226 struct list_head defer_list;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700227 } ____cacheline_aligned_in_smp;
228
229 /* IO offload */
230 struct workqueue_struct *sqo_wq;
Jens Axboe6c271ce2019-01-10 11:22:30 -0700231 struct task_struct *sqo_thread; /* if using sq thread polling */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700232 struct mm_struct *sqo_mm;
Jens Axboe6c271ce2019-01-10 11:22:30 -0700233 wait_queue_head_t sqo_wait;
Jackie Liua4c0b3d2019-07-08 13:41:12 +0800234 struct completion sqo_thread_started;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700235
236 struct {
237 /* CQ ring */
238 struct io_cq_ring *cq_ring;
239 unsigned cached_cq_tail;
240 unsigned cq_entries;
241 unsigned cq_mask;
242 struct wait_queue_head cq_wait;
243 struct fasync_struct *cq_fasync;
Jens Axboe9b402842019-04-11 11:45:41 -0600244 struct eventfd_ctx *cq_ev_fd;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700245 } ____cacheline_aligned_in_smp;
246
Jens Axboe6b063142019-01-10 22:13:58 -0700247 /*
248 * If used, fixed file set. Writers must ensure that ->refs is dead,
249 * readers must ensure that ->refs is alive as long as the file* is
250 * used. Only updated through io_uring_register(2).
251 */
252 struct file **user_files;
253 unsigned nr_user_files;
254
Jens Axboeedafcce2019-01-09 09:16:05 -0700255 /* if used, fixed mapped user buffers */
256 unsigned nr_user_bufs;
257 struct io_mapped_ubuf *user_bufs;
258
Jens Axboe2b188cc2019-01-07 10:46:33 -0700259 struct user_struct *user;
260
261 struct completion ctx_done;
262
263 struct {
264 struct mutex uring_lock;
265 wait_queue_head_t wait;
266 } ____cacheline_aligned_in_smp;
267
268 struct {
269 spinlock_t completion_lock;
Jens Axboedef596e2019-01-09 08:59:42 -0700270 bool poll_multi_file;
271 /*
272 * ->poll_list is protected by the ctx->uring_lock for
273 * io_uring instances that don't use IORING_SETUP_SQPOLL.
274 * For SQPOLL, only the single threaded io_sq_thread() will
275 * manipulate the list, hence no extra locking is needed there.
276 */
277 struct list_head poll_list;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700278 struct list_head cancel_list;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700279 } ____cacheline_aligned_in_smp;
280
Jens Axboe31b51512019-01-18 22:56:34 -0700281 struct async_list pending_async[2];
282
Jens Axboe2b188cc2019-01-07 10:46:33 -0700283#if defined(CONFIG_UNIX)
284 struct socket *ring_sock;
285#endif
286};
287
288struct sqe_submit {
289 const struct io_uring_sqe *sqe;
290 unsigned short index;
291 bool has_user;
Jens Axboedef596e2019-01-09 08:59:42 -0700292 bool needs_lock;
Jens Axboe6c271ce2019-01-10 11:22:30 -0700293 bool needs_fixed_file;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700294};
295
Jens Axboe09bb8392019-03-13 12:39:28 -0600296/*
297 * First field must be the file pointer in all the
298 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
299 */
Jens Axboe221c5eb2019-01-17 09:41:58 -0700300struct io_poll_iocb {
301 struct file *file;
302 struct wait_queue_head *head;
303 __poll_t events;
Jens Axboe8c838782019-03-12 15:48:16 -0600304 bool done;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700305 bool canceled;
306 struct wait_queue_entry wait;
307};
308
Jens Axboe09bb8392019-03-13 12:39:28 -0600309/*
310 * NOTE! Each of the iocb union members has the file pointer
311 * as the first entry in their struct definition. So you can
312 * access the file pointer through any of the sub-structs,
313 * or directly as just 'ki_filp' in this struct.
314 */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700315struct io_kiocb {
Jens Axboe221c5eb2019-01-17 09:41:58 -0700316 union {
Jens Axboe09bb8392019-03-13 12:39:28 -0600317 struct file *file;
Jens Axboe221c5eb2019-01-17 09:41:58 -0700318 struct kiocb rw;
319 struct io_poll_iocb poll;
320 };
Jens Axboe2b188cc2019-01-07 10:46:33 -0700321
322 struct sqe_submit submit;
323
324 struct io_ring_ctx *ctx;
325 struct list_head list;
Jens Axboe9e645e112019-05-10 16:07:28 -0600326 struct list_head link_list;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700327 unsigned int flags;
Jens Axboec16361c2019-01-17 08:39:48 -0700328 refcount_t refs;
Stefan Bühler8449eed2019-04-27 20:34:19 +0200329#define REQ_F_NOWAIT 1 /* must not punt to workers */
Jens Axboedef596e2019-01-09 08:59:42 -0700330#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
Jens Axboe6b063142019-01-10 22:13:58 -0700331#define REQ_F_FIXED_FILE 4 /* ctx owns file */
Jens Axboe31b51512019-01-18 22:56:34 -0700332#define REQ_F_SEQ_PREV 8 /* sequential with previous */
Stefan Bühlere2033e32019-05-11 19:08:01 +0200333#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
334#define REQ_F_IO_DRAINED 32 /* drain done */
Jens Axboe9e645e112019-05-10 16:07:28 -0600335#define REQ_F_LINK 64 /* linked sqes */
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +0800336#define REQ_F_LINK_DONE 128 /* linked sqes done */
337#define REQ_F_FAIL_LINK 256 /* fail rest of links */
Jens Axboe2b188cc2019-01-07 10:46:33 -0700338 u64 user_data;
Jens Axboe9e645e112019-05-10 16:07:28 -0600339 u32 result;
Jens Axboede0617e2019-04-06 21:51:27 -0600340 u32 sequence;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700341
342 struct work_struct work;
343};
344
345#define IO_PLUG_THRESHOLD 2
Jens Axboedef596e2019-01-09 08:59:42 -0700346#define IO_IOPOLL_BATCH 8
Jens Axboe2b188cc2019-01-07 10:46:33 -0700347
Jens Axboe9a56a232019-01-09 09:06:50 -0700348struct io_submit_state {
349 struct blk_plug plug;
350
351 /*
Jens Axboe2579f912019-01-09 09:10:43 -0700352 * io_kiocb alloc cache
353 */
354 void *reqs[IO_IOPOLL_BATCH];
355 unsigned int free_reqs;
356 unsigned int cur_req;
357
358 /*
Jens Axboe9a56a232019-01-09 09:06:50 -0700359 * File reference cache
360 */
361 struct file *file;
362 unsigned int fd;
363 unsigned int has_refs;
364 unsigned int used_refs;
365 unsigned int ios_left;
366};
367
Jens Axboede0617e2019-04-06 21:51:27 -0600368static void io_sq_wq_submit_work(struct work_struct *work);
369
Jens Axboe2b188cc2019-01-07 10:46:33 -0700370static struct kmem_cache *req_cachep;
371
372static const struct file_operations io_uring_fops;
373
374struct sock *io_uring_get_socket(struct file *file)
375{
376#if defined(CONFIG_UNIX)
377 if (file->f_op == &io_uring_fops) {
378 struct io_ring_ctx *ctx = file->private_data;
379
380 return ctx->ring_sock->sk;
381 }
382#endif
383 return NULL;
384}
385EXPORT_SYMBOL(io_uring_get_socket);
386
387static void io_ring_ctx_ref_free(struct percpu_ref *ref)
388{
389 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
390
391 complete(&ctx->ctx_done);
392}
393
394static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
395{
396 struct io_ring_ctx *ctx;
Jens Axboe31b51512019-01-18 22:56:34 -0700397 int i;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700398
399 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
400 if (!ctx)
401 return NULL;
402
Roman Gushchin21482892019-05-07 10:01:48 -0700403 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
404 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
Jens Axboe2b188cc2019-01-07 10:46:33 -0700405 kfree(ctx);
406 return NULL;
407 }
408
409 ctx->flags = p->flags;
410 init_waitqueue_head(&ctx->cq_wait);
411 init_completion(&ctx->ctx_done);
Jackie Liua4c0b3d2019-07-08 13:41:12 +0800412 init_completion(&ctx->sqo_thread_started);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700413 mutex_init(&ctx->uring_lock);
414 init_waitqueue_head(&ctx->wait);
Jens Axboe31b51512019-01-18 22:56:34 -0700415 for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
416 spin_lock_init(&ctx->pending_async[i].lock);
417 INIT_LIST_HEAD(&ctx->pending_async[i].list);
418 atomic_set(&ctx->pending_async[i].cnt, 0);
419 }
Jens Axboe2b188cc2019-01-07 10:46:33 -0700420 spin_lock_init(&ctx->completion_lock);
Jens Axboedef596e2019-01-09 08:59:42 -0700421 INIT_LIST_HEAD(&ctx->poll_list);
Jens Axboe221c5eb2019-01-17 09:41:58 -0700422 INIT_LIST_HEAD(&ctx->cancel_list);
Jens Axboede0617e2019-04-06 21:51:27 -0600423 INIT_LIST_HEAD(&ctx->defer_list);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700424 return ctx;
425}
426
Jens Axboede0617e2019-04-06 21:51:27 -0600427static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
428 struct io_kiocb *req)
429{
430 if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
431 return false;
432
Zhengyuan Liudbd0f6d2019-07-13 11:58:26 +0800433 return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped;
Jens Axboede0617e2019-04-06 21:51:27 -0600434}
435
436static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
437{
438 struct io_kiocb *req;
439
440 if (list_empty(&ctx->defer_list))
441 return NULL;
442
443 req = list_first_entry(&ctx->defer_list, struct io_kiocb, list);
444 if (!io_sequence_defer(ctx, req)) {
445 list_del_init(&req->list);
446 return req;
447 }
448
449 return NULL;
450}
451
452static void __io_commit_cqring(struct io_ring_ctx *ctx)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700453{
454 struct io_cq_ring *ring = ctx->cq_ring;
455
456 if (ctx->cached_cq_tail != READ_ONCE(ring->r.tail)) {
457 /* order cqe stores with ring update */
458 smp_store_release(&ring->r.tail, ctx->cached_cq_tail);
459
Jens Axboe2b188cc2019-01-07 10:46:33 -0700460 if (wq_has_sleeper(&ctx->cq_wait)) {
461 wake_up_interruptible(&ctx->cq_wait);
462 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
463 }
464 }
465}
466
Jens Axboede0617e2019-04-06 21:51:27 -0600467static void io_commit_cqring(struct io_ring_ctx *ctx)
468{
469 struct io_kiocb *req;
470
471 __io_commit_cqring(ctx);
472
473 while ((req = io_get_deferred_req(ctx)) != NULL) {
474 req->flags |= REQ_F_IO_DRAINED;
475 queue_work(ctx->sqo_wq, &req->work);
476 }
477}
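/*
 * Userspace-side sketch of what feeds the deferral handled above
 * (illustrative only, not part of this file): an sqe marked with
 * IOSQE_IO_DRAIN is held on the defer_list until all previously submitted
 * sqes have completed, and is then queued from io_commit_cqring().
 *
 *	sqe->opcode = IORING_OP_FSYNC;
 *	sqe->fd = fd;
 *	sqe->flags |= IOSQE_IO_DRAIN;	// start only after all prior sqes
 */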
478
Jens Axboe2b188cc2019-01-07 10:46:33 -0700479static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
480{
481 struct io_cq_ring *ring = ctx->cq_ring;
482 unsigned tail;
483
484 tail = ctx->cached_cq_tail;
Stefan Bühler115e12e2019-04-24 23:54:18 +0200485 /*
486 * writes to the cq entry need to come after reading head; the
487 * control dependency is enough as we're using WRITE_ONCE to
488 * fill the cq entry
489 */
Jens Axboe74f464e2019-04-17 08:57:48 -0600490 if (tail - READ_ONCE(ring->r.head) == ring->ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700491 return NULL;
492
493 ctx->cached_cq_tail++;
494 return &ring->cqes[tail & ctx->cq_mask];
495}
496
497static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
Jens Axboec71ffb62019-05-13 20:58:29 -0600498 long res)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700499{
500 struct io_uring_cqe *cqe;
501
502 /*
503 * If we can't get a cq entry, userspace overflowed the
504 * submission (by quite a lot). Increment the overflow count in
505 * the ring.
506 */
507 cqe = io_get_cqring(ctx);
508 if (cqe) {
509 WRITE_ONCE(cqe->user_data, ki_user_data);
510 WRITE_ONCE(cqe->res, res);
Jens Axboec71ffb62019-05-13 20:58:29 -0600511 WRITE_ONCE(cqe->flags, 0);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700512 } else {
513 unsigned overflow = READ_ONCE(ctx->cq_ring->overflow);
514
515 WRITE_ONCE(ctx->cq_ring->overflow, overflow + 1);
516 }
517}
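/*
 * A userspace-side sketch of consuming the overflow accounting above
 * (illustrative only; 'cq_overflow' is assumed to point at the 'overflow'
 * field published through struct io_cqring_offsets):
 *
 *	unsigned overflow = READ_ONCE(*cq_overflow);
 *
 *	if (overflow != last_overflow) {
 *		// (overflow - last_overflow) completions were lost because
 *		// the CQ ring was full; size the ring to cover all requests
 *		// in flight to avoid this
 *		last_overflow = overflow;
 *	}
 */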
518
Jens Axboe8c838782019-03-12 15:48:16 -0600519static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
520{
521 if (waitqueue_active(&ctx->wait))
522 wake_up(&ctx->wait);
523 if (waitqueue_active(&ctx->sqo_wait))
524 wake_up(&ctx->sqo_wait);
Jens Axboe9b402842019-04-11 11:45:41 -0600525 if (ctx->cq_ev_fd)
526 eventfd_signal(ctx->cq_ev_fd, 1);
Jens Axboe8c838782019-03-12 15:48:16 -0600527}
528
529static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
Jens Axboec71ffb62019-05-13 20:58:29 -0600530 long res)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700531{
532 unsigned long flags;
533
534 spin_lock_irqsave(&ctx->completion_lock, flags);
Jens Axboec71ffb62019-05-13 20:58:29 -0600535 io_cqring_fill_event(ctx, user_data, res);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700536 io_commit_cqring(ctx);
537 spin_unlock_irqrestore(&ctx->completion_lock, flags);
538
Jens Axboe8c838782019-03-12 15:48:16 -0600539 io_cqring_ev_posted(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700540}
541
542static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
543{
544 percpu_ref_put_many(&ctx->refs, refs);
545
546 if (waitqueue_active(&ctx->wait))
547 wake_up(&ctx->wait);
548}
549
Jens Axboe2579f912019-01-09 09:10:43 -0700550static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
551 struct io_submit_state *state)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700552{
Jens Axboefd6fab22019-03-14 16:30:06 -0600553 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700554 struct io_kiocb *req;
555
556 if (!percpu_ref_tryget(&ctx->refs))
557 return NULL;
558
Jens Axboe2579f912019-01-09 09:10:43 -0700559 if (!state) {
Jens Axboefd6fab22019-03-14 16:30:06 -0600560 req = kmem_cache_alloc(req_cachep, gfp);
Jens Axboe2579f912019-01-09 09:10:43 -0700561 if (unlikely(!req))
562 goto out;
563 } else if (!state->free_reqs) {
564 size_t sz;
565 int ret;
566
567 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
Jens Axboefd6fab22019-03-14 16:30:06 -0600568 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
569
570 /*
571 * Bulk alloc is all-or-nothing. If we fail to get a batch,
572 * retry single alloc to be on the safe side.
573 */
574 if (unlikely(ret <= 0)) {
575 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
576 if (!state->reqs[0])
577 goto out;
578 ret = 1;
579 }
Jens Axboe2579f912019-01-09 09:10:43 -0700580 state->free_reqs = ret - 1;
581 state->cur_req = 1;
582 req = state->reqs[0];
583 } else {
584 req = state->reqs[state->cur_req];
585 state->free_reqs--;
586 state->cur_req++;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700587 }
588
Jens Axboe60c112b2019-06-21 10:20:18 -0600589 req->file = NULL;
Jens Axboe2579f912019-01-09 09:10:43 -0700590 req->ctx = ctx;
591 req->flags = 0;
Jens Axboee65ef562019-03-12 10:16:44 -0600592 /* one is dropped after submission, the other at completion */
593 refcount_set(&req->refs, 2);
Jens Axboe9e645e112019-05-10 16:07:28 -0600594 req->result = 0;
Jens Axboe2579f912019-01-09 09:10:43 -0700595 return req;
596out:
Jens Axboe2b188cc2019-01-07 10:46:33 -0700597 io_ring_drop_ctx_refs(ctx, 1);
598 return NULL;
599}
600
Jens Axboedef596e2019-01-09 08:59:42 -0700601static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
602{
603 if (*nr) {
604 kmem_cache_free_bulk(req_cachep, *nr, reqs);
605 io_ring_drop_ctx_refs(ctx, *nr);
606 *nr = 0;
607 }
608}
609
Jens Axboe9e645e112019-05-10 16:07:28 -0600610static void __io_free_req(struct io_kiocb *req)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700611{
Jens Axboe09bb8392019-03-13 12:39:28 -0600612 if (req->file && !(req->flags & REQ_F_FIXED_FILE))
613 fput(req->file);
Jens Axboee65ef562019-03-12 10:16:44 -0600614 io_ring_drop_ctx_refs(req->ctx, 1);
615 kmem_cache_free(req_cachep, req);
616}
617
Jens Axboe9e645e112019-05-10 16:07:28 -0600618static void io_req_link_next(struct io_kiocb *req)
619{
620 struct io_kiocb *nxt;
621
622 /*
 623 * The list should never be empty when we are called here. But it could
 624 * potentially happen if the chain is messed up, so check to be on the
 625 * safe side.
626 */
627 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
628 if (nxt) {
629 list_del(&nxt->list);
630 if (!list_empty(&req->link_list)) {
631 INIT_LIST_HEAD(&nxt->link_list);
632 list_splice(&req->link_list, &nxt->link_list);
633 nxt->flags |= REQ_F_LINK;
634 }
635
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +0800636 nxt->flags |= REQ_F_LINK_DONE;
Jens Axboe9e645e112019-05-10 16:07:28 -0600637 INIT_WORK(&nxt->work, io_sq_wq_submit_work);
638 queue_work(req->ctx->sqo_wq, &nxt->work);
639 }
640}
641
642/*
643 * Called if REQ_F_LINK is set, and we fail the head request
644 */
645static void io_fail_links(struct io_kiocb *req)
646{
647 struct io_kiocb *link;
648
649 while (!list_empty(&req->link_list)) {
650 link = list_first_entry(&req->link_list, struct io_kiocb, list);
651 list_del(&link->list);
652
653 io_cqring_add_event(req->ctx, link->user_data, -ECANCELED);
654 __io_free_req(link);
655 }
656}
657
658static void io_free_req(struct io_kiocb *req)
659{
660 /*
661 * If LINK is set, we have dependent requests in this chain. If we
662 * didn't fail this request, queue the first one up, moving any other
663 * dependencies to the next request. In case of failure, fail the rest
664 * of the chain.
665 */
666 if (req->flags & REQ_F_LINK) {
667 if (req->flags & REQ_F_FAIL_LINK)
668 io_fail_links(req);
669 else
670 io_req_link_next(req);
671 }
672
673 __io_free_req(req);
674}
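/*
 * Userspace-side sketch of building such a chain (illustrative only, not
 * part of this file): IOSQE_IO_LINK makes the next sqe in the submission
 * depend on this one, and a failed link fails the rest of the chain with
 * -ECANCELED as done in io_fail_links() above.
 *
 *	sqe = &sqes[0];				// write first...
 *	sqe->opcode = IORING_OP_WRITEV;
 *	sqe->flags |= IOSQE_IO_LINK;
 *
 *	sqe = &sqes[1];				// ...then fsync, which only
 *	sqe->opcode = IORING_OP_FSYNC;		// runs after the write
 */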
675
Jens Axboee65ef562019-03-12 10:16:44 -0600676static void io_put_req(struct io_kiocb *req)
677{
678 if (refcount_dec_and_test(&req->refs))
679 io_free_req(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700680}
681
Jens Axboedef596e2019-01-09 08:59:42 -0700682/*
683 * Find and free completed poll iocbs
684 */
685static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
686 struct list_head *done)
687{
688 void *reqs[IO_IOPOLL_BATCH];
689 struct io_kiocb *req;
Jens Axboe09bb8392019-03-13 12:39:28 -0600690 int to_free;
Jens Axboedef596e2019-01-09 08:59:42 -0700691
Jens Axboe09bb8392019-03-13 12:39:28 -0600692 to_free = 0;
Jens Axboedef596e2019-01-09 08:59:42 -0700693 while (!list_empty(done)) {
694 req = list_first_entry(done, struct io_kiocb, list);
695 list_del(&req->list);
696
Jens Axboe9e645e112019-05-10 16:07:28 -0600697 io_cqring_fill_event(ctx, req->user_data, req->result);
Jens Axboedef596e2019-01-09 08:59:42 -0700698 (*nr_events)++;
699
Jens Axboe09bb8392019-03-13 12:39:28 -0600700 if (refcount_dec_and_test(&req->refs)) {
701 /* If we're not using fixed files, we have to pair the
702 * completion part with the file put. Use regular
703 * completions for those, only batch free for fixed
Jens Axboe9e645e112019-05-10 16:07:28 -0600704 * file and non-linked commands.
Jens Axboe09bb8392019-03-13 12:39:28 -0600705 */
Jens Axboe9e645e112019-05-10 16:07:28 -0600706 if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
707 REQ_F_FIXED_FILE) {
Jens Axboe09bb8392019-03-13 12:39:28 -0600708 reqs[to_free++] = req;
709 if (to_free == ARRAY_SIZE(reqs))
710 io_free_req_many(ctx, reqs, &to_free);
Jens Axboe6b063142019-01-10 22:13:58 -0700711 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -0600712 io_free_req(req);
Jens Axboe6b063142019-01-10 22:13:58 -0700713 }
Jens Axboe9a56a232019-01-09 09:06:50 -0700714 }
Jens Axboedef596e2019-01-09 08:59:42 -0700715 }
Jens Axboedef596e2019-01-09 08:59:42 -0700716
Jens Axboe09bb8392019-03-13 12:39:28 -0600717 io_commit_cqring(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -0700718 io_free_req_many(ctx, reqs, &to_free);
719}
720
721static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
722 long min)
723{
724 struct io_kiocb *req, *tmp;
725 LIST_HEAD(done);
726 bool spin;
727 int ret;
728
729 /*
730 * Only spin for completions if we don't have multiple devices hanging
731 * off our complete list, and we're under the requested amount.
732 */
733 spin = !ctx->poll_multi_file && *nr_events < min;
734
735 ret = 0;
736 list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
737 struct kiocb *kiocb = &req->rw;
738
739 /*
740 * Move completed entries to our local list. If we find a
741 * request that requires polling, break out and complete
742 * the done list first, if we have entries there.
743 */
744 if (req->flags & REQ_F_IOPOLL_COMPLETED) {
745 list_move_tail(&req->list, &done);
746 continue;
747 }
748 if (!list_empty(&done))
749 break;
750
751 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
752 if (ret < 0)
753 break;
754
755 if (ret && spin)
756 spin = false;
757 ret = 0;
758 }
759
760 if (!list_empty(&done))
761 io_iopoll_complete(ctx, nr_events, &done);
762
763 return ret;
764}
765
766/*
 766 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
768 * non-spinning poll check - we'll still enter the driver poll loop, but only
769 * as a non-spinning completion check.
770 */
771static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
772 long min)
773{
774 while (!list_empty(&ctx->poll_list)) {
775 int ret;
776
777 ret = io_do_iopoll(ctx, nr_events, min);
778 if (ret < 0)
779 return ret;
780 if (!min || *nr_events >= min)
781 return 0;
782 }
783
784 return 1;
785}
786
787/*
788 * We can't just wait for polled events to come to us, we have to actively
789 * find and complete them.
790 */
791static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
792{
793 if (!(ctx->flags & IORING_SETUP_IOPOLL))
794 return;
795
796 mutex_lock(&ctx->uring_lock);
797 while (!list_empty(&ctx->poll_list)) {
798 unsigned int nr_events = 0;
799
800 io_iopoll_getevents(ctx, &nr_events, 1);
801 }
802 mutex_unlock(&ctx->uring_lock);
803}
804
805static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
806 long min)
807{
808 int ret = 0;
809
810 do {
811 int tmin = 0;
812
813 if (*nr_events < min)
814 tmin = min - *nr_events;
815
816 ret = io_iopoll_getevents(ctx, nr_events, tmin);
817 if (ret <= 0)
818 break;
819 ret = 0;
820 } while (min && !*nr_events && !need_resched());
821
822 return ret;
823}
824
Jens Axboe2b188cc2019-01-07 10:46:33 -0700825static void kiocb_end_write(struct kiocb *kiocb)
826{
827 if (kiocb->ki_flags & IOCB_WRITE) {
828 struct inode *inode = file_inode(kiocb->ki_filp);
829
830 /*
831 * Tell lockdep we inherited freeze protection from submission
832 * thread.
833 */
834 if (S_ISREG(inode->i_mode))
835 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
836 file_end_write(kiocb->ki_filp);
837 }
838}
839
840static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
841{
842 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
843
844 kiocb_end_write(kiocb);
845
Jens Axboe9e645e112019-05-10 16:07:28 -0600846 if ((req->flags & REQ_F_LINK) && res != req->result)
847 req->flags |= REQ_F_FAIL_LINK;
Jens Axboec71ffb62019-05-13 20:58:29 -0600848 io_cqring_add_event(req->ctx, req->user_data, res);
Jens Axboee65ef562019-03-12 10:16:44 -0600849 io_put_req(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -0700850}
851
Jens Axboedef596e2019-01-09 08:59:42 -0700852static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
853{
854 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
855
856 kiocb_end_write(kiocb);
857
Jens Axboe9e645e112019-05-10 16:07:28 -0600858 if ((req->flags & REQ_F_LINK) && res != req->result)
859 req->flags |= REQ_F_FAIL_LINK;
860 req->result = res;
Jens Axboedef596e2019-01-09 08:59:42 -0700861 if (res != -EAGAIN)
862 req->flags |= REQ_F_IOPOLL_COMPLETED;
863}
864
865/*
866 * After the iocb has been issued, it's safe to be found on the poll list.
867 * Adding the kiocb to the list AFTER submission ensures that we don't
 868 * find it from an io_iopoll_getevents() thread before the issuer is done
869 * accessing the kiocb cookie.
870 */
871static void io_iopoll_req_issued(struct io_kiocb *req)
872{
873 struct io_ring_ctx *ctx = req->ctx;
874
875 /*
876 * Track whether we have multiple files in our lists. This will impact
877 * how we do polling eventually, not spinning if we're on potentially
878 * different devices.
879 */
880 if (list_empty(&ctx->poll_list)) {
881 ctx->poll_multi_file = false;
882 } else if (!ctx->poll_multi_file) {
883 struct io_kiocb *list_req;
884
885 list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
886 list);
887 if (list_req->rw.ki_filp != req->rw.ki_filp)
888 ctx->poll_multi_file = true;
889 }
890
891 /*
892 * For fast devices, IO may have already completed. If it has, add
893 * it to the front so we find it first.
894 */
895 if (req->flags & REQ_F_IOPOLL_COMPLETED)
896 list_add(&req->list, &ctx->poll_list);
897 else
898 list_add_tail(&req->list, &ctx->poll_list);
899}
900
Jens Axboe3d6770f2019-04-13 11:50:54 -0600901static void io_file_put(struct io_submit_state *state)
Jens Axboe9a56a232019-01-09 09:06:50 -0700902{
Jens Axboe3d6770f2019-04-13 11:50:54 -0600903 if (state->file) {
Jens Axboe9a56a232019-01-09 09:06:50 -0700904 int diff = state->has_refs - state->used_refs;
905
906 if (diff)
907 fput_many(state->file, diff);
908 state->file = NULL;
909 }
910}
911
912/*
913 * Get as many references to a file as we have IOs left in this submission,
914 * assuming most submissions are for one file, or at least that each file
915 * has more than one submission.
916 */
917static struct file *io_file_get(struct io_submit_state *state, int fd)
918{
919 if (!state)
920 return fget(fd);
921
922 if (state->file) {
923 if (state->fd == fd) {
924 state->used_refs++;
925 state->ios_left--;
926 return state->file;
927 }
Jens Axboe3d6770f2019-04-13 11:50:54 -0600928 io_file_put(state);
Jens Axboe9a56a232019-01-09 09:06:50 -0700929 }
930 state->file = fget_many(fd, state->ios_left);
931 if (!state->file)
932 return NULL;
933
934 state->fd = fd;
935 state->has_refs = state->ios_left;
936 state->used_refs = 1;
937 state->ios_left--;
938 return state->file;
939}
940
Jens Axboe2b188cc2019-01-07 10:46:33 -0700941/*
942 * If we tracked the file through the SCM inflight mechanism, we could support
943 * any file. For now, just ensure that anything potentially problematic is done
944 * inline.
945 */
946static bool io_file_supports_async(struct file *file)
947{
948 umode_t mode = file_inode(file)->i_mode;
949
950 if (S_ISBLK(mode) || S_ISCHR(mode))
951 return true;
952 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
953 return true;
954
955 return false;
956}
957
Jens Axboe6c271ce2019-01-10 11:22:30 -0700958static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
Jens Axboe8358e3a2019-04-23 08:17:58 -0600959 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700960{
Jens Axboe6c271ce2019-01-10 11:22:30 -0700961 const struct io_uring_sqe *sqe = s->sqe;
Jens Axboedef596e2019-01-09 08:59:42 -0700962 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700963 struct kiocb *kiocb = &req->rw;
Jens Axboe09bb8392019-03-13 12:39:28 -0600964 unsigned ioprio;
965 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700966
Jens Axboe09bb8392019-03-13 12:39:28 -0600967 if (!req->file)
968 return -EBADF;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700969
Jens Axboe09bb8392019-03-13 12:39:28 -0600970 if (force_nonblock && !io_file_supports_async(req->file))
971 force_nonblock = false;
Jens Axboe6b063142019-01-10 22:13:58 -0700972
Jens Axboe2b188cc2019-01-07 10:46:33 -0700973 kiocb->ki_pos = READ_ONCE(sqe->off);
974 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
975 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
976
977 ioprio = READ_ONCE(sqe->ioprio);
978 if (ioprio) {
979 ret = ioprio_check_cap(ioprio);
980 if (ret)
Jens Axboe09bb8392019-03-13 12:39:28 -0600981 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -0700982
983 kiocb->ki_ioprio = ioprio;
984 } else
985 kiocb->ki_ioprio = get_current_ioprio();
986
987 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
988 if (unlikely(ret))
Jens Axboe09bb8392019-03-13 12:39:28 -0600989 return ret;
Stefan Bühler8449eed2019-04-27 20:34:19 +0200990
991 /* don't allow async punt if RWF_NOWAIT was requested */
992 if (kiocb->ki_flags & IOCB_NOWAIT)
993 req->flags |= REQ_F_NOWAIT;
994
995 if (force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -0700996 kiocb->ki_flags |= IOCB_NOWAIT;
Stefan Bühler8449eed2019-04-27 20:34:19 +0200997
Jens Axboedef596e2019-01-09 08:59:42 -0700998 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboedef596e2019-01-09 08:59:42 -0700999 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
1000 !kiocb->ki_filp->f_op->iopoll)
Jens Axboe09bb8392019-03-13 12:39:28 -06001001 return -EOPNOTSUPP;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001002
Jens Axboedef596e2019-01-09 08:59:42 -07001003 kiocb->ki_flags |= IOCB_HIPRI;
1004 kiocb->ki_complete = io_complete_rw_iopoll;
1005 } else {
Jens Axboe09bb8392019-03-13 12:39:28 -06001006 if (kiocb->ki_flags & IOCB_HIPRI)
1007 return -EINVAL;
Jens Axboedef596e2019-01-09 08:59:42 -07001008 kiocb->ki_complete = io_complete_rw;
1009 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001010 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001011}
1012
1013static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
1014{
1015 switch (ret) {
1016 case -EIOCBQUEUED:
1017 break;
1018 case -ERESTARTSYS:
1019 case -ERESTARTNOINTR:
1020 case -ERESTARTNOHAND:
1021 case -ERESTART_RESTARTBLOCK:
1022 /*
1023 * We can't just restart the syscall, since previously
1024 * submitted sqes may already be in progress. Just fail this
1025 * IO with EINTR.
1026 */
1027 ret = -EINTR;
1028 /* fall through */
1029 default:
1030 kiocb->ki_complete(kiocb, ret, 0);
1031 }
1032}
1033
Jens Axboeedafcce2019-01-09 09:16:05 -07001034static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
1035 const struct io_uring_sqe *sqe,
1036 struct iov_iter *iter)
1037{
1038 size_t len = READ_ONCE(sqe->len);
1039 struct io_mapped_ubuf *imu;
1040 unsigned index, buf_index;
1041 size_t offset;
1042 u64 buf_addr;
1043
1044 /* attempt to use fixed buffers without having provided iovecs */
1045 if (unlikely(!ctx->user_bufs))
1046 return -EFAULT;
1047
1048 buf_index = READ_ONCE(sqe->buf_index);
1049 if (unlikely(buf_index >= ctx->nr_user_bufs))
1050 return -EFAULT;
1051
1052 index = array_index_nospec(buf_index, ctx->nr_user_bufs);
1053 imu = &ctx->user_bufs[index];
1054 buf_addr = READ_ONCE(sqe->addr);
1055
1056 /* overflow */
1057 if (buf_addr + len < buf_addr)
1058 return -EFAULT;
1059 /* not inside the mapped region */
1060 if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
1061 return -EFAULT;
1062
1063 /*
1064 * May not be the start of the buffer; set the size appropriately
1065 * and advance us to the beginning.
1066 */
1067 offset = buf_addr - imu->ubuf;
1068 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
Jens Axboebd11b3a2019-07-20 08:37:31 -06001069
1070 if (offset) {
1071 /*
1072 * Don't use iov_iter_advance() here, as it's really slow for
1073 * using the latter parts of a big fixed buffer - it iterates
1074 * over each segment manually. We can cheat a bit here, because
1075 * we know that:
1076 *
1077 * 1) it's a BVEC iter, we set it up
1078 * 2) all bvecs are PAGE_SIZE in size, except potentially the
1079 * first and last bvec
1080 *
1081 * So just find our index, and adjust the iterator afterwards.
1082 * If the offset is within the first bvec (or the whole first
1083 * bvec), just use iov_iter_advance(). This makes it easier
1084 * since we can just skip the first segment, which may not
1085 * be PAGE_SIZE aligned.
1086 */
1087 const struct bio_vec *bvec = imu->bvec;
1088
1089 if (offset <= bvec->bv_len) {
1090 iov_iter_advance(iter, offset);
1091 } else {
1092 unsigned long seg_skip;
1093
1094 /* skip first vec */
1095 offset -= bvec->bv_len;
1096 seg_skip = 1 + (offset >> PAGE_SHIFT);
1097
1098 iter->bvec = bvec + seg_skip;
1099 iter->nr_segs -= seg_skip;
1100 iter->count -= (seg_skip << PAGE_SHIFT);
1101 iter->iov_offset = offset & ~PAGE_MASK;
1102 if (iter->iov_offset)
1103 iter->count -= iter->iov_offset;
1104 }
1105 }
1106
Jens Axboeedafcce2019-01-09 09:16:05 -07001107 return 0;
1108}
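/*
 * Userspace-side sketch of driving the fixed-buffer path above
 * (illustrative only, not part of this file; io_uring_register() stands
 * for the raw syscall or the liburing wrapper, and 'buf'/'buf_len'/'off'/
 * 'len' are application values): the buffers are registered once, and each
 * fixed read/write then names one by index, with sqe->addr allowed to
 * point anywhere inside the registered range.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = buf_len };
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 *
 *	sqe->opcode = IORING_OP_READ_FIXED;
 *	sqe->buf_index = 0;			// index of the registered buf
 *	sqe->addr = (unsigned long) buf + off;	// offset within that buffer
 *	sqe->len = len;
 */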
1109
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001110static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
1111 const struct sqe_submit *s, struct iovec **iovec,
1112 struct iov_iter *iter)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001113{
1114 const struct io_uring_sqe *sqe = s->sqe;
1115 void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1116 size_t sqe_len = READ_ONCE(sqe->len);
Jens Axboeedafcce2019-01-09 09:16:05 -07001117 u8 opcode;
1118
1119 /*
1120 * We're reading ->opcode for the second time, but the first read
1121 * doesn't care whether it's _FIXED or not, so it doesn't matter
1122 * whether ->opcode changes concurrently. The first read does care
1123 * about whether it is a READ or a WRITE, so we don't trust this read
1124 * for that purpose and instead let the caller pass in the read/write
1125 * flag.
1126 */
1127 opcode = READ_ONCE(sqe->opcode);
1128 if (opcode == IORING_OP_READ_FIXED ||
1129 opcode == IORING_OP_WRITE_FIXED) {
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001130 ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
Jens Axboeedafcce2019-01-09 09:16:05 -07001131 *iovec = NULL;
1132 return ret;
1133 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001134
1135 if (!s->has_user)
1136 return -EFAULT;
1137
1138#ifdef CONFIG_COMPAT
1139 if (ctx->compat)
1140 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
1141 iovec, iter);
1142#endif
1143
1144 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
1145}
1146
Jens Axboe31b51512019-01-18 22:56:34 -07001147/*
1148 * Make a note of the last file/offset/direction we punted to async
1149 * context. We'll use this information to see if we can piggy back a
1150 * sequential request onto the previous one, if it still hasn't been
1151 * completed by the async worker.
1152 */
1153static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
1154{
1155 struct async_list *async_list = &req->ctx->pending_async[rw];
1156 struct kiocb *kiocb = &req->rw;
1157 struct file *filp = kiocb->ki_filp;
1158 off_t io_end = kiocb->ki_pos + len;
1159
1160 if (filp == async_list->file && kiocb->ki_pos == async_list->io_end) {
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001161 unsigned long max_bytes;
Jens Axboe31b51512019-01-18 22:56:34 -07001162
1163 /* Use 8x RA size as a decent limiter for both reads/writes */
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001164 max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
1165 if (!max_bytes)
1166 max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);
Jens Axboe31b51512019-01-18 22:56:34 -07001167
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001168 /* If max len is exceeded, reset the state */
1169 if (async_list->io_len + len <= max_bytes) {
Jens Axboe31b51512019-01-18 22:56:34 -07001170 req->flags |= REQ_F_SEQ_PREV;
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001171 async_list->io_len += len;
Jens Axboe31b51512019-01-18 22:56:34 -07001172 } else {
1173 io_end = 0;
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001174 async_list->io_len = 0;
Jens Axboe31b51512019-01-18 22:56:34 -07001175 }
1176 }
1177
1178 /* New file? Reset state. */
1179 if (async_list->file != filp) {
Zhengyuan Liu9310a7ba2019-07-22 10:23:27 +08001180 async_list->io_len = 0;
Jens Axboe31b51512019-01-18 22:56:34 -07001181 async_list->file = filp;
1182 }
1183 async_list->io_end = io_end;
1184}
1185
Jens Axboee0c5c572019-03-12 10:18:47 -06001186static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
Jens Axboe8358e3a2019-04-23 08:17:58 -06001187 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001188{
1189 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1190 struct kiocb *kiocb = &req->rw;
1191 struct iov_iter iter;
1192 struct file *file;
Jens Axboe31b51512019-01-18 22:56:34 -07001193 size_t iov_count;
Jens Axboe9d93a3f2019-05-15 13:53:07 -06001194 ssize_t read_size, ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001195
Jens Axboe8358e3a2019-04-23 08:17:58 -06001196 ret = io_prep_rw(req, s, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001197 if (ret)
1198 return ret;
1199 file = kiocb->ki_filp;
1200
Jens Axboe2b188cc2019-01-07 10:46:33 -07001201 if (unlikely(!(file->f_mode & FMODE_READ)))
Jens Axboe09bb8392019-03-13 12:39:28 -06001202 return -EBADF;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001203 if (unlikely(!file->f_op->read_iter))
Jens Axboe09bb8392019-03-13 12:39:28 -06001204 return -EINVAL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001205
1206 ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001207 if (ret < 0)
Jens Axboe09bb8392019-03-13 12:39:28 -06001208 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001209
Jens Axboe9d93a3f2019-05-15 13:53:07 -06001210 read_size = ret;
Jens Axboe9e645e112019-05-10 16:07:28 -06001211 if (req->flags & REQ_F_LINK)
1212 req->result = read_size;
1213
Jens Axboe31b51512019-01-18 22:56:34 -07001214 iov_count = iov_iter_count(&iter);
1215 ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001216 if (!ret) {
1217 ssize_t ret2;
1218
Jens Axboe2b188cc2019-01-07 10:46:33 -07001219 ret2 = call_read_iter(file, kiocb, &iter);
Jens Axboe9d93a3f2019-05-15 13:53:07 -06001220 /*
1221 * In case of a short read, punt to async. This can happen
1222 * if we have data partially cached. Alternatively we can
1223 * return the short read, in which case the application will
1224 * need to issue another SQE and wait for it. That SQE will
1225 * need async punt anyway, so it's more efficient to do it
1226 * here.
1227 */
1228 if (force_nonblock && ret2 > 0 && ret2 < read_size)
1229 ret2 = -EAGAIN;
1230 /* Catch -EAGAIN return for forced non-blocking submission */
Jens Axboe31b51512019-01-18 22:56:34 -07001231 if (!force_nonblock || ret2 != -EAGAIN) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07001232 io_rw_done(kiocb, ret2);
Jens Axboe31b51512019-01-18 22:56:34 -07001233 } else {
1234 /*
1235 * If ->needs_lock is true, we're already in async
1236 * context.
1237 */
1238 if (!s->needs_lock)
1239 io_async_list_note(READ, req, iov_count);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001240 ret = -EAGAIN;
Jens Axboe31b51512019-01-18 22:56:34 -07001241 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001242 }
1243 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001244 return ret;
1245}
1246
Jens Axboee0c5c572019-03-12 10:18:47 -06001247static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
Jens Axboe8358e3a2019-04-23 08:17:58 -06001248 bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001249{
1250 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1251 struct kiocb *kiocb = &req->rw;
1252 struct iov_iter iter;
1253 struct file *file;
Jens Axboe31b51512019-01-18 22:56:34 -07001254 size_t iov_count;
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001255 ssize_t ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001256
Jens Axboe8358e3a2019-04-23 08:17:58 -06001257 ret = io_prep_rw(req, s, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001258 if (ret)
1259 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001260
Jens Axboe2b188cc2019-01-07 10:46:33 -07001261 file = kiocb->ki_filp;
1262 if (unlikely(!(file->f_mode & FMODE_WRITE)))
Jens Axboe09bb8392019-03-13 12:39:28 -06001263 return -EBADF;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001264 if (unlikely(!file->f_op->write_iter))
Jens Axboe09bb8392019-03-13 12:39:28 -06001265 return -EINVAL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001266
1267 ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
Jens Axboe87e5e6d2019-05-14 16:02:22 -06001268 if (ret < 0)
Jens Axboe09bb8392019-03-13 12:39:28 -06001269 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001270
Jens Axboe9e645e112019-05-10 16:07:28 -06001271 if (req->flags & REQ_F_LINK)
1272 req->result = ret;
1273
Jens Axboe31b51512019-01-18 22:56:34 -07001274 iov_count = iov_iter_count(&iter);
1275
1276 ret = -EAGAIN;
1277 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
1278 /* If ->needs_lock is true, we're already in async context. */
1279 if (!s->needs_lock)
1280 io_async_list_note(WRITE, req, iov_count);
1281 goto out_free;
1282 }
1283
1284 ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001285 if (!ret) {
Roman Penyaev9bf79332019-03-25 20:09:24 +01001286 ssize_t ret2;
1287
Jens Axboe2b188cc2019-01-07 10:46:33 -07001288 /*
1289 * Open-code file_start_write here to grab freeze protection,
1290 * which will be released by another thread in
1291 * io_complete_rw(). Fool lockdep by telling it the lock got
1292 * released so that it doesn't complain about the held lock when
1293 * we return to userspace.
1294 */
1295 if (S_ISREG(file_inode(file)->i_mode)) {
1296 __sb_start_write(file_inode(file)->i_sb,
1297 SB_FREEZE_WRITE, true);
1298 __sb_writers_release(file_inode(file)->i_sb,
1299 SB_FREEZE_WRITE);
1300 }
1301 kiocb->ki_flags |= IOCB_WRITE;
Roman Penyaev9bf79332019-03-25 20:09:24 +01001302
1303 ret2 = call_write_iter(file, kiocb, &iter);
1304 if (!force_nonblock || ret2 != -EAGAIN) {
1305 io_rw_done(kiocb, ret2);
1306 } else {
1307 /*
1308 * If ->needs_lock is true, we're already in async
1309 * context.
1310 */
1311 if (!s->needs_lock)
1312 io_async_list_note(WRITE, req, iov_count);
1313 ret = -EAGAIN;
1314 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001315 }
Jens Axboe31b51512019-01-18 22:56:34 -07001316out_free:
Jens Axboe2b188cc2019-01-07 10:46:33 -07001317 kfree(iovec);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001318 return ret;
1319}
1320
1321/*
1322 * IORING_OP_NOP just posts a completion event, nothing else.
1323 */
1324static int io_nop(struct io_kiocb *req, u64 user_data)
1325{
1326 struct io_ring_ctx *ctx = req->ctx;
1327 long err = 0;
1328
Jens Axboedef596e2019-01-09 08:59:42 -07001329 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1330 return -EINVAL;
1331
Jens Axboec71ffb62019-05-13 20:58:29 -06001332 io_cqring_add_event(ctx, user_data, err);
Jens Axboee65ef562019-03-12 10:16:44 -06001333 io_put_req(req);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001334 return 0;
1335}
1336
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001337static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1338{
Jens Axboe6b063142019-01-10 22:13:58 -07001339 struct io_ring_ctx *ctx = req->ctx;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001340
Jens Axboe09bb8392019-03-13 12:39:28 -06001341 if (!req->file)
1342 return -EBADF;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001343
Jens Axboe6b063142019-01-10 22:13:58 -07001344 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
Jens Axboedef596e2019-01-09 08:59:42 -07001345 return -EINVAL;
Jens Axboeedafcce2019-01-09 09:16:05 -07001346 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001347 return -EINVAL;
1348
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001349 return 0;
1350}
1351
1352static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1353 bool force_nonblock)
1354{
1355 loff_t sqe_off = READ_ONCE(sqe->off);
1356 loff_t sqe_len = READ_ONCE(sqe->len);
1357 loff_t end = sqe_off + sqe_len;
1358 unsigned fsync_flags;
1359 int ret;
1360
1361 fsync_flags = READ_ONCE(sqe->fsync_flags);
1362 if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
1363 return -EINVAL;
1364
1365 ret = io_prep_fsync(req, sqe);
1366 if (ret)
1367 return ret;
1368
1369 /* fsync always requires a blocking context */
1370 if (force_nonblock)
1371 return -EAGAIN;
1372
1373 ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
1374 end > 0 ? end : LLONG_MAX,
1375 fsync_flags & IORING_FSYNC_DATASYNC);
1376
Jens Axboe9e645e112019-05-10 16:07:28 -06001377 if (ret < 0 && (req->flags & REQ_F_LINK))
1378 req->flags |= REQ_F_FAIL_LINK;
Jens Axboec71ffb62019-05-13 20:58:29 -06001379 io_cqring_add_event(req->ctx, sqe->user_data, ret);
Jens Axboee65ef562019-03-12 10:16:44 -06001380 io_put_req(req);
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001381 return 0;
1382}
1383
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001384static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1385{
1386 struct io_ring_ctx *ctx = req->ctx;
1387 int ret = 0;
1388
1389 if (!req->file)
1390 return -EBADF;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001391
1392 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1393 return -EINVAL;
1394 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
1395 return -EINVAL;
1396
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001397 return ret;
1398}
1399
1400static int io_sync_file_range(struct io_kiocb *req,
1401 const struct io_uring_sqe *sqe,
1402 bool force_nonblock)
1403{
1404 loff_t sqe_off;
1405 loff_t sqe_len;
1406 unsigned flags;
1407 int ret;
1408
1409 ret = io_prep_sfr(req, sqe);
1410 if (ret)
1411 return ret;
1412
1413 /* sync_file_range always requires a blocking context */
1414 if (force_nonblock)
1415 return -EAGAIN;
1416
1417 sqe_off = READ_ONCE(sqe->off);
1418 sqe_len = READ_ONCE(sqe->len);
1419 flags = READ_ONCE(sqe->sync_range_flags);
1420
1421 ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
1422
Jens Axboe9e645e112019-05-10 16:07:28 -06001423 if (ret < 0 && (req->flags & REQ_F_LINK))
1424 req->flags |= REQ_F_FAIL_LINK;
Jens Axboec71ffb62019-05-13 20:58:29 -06001425 io_cqring_add_event(req->ctx, sqe->user_data, ret);
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001426 io_put_req(req);
1427 return 0;
1428}
1429
Jens Axboe0fa03c62019-04-19 13:34:07 -06001430#if defined(CONFIG_NET)
Jens Axboeaa1fa282019-04-19 13:38:09 -06001431static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1432 bool force_nonblock,
1433 long (*fn)(struct socket *, struct user_msghdr __user *,
1434 unsigned int))
1435{
Jens Axboe0fa03c62019-04-19 13:34:07 -06001436 struct socket *sock;
1437 int ret;
1438
1439 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1440 return -EINVAL;
1441
1442 sock = sock_from_file(req->file, &ret);
1443 if (sock) {
1444 struct user_msghdr __user *msg;
1445 unsigned flags;
1446
1447 flags = READ_ONCE(sqe->msg_flags);
1448 if (flags & MSG_DONTWAIT)
1449 req->flags |= REQ_F_NOWAIT;
1450 else if (force_nonblock)
1451 flags |= MSG_DONTWAIT;
1452
1453 msg = (struct user_msghdr __user *) (unsigned long)
1454 READ_ONCE(sqe->addr);
1455
Jens Axboeaa1fa282019-04-19 13:38:09 -06001456 ret = fn(sock, msg, flags);
Jens Axboe0fa03c62019-04-19 13:34:07 -06001457 if (force_nonblock && ret == -EAGAIN)
1458 return ret;
1459 }
1460
1461 io_cqring_add_event(req->ctx, sqe->user_data, ret);
1462 io_put_req(req);
1463 return 0;
Jens Axboeaa1fa282019-04-19 13:38:09 -06001464}
1465#endif
1466
1467static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1468 bool force_nonblock)
1469{
1470#if defined(CONFIG_NET)
1471 return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock);
1472#else
1473 return -EOPNOTSUPP;
1474#endif
1475}
1476
1477static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1478 bool force_nonblock)
1479{
1480#if defined(CONFIG_NET)
1481 return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock);
Jens Axboe0fa03c62019-04-19 13:34:07 -06001482#else
1483 return -EOPNOTSUPP;
1484#endif
1485}
1486
Jens Axboe221c5eb2019-01-17 09:41:58 -07001487static void io_poll_remove_one(struct io_kiocb *req)
1488{
1489 struct io_poll_iocb *poll = &req->poll;
1490
1491 spin_lock(&poll->head->lock);
1492 WRITE_ONCE(poll->canceled, true);
1493 if (!list_empty(&poll->wait.entry)) {
1494 list_del_init(&poll->wait.entry);
1495 queue_work(req->ctx->sqo_wq, &req->work);
1496 }
1497 spin_unlock(&poll->head->lock);
1498
1499 list_del_init(&req->list);
1500}
1501
1502static void io_poll_remove_all(struct io_ring_ctx *ctx)
1503{
1504 struct io_kiocb *req;
1505
1506 spin_lock_irq(&ctx->completion_lock);
1507 while (!list_empty(&ctx->cancel_list)) {
1508 req = list_first_entry(&ctx->cancel_list, struct io_kiocb, list);
1509 io_poll_remove_one(req);
1510 }
1511 spin_unlock_irq(&ctx->completion_lock);
1512}
1513
1514/*
1515 * Find a running poll command that matches one specified in sqe->addr,
1516 * and remove it if found.
1517 */
1518static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1519{
1520 struct io_ring_ctx *ctx = req->ctx;
1521 struct io_kiocb *poll_req, *next;
1522 int ret = -ENOENT;
1523
1524 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1525 return -EINVAL;
1526 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
1527 sqe->poll_events)
1528 return -EINVAL;
1529
1530 spin_lock_irq(&ctx->completion_lock);
1531 list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
1532 if (READ_ONCE(sqe->addr) == poll_req->user_data) {
1533 io_poll_remove_one(poll_req);
1534 ret = 0;
1535 break;
1536 }
1537 }
1538 spin_unlock_irq(&ctx->completion_lock);
1539
Jens Axboec71ffb62019-05-13 20:58:29 -06001540 io_cqring_add_event(req->ctx, sqe->user_data, ret);
Jens Axboee65ef562019-03-12 10:16:44 -06001541 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001542 return 0;
1543}
1544
Jens Axboe8c838782019-03-12 15:48:16 -06001545static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
1546 __poll_t mask)
Jens Axboe221c5eb2019-01-17 09:41:58 -07001547{
Jens Axboe8c838782019-03-12 15:48:16 -06001548 req->poll.done = true;
Jens Axboec71ffb62019-05-13 20:58:29 -06001549 io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
Jens Axboe8c838782019-03-12 15:48:16 -06001550 io_commit_cqring(ctx);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001551}
1552
1553static void io_poll_complete_work(struct work_struct *work)
1554{
1555 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1556 struct io_poll_iocb *poll = &req->poll;
1557 struct poll_table_struct pt = { ._key = poll->events };
1558 struct io_ring_ctx *ctx = req->ctx;
1559 __poll_t mask = 0;
1560
1561 if (!READ_ONCE(poll->canceled))
1562 mask = vfs_poll(poll->file, &pt) & poll->events;
1563
1564 /*
1565 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1566 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
1567 * synchronize with them. In the cancellation case the list_del_init
1568 * itself is not actually needed, but it is harmless, so we keep it in to
1569 * avoid further branches in the fast path.
1570 */
1571 spin_lock_irq(&ctx->completion_lock);
1572 if (!mask && !READ_ONCE(poll->canceled)) {
1573 add_wait_queue(poll->head, &poll->wait);
1574 spin_unlock_irq(&ctx->completion_lock);
1575 return;
1576 }
1577 list_del_init(&req->list);
Jens Axboe8c838782019-03-12 15:48:16 -06001578 io_poll_complete(ctx, req, mask);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001579 spin_unlock_irq(&ctx->completion_lock);
1580
Jens Axboe8c838782019-03-12 15:48:16 -06001581 io_cqring_ev_posted(ctx);
1582 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001583}
1584
1585static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1586 void *key)
1587{
1588 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
1589 wait);
1590 struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
1591 struct io_ring_ctx *ctx = req->ctx;
1592 __poll_t mask = key_to_poll(key);
Jens Axboe8c838782019-03-12 15:48:16 -06001593 unsigned long flags;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001594
1595 /* for instances that support it, check for an event match first */
Jens Axboe8c838782019-03-12 15:48:16 -06001596 if (mask && !(mask & poll->events))
1597 return 0;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001598
1599 list_del_init(&poll->wait.entry);
Jens Axboe8c838782019-03-12 15:48:16 -06001600
1601 if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
1602 list_del(&req->list);
1603 io_poll_complete(ctx, req, mask);
1604 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1605
1606 io_cqring_ev_posted(ctx);
1607 io_put_req(req);
1608 } else {
1609 queue_work(ctx->sqo_wq, &req->work);
1610 }
1611
Jens Axboe221c5eb2019-01-17 09:41:58 -07001612 return 1;
1613}
1614
1615struct io_poll_table {
1616 struct poll_table_struct pt;
1617 struct io_kiocb *req;
1618 int error;
1619};
1620
1621static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1622 struct poll_table_struct *p)
1623{
1624 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
1625
1626 if (unlikely(pt->req->poll.head)) {
1627 pt->error = -EINVAL;
1628 return;
1629 }
1630
1631 pt->error = 0;
1632 pt->req->poll.head = head;
1633 add_wait_queue(head, &pt->req->poll.wait);
1634}
1635
1636static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1637{
1638 struct io_poll_iocb *poll = &req->poll;
1639 struct io_ring_ctx *ctx = req->ctx;
1640 struct io_poll_table ipt;
Jens Axboe8c838782019-03-12 15:48:16 -06001641 bool cancel = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001642 __poll_t mask;
1643 u16 events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001644
1645 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1646 return -EINVAL;
1647 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1648 return -EINVAL;
Jens Axboe09bb8392019-03-13 12:39:28 -06001649 if (!poll->file)
1650 return -EBADF;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001651
1652 INIT_WORK(&req->work, io_poll_complete_work);
1653 events = READ_ONCE(sqe->poll_events);
1654 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
1655
Jens Axboe221c5eb2019-01-17 09:41:58 -07001656 poll->head = NULL;
Jens Axboe8c838782019-03-12 15:48:16 -06001657 poll->done = false;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001658 poll->canceled = false;
1659
1660 ipt.pt._qproc = io_poll_queue_proc;
1661 ipt.pt._key = poll->events;
1662 ipt.req = req;
1663 ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1664
1665 /* initialize the list so that we can do list_empty checks */
1666 INIT_LIST_HEAD(&poll->wait.entry);
1667 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
1668
Jens Axboe221c5eb2019-01-17 09:41:58 -07001669 mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001670
1671 spin_lock_irq(&ctx->completion_lock);
Jens Axboe8c838782019-03-12 15:48:16 -06001672 if (likely(poll->head)) {
1673 spin_lock(&poll->head->lock);
1674 if (unlikely(list_empty(&poll->wait.entry))) {
1675 if (ipt.error)
1676 cancel = true;
1677 ipt.error = 0;
1678 mask = 0;
1679 }
1680 if (mask || ipt.error)
1681 list_del_init(&poll->wait.entry);
1682 else if (cancel)
1683 WRITE_ONCE(poll->canceled, true);
1684 else if (!poll->done) /* actually waiting for an event */
1685 list_add_tail(&req->list, &ctx->cancel_list);
1686 spin_unlock(&poll->head->lock);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001687 }
Jens Axboe8c838782019-03-12 15:48:16 -06001688 if (mask) { /* no async, we'd stolen it */
Jens Axboe8c838782019-03-12 15:48:16 -06001689 ipt.error = 0;
1690 io_poll_complete(ctx, req, mask);
1691 }
Jens Axboe221c5eb2019-01-17 09:41:58 -07001692 spin_unlock_irq(&ctx->completion_lock);
1693
Jens Axboe8c838782019-03-12 15:48:16 -06001694 if (mask) {
1695 io_cqring_ev_posted(ctx);
Jens Axboee65ef562019-03-12 10:16:44 -06001696 io_put_req(req);
Jens Axboe221c5eb2019-01-17 09:41:58 -07001697 }
Jens Axboe8c838782019-03-12 15:48:16 -06001698 return ipt.error;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001699}
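
/*
 * A minimal userspace sketch of the poll commands above; get_sqe() and
 * sockfd are hypothetical. Note that io_poll_add() rejects SQEs with
 * addr/ioprio/off/len/buf_index set, and io_poll_remove() matches on the
 * user_data of the original poll request.
 *
 *	struct io_uring_sqe *sqe = get_sqe();
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode	 = IORING_OP_POLL_ADD;
 *	sqe->fd		 = sockfd;		// file to poll
 *	sqe->poll_events = POLLIN;		// demangled by io_poll_add()
 *	sqe->user_data	 = 0xcafe;		// tag used for later removal
 *
 *	// ... later, to cancel the armed poll:
 *	sqe = get_sqe();
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode	 = IORING_OP_POLL_REMOVE;
 *	sqe->addr	 = 0xcafe;		// user_data of the poll to cancel
 */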
1700
Jens Axboede0617e2019-04-06 21:51:27 -06001701static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
1702 const struct io_uring_sqe *sqe)
1703{
1704 struct io_uring_sqe *sqe_copy;
1705
1706 if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
1707 return 0;
1708
1709 sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
1710 if (!sqe_copy)
1711 return -EAGAIN;
1712
1713 spin_lock_irq(&ctx->completion_lock);
1714 if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
1715 spin_unlock_irq(&ctx->completion_lock);
1716 kfree(sqe_copy);
1717 return 0;
1718 }
1719
1720 memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
1721 req->submit.sqe = sqe_copy;
1722
1723 INIT_WORK(&req->work, io_sq_wq_submit_work);
1724 list_add_tail(&req->list, &ctx->defer_list);
1725 spin_unlock_irq(&ctx->completion_lock);
1726 return -EIOCBQUEUED;
1727}
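
/*
 * A minimal userspace sketch of what triggers the deferral path above.
 * Setting IOSQE_IO_DRAIN asks the kernel to hold the SQE back until every
 * previously submitted SQE has completed; a typical (assumed) use is an
 * fsync that must not run before earlier writes. get_sqe() and fd are
 * hypothetical.
 *
 *	struct io_uring_sqe *sqe = get_sqe();
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_FSYNC;
 *	sqe->fd	    = fd;
 *	sqe->flags |= IOSQE_IO_DRAIN;	// handled via io_req_defer()
 */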
1728
Jens Axboe2b188cc2019-01-07 10:46:33 -07001729static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
Jens Axboe8358e3a2019-04-23 08:17:58 -06001730 const struct sqe_submit *s, bool force_nonblock)
Jens Axboe2b188cc2019-01-07 10:46:33 -07001731{
Jens Axboee0c5c572019-03-12 10:18:47 -06001732 int ret, opcode;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001733
Jens Axboe9e645e112019-05-10 16:07:28 -06001734 req->user_data = READ_ONCE(s->sqe->user_data);
1735
Jens Axboe2b188cc2019-01-07 10:46:33 -07001736 if (unlikely(s->index >= ctx->sq_entries))
1737 return -EINVAL;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001738
1739 opcode = READ_ONCE(s->sqe->opcode);
1740 switch (opcode) {
1741 case IORING_OP_NOP:
1742 ret = io_nop(req, req->user_data);
1743 break;
1744 case IORING_OP_READV:
Jens Axboeedafcce2019-01-09 09:16:05 -07001745 if (unlikely(s->sqe->buf_index))
1746 return -EINVAL;
Jens Axboe8358e3a2019-04-23 08:17:58 -06001747 ret = io_read(req, s, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001748 break;
1749 case IORING_OP_WRITEV:
Jens Axboeedafcce2019-01-09 09:16:05 -07001750 if (unlikely(s->sqe->buf_index))
1751 return -EINVAL;
Jens Axboe8358e3a2019-04-23 08:17:58 -06001752 ret = io_write(req, s, force_nonblock);
Jens Axboeedafcce2019-01-09 09:16:05 -07001753 break;
1754 case IORING_OP_READ_FIXED:
Jens Axboe8358e3a2019-04-23 08:17:58 -06001755 ret = io_read(req, s, force_nonblock);
Jens Axboeedafcce2019-01-09 09:16:05 -07001756 break;
1757 case IORING_OP_WRITE_FIXED:
Jens Axboe8358e3a2019-04-23 08:17:58 -06001758 ret = io_write(req, s, force_nonblock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001759 break;
Christoph Hellwigc992fe22019-01-11 09:43:02 -07001760 case IORING_OP_FSYNC:
1761 ret = io_fsync(req, s->sqe, force_nonblock);
1762 break;
Jens Axboe221c5eb2019-01-17 09:41:58 -07001763 case IORING_OP_POLL_ADD:
1764 ret = io_poll_add(req, s->sqe);
1765 break;
1766 case IORING_OP_POLL_REMOVE:
1767 ret = io_poll_remove(req, s->sqe);
1768 break;
Jens Axboe5d17b4a2019-04-09 14:56:44 -06001769 case IORING_OP_SYNC_FILE_RANGE:
1770 ret = io_sync_file_range(req, s->sqe, force_nonblock);
1771 break;
Jens Axboe0fa03c62019-04-19 13:34:07 -06001772 case IORING_OP_SENDMSG:
1773 ret = io_sendmsg(req, s->sqe, force_nonblock);
1774 break;
Jens Axboeaa1fa282019-04-19 13:38:09 -06001775 case IORING_OP_RECVMSG:
1776 ret = io_recvmsg(req, s->sqe, force_nonblock);
1777 break;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001778 default:
1779 ret = -EINVAL;
1780 break;
1781 }
1782
Jens Axboedef596e2019-01-09 08:59:42 -07001783 if (ret)
1784 return ret;
1785
1786 if (ctx->flags & IORING_SETUP_IOPOLL) {
Jens Axboe9e645e112019-05-10 16:07:28 -06001787 if (req->result == -EAGAIN)
Jens Axboedef596e2019-01-09 08:59:42 -07001788 return -EAGAIN;
1789
1790 /* workqueue context doesn't hold uring_lock, grab it now */
1791 if (s->needs_lock)
1792 mutex_lock(&ctx->uring_lock);
1793 io_iopoll_req_issued(req);
1794 if (s->needs_lock)
1795 mutex_unlock(&ctx->uring_lock);
1796 }
1797
1798 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001799}
1800
Jens Axboe31b51512019-01-18 22:56:34 -07001801static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
1802 const struct io_uring_sqe *sqe)
1803{
1804 switch (sqe->opcode) {
1805 case IORING_OP_READV:
1806 case IORING_OP_READ_FIXED:
1807 return &ctx->pending_async[READ];
1808 case IORING_OP_WRITEV:
1809 case IORING_OP_WRITE_FIXED:
1810 return &ctx->pending_async[WRITE];
1811 default:
1812 return NULL;
1813 }
1814}
1815
Jens Axboeedafcce2019-01-09 09:16:05 -07001816static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
1817{
1818 u8 opcode = READ_ONCE(sqe->opcode);
1819
1820 return !(opcode == IORING_OP_READ_FIXED ||
1821 opcode == IORING_OP_WRITE_FIXED);
1822}
1823
Jens Axboe2b188cc2019-01-07 10:46:33 -07001824static void io_sq_wq_submit_work(struct work_struct *work)
1825{
1826 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
Jens Axboe2b188cc2019-01-07 10:46:33 -07001827 struct io_ring_ctx *ctx = req->ctx;
Jens Axboe31b51512019-01-18 22:56:34 -07001828 struct mm_struct *cur_mm = NULL;
1829 struct async_list *async_list;
1830 LIST_HEAD(req_list);
Jens Axboeedafcce2019-01-09 09:16:05 -07001831 mm_segment_t old_fs;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001832 int ret;
1833
Jens Axboe31b51512019-01-18 22:56:34 -07001834 async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
1835restart:
1836 do {
1837 struct sqe_submit *s = &req->submit;
1838 const struct io_uring_sqe *sqe = s->sqe;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001839
Stefan Bühler8449eed2019-04-27 20:34:19 +02001840 /* Ensure we clear the previously set non-block flag */
Jens Axboe31b51512019-01-18 22:56:34 -07001841 req->rw.ki_flags &= ~IOCB_NOWAIT;
1842
1843 ret = 0;
1844 if (io_sqe_needs_user(sqe) && !cur_mm) {
1845 if (!mmget_not_zero(ctx->sqo_mm)) {
1846 ret = -EFAULT;
1847 } else {
1848 cur_mm = ctx->sqo_mm;
1849 use_mm(cur_mm);
1850 old_fs = get_fs();
1851 set_fs(USER_DS);
1852 }
1853 }
1854
1855 if (!ret) {
1856 s->has_user = cur_mm != NULL;
1857 s->needs_lock = true;
1858 do {
Jens Axboe8358e3a2019-04-23 08:17:58 -06001859 ret = __io_submit_sqe(ctx, req, s, false);
Jens Axboe31b51512019-01-18 22:56:34 -07001860 /*
1861 * We can get EAGAIN for polled IO even though
1862 * we're forcing a sync submission from here,
1863 * since we can't wait for request slots on the
1864 * block side.
1865 */
1866 if (ret != -EAGAIN)
1867 break;
1868 cond_resched();
1869 } while (1);
1870 }
Jens Axboe817869d2019-04-30 14:44:05 -06001871
1872 /* drop submission reference */
1873 io_put_req(req);
1874
Jens Axboe31b51512019-01-18 22:56:34 -07001875 if (ret) {
Jens Axboec71ffb62019-05-13 20:58:29 -06001876 io_cqring_add_event(ctx, sqe->user_data, ret);
Jens Axboee65ef562019-03-12 10:16:44 -06001877 io_put_req(req);
Jens Axboe31b51512019-01-18 22:56:34 -07001878 }
1879
1880 /* async context always use a copy of the sqe */
1881 kfree(sqe);
1882
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +08001883 /* requests from the defer and link lists needn't decrease the async cnt */
1884 if (req->flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
1885 goto out;
1886
Jens Axboe31b51512019-01-18 22:56:34 -07001887 if (!async_list)
1888 break;
1889 if (!list_empty(&req_list)) {
1890 req = list_first_entry(&req_list, struct io_kiocb,
1891 list);
1892 list_del(&req->list);
1893 continue;
1894 }
1895 if (list_empty(&async_list->list))
1896 break;
1897
1898 req = NULL;
1899 spin_lock(&async_list->lock);
1900 if (list_empty(&async_list->list)) {
1901 spin_unlock(&async_list->lock);
1902 break;
1903 }
1904 list_splice_init(&async_list->list, &req_list);
1905 spin_unlock(&async_list->lock);
1906
1907 req = list_first_entry(&req_list, struct io_kiocb, list);
1908 list_del(&req->list);
1909 } while (req);
Jens Axboeedafcce2019-01-09 09:16:05 -07001910
1911 /*
Jens Axboe31b51512019-01-18 22:56:34 -07001912 * Rare case of racing with a submitter. If we find the count has
1913 * dropped to zero AND we have pending work items, then restart
1914 * the processing. This is a tiny race window.
Jens Axboeedafcce2019-01-09 09:16:05 -07001915 */
Jens Axboe31b51512019-01-18 22:56:34 -07001916 if (async_list) {
1917 ret = atomic_dec_return(&async_list->cnt);
1918 while (!ret && !list_empty(&async_list->list)) {
1919 spin_lock(&async_list->lock);
1920 atomic_inc(&async_list->cnt);
1921 list_splice_init(&async_list->list, &req_list);
1922 spin_unlock(&async_list->lock);
1923
1924 if (!list_empty(&req_list)) {
1925 req = list_first_entry(&req_list,
1926 struct io_kiocb, list);
1927 list_del(&req->list);
1928 goto restart;
1929 }
1930 ret = atomic_dec_return(&async_list->cnt);
Jens Axboeedafcce2019-01-09 09:16:05 -07001931 }
Jens Axboeedafcce2019-01-09 09:16:05 -07001932 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07001933
Zhengyuan Liuf7b76ac2019-07-16 23:26:14 +08001934out:
Jens Axboe31b51512019-01-18 22:56:34 -07001935 if (cur_mm) {
Jens Axboeedafcce2019-01-09 09:16:05 -07001936 set_fs(old_fs);
Jens Axboe31b51512019-01-18 22:56:34 -07001937 unuse_mm(cur_mm);
1938 mmput(cur_mm);
Jens Axboeedafcce2019-01-09 09:16:05 -07001939 }
Jens Axboe31b51512019-01-18 22:56:34 -07001940}
Jens Axboe2b188cc2019-01-07 10:46:33 -07001941
Jens Axboe31b51512019-01-18 22:56:34 -07001942/*
1943 * See if we can piggyback onto previously submitted work that is still
1944 * running. We currently only allow this if the new request is sequential
1945 * to the previous one we punted.
1946 */
1947static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
1948{
1949 bool ret = false;
1950
1951 if (!list)
1952 return false;
1953 if (!(req->flags & REQ_F_SEQ_PREV))
1954 return false;
1955 if (!atomic_read(&list->cnt))
1956 return false;
1957
1958 ret = true;
1959 spin_lock(&list->lock);
1960 list_add_tail(&req->list, &list->list);
Zhengyuan Liuc0e48f92019-07-18 20:44:00 +08001961 /*
1962 * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
1963 */
1964 smp_mb();
Jens Axboe31b51512019-01-18 22:56:34 -07001965 if (!atomic_read(&list->cnt)) {
1966 list_del_init(&req->list);
1967 ret = false;
1968 }
1969 spin_unlock(&list->lock);
1970 return ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07001971}
1972
Jens Axboe09bb8392019-03-13 12:39:28 -06001973static bool io_op_needs_file(const struct io_uring_sqe *sqe)
1974{
1975 int op = READ_ONCE(sqe->opcode);
1976
1977 switch (op) {
1978 case IORING_OP_NOP:
1979 case IORING_OP_POLL_REMOVE:
1980 return false;
1981 default:
1982 return true;
1983 }
1984}
1985
1986static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
1987 struct io_submit_state *state, struct io_kiocb *req)
1988{
1989 unsigned flags;
1990 int fd;
1991
1992 flags = READ_ONCE(s->sqe->flags);
1993 fd = READ_ONCE(s->sqe->fd);
1994
Jens Axboede0617e2019-04-06 21:51:27 -06001995 if (flags & IOSQE_IO_DRAIN) {
1996 req->flags |= REQ_F_IO_DRAIN;
1997 req->sequence = ctx->cached_sq_head - 1;
1998 }
1999
Jens Axboe60c112b2019-06-21 10:20:18 -06002000 if (!io_op_needs_file(s->sqe))
Jens Axboe09bb8392019-03-13 12:39:28 -06002001 return 0;
Jens Axboe09bb8392019-03-13 12:39:28 -06002002
2003 if (flags & IOSQE_FIXED_FILE) {
2004 if (unlikely(!ctx->user_files ||
2005 (unsigned) fd >= ctx->nr_user_files))
2006 return -EBADF;
2007 req->file = ctx->user_files[fd];
2008 req->flags |= REQ_F_FIXED_FILE;
2009 } else {
2010 if (s->needs_fixed_file)
2011 return -EBADF;
2012 req->file = io_file_get(state, fd);
2013 if (unlikely(!req->file))
2014 return -EBADF;
2015 }
2016
2017 return 0;
2018}
2019
Jens Axboe9e645e112019-05-10 16:07:28 -06002020static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2021 struct sqe_submit *s)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002022{
Jens Axboee0c5c572019-03-12 10:18:47 -06002023 int ret;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002024
Jens Axboe8358e3a2019-04-23 08:17:58 -06002025 ret = __io_submit_sqe(ctx, req, s, true);
Stefan Bühler8449eed2019-04-27 20:34:19 +02002026 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
Jens Axboe2b188cc2019-01-07 10:46:33 -07002027 struct io_uring_sqe *sqe_copy;
2028
2029 sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
2030 if (sqe_copy) {
Jens Axboe31b51512019-01-18 22:56:34 -07002031 struct async_list *list;
2032
Jens Axboe2b188cc2019-01-07 10:46:33 -07002033 memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
2034 s->sqe = sqe_copy;
2035
2036 memcpy(&req->submit, s, sizeof(*s));
Jens Axboe31b51512019-01-18 22:56:34 -07002037 list = io_async_list_from_sqe(ctx, s->sqe);
2038 if (!io_add_to_prev_work(list, req)) {
2039 if (list)
2040 atomic_inc(&list->cnt);
2041 INIT_WORK(&req->work, io_sq_wq_submit_work);
2042 queue_work(ctx->sqo_wq, &req->work);
2043 }
Jens Axboee65ef562019-03-12 10:16:44 -06002044
2045 /*
2046 * Queued up for async execution, worker will release
Jens Axboe9e645e112019-05-10 16:07:28 -06002047 * submit reference when the iocb is actually submitted.
Jens Axboee65ef562019-03-12 10:16:44 -06002048 */
2049 return 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002050 }
2051 }
Jens Axboee65ef562019-03-12 10:16:44 -06002052
2053 /* drop submission reference */
2054 io_put_req(req);
2055
2056 /* and drop final reference, if we failed */
Jens Axboe9e645e112019-05-10 16:07:28 -06002057 if (ret) {
2058 io_cqring_add_event(ctx, req->user_data, ret);
2059 if (req->flags & REQ_F_LINK)
2060 req->flags |= REQ_F_FAIL_LINK;
Jens Axboee65ef562019-03-12 10:16:44 -06002061 io_put_req(req);
Jens Axboe9e645e112019-05-10 16:07:28 -06002062 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002063
2064 return ret;
2065}
2066
Jens Axboe9e645e112019-05-10 16:07:28 -06002067#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
2068
2069static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
2070 struct io_submit_state *state, struct io_kiocb **link)
2071{
2072 struct io_uring_sqe *sqe_copy;
2073 struct io_kiocb *req;
2074 int ret;
2075
2076 /* enforce forwards compatibility on users */
2077 if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
2078 ret = -EINVAL;
2079 goto err;
2080 }
2081
2082 req = io_get_req(ctx, state);
2083 if (unlikely(!req)) {
2084 ret = -EAGAIN;
2085 goto err;
2086 }
2087
2088 ret = io_req_set_file(ctx, s, state, req);
2089 if (unlikely(ret)) {
2090err_req:
2091 io_free_req(req);
2092err:
2093 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2094 return;
2095 }
2096
2097 ret = io_req_defer(ctx, req, s->sqe);
2098 if (ret) {
2099 if (ret != -EIOCBQUEUED)
2100 goto err_req;
2101 return;
2102 }
2103
2104 /*
2105 * If we already have a head request, queue this one for async
2106 * submittal once the head completes. If we don't have a head but
2107 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2108 * submitted sync once the chain is complete. If none of those
2109 * conditions are true (normal request), then just queue it.
2110 */
2111 if (*link) {
2112 struct io_kiocb *prev = *link;
2113
2114 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2115 if (!sqe_copy) {
2116 ret = -EAGAIN;
2117 goto err_req;
2118 }
2119
2120 s->sqe = sqe_copy;
2121 memcpy(&req->submit, s, sizeof(*s));
2122 list_add_tail(&req->list, &prev->link_list);
2123 } else if (s->sqe->flags & IOSQE_IO_LINK) {
2124 req->flags |= REQ_F_LINK;
2125
2126 memcpy(&req->submit, s, sizeof(*s));
2127 INIT_LIST_HEAD(&req->link_list);
2128 *link = req;
2129 } else {
2130 io_queue_sqe(ctx, req, s);
2131 }
2132}
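
/*
 * A minimal userspace sketch of the link handling above. The first SQE
 * carries IOSQE_IO_LINK, so the second is queued on its link_list and only
 * issued once the first completes. get_sqe(), fd and iov are hypothetical.
 *
 *	struct io_uring_sqe *write_sqe = get_sqe();
 *	struct io_uring_sqe *fsync_sqe = get_sqe();
 *
 *	memset(write_sqe, 0, sizeof(*write_sqe));
 *	write_sqe->opcode = IORING_OP_WRITEV;
 *	write_sqe->fd	  = fd;
 *	write_sqe->addr	  = (unsigned long) &iov;
 *	write_sqe->len	  = 1;
 *	write_sqe->flags |= IOSQE_IO_LINK;	// chain the next SQE after this one
 *
 *	memset(fsync_sqe, 0, sizeof(*fsync_sqe));
 *	fsync_sqe->opcode = IORING_OP_FSYNC;	// runs only after the write completes
 *	fsync_sqe->fd	  = fd;
 */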
2133
Jens Axboe9a56a232019-01-09 09:06:50 -07002134/*
2135 * Batched submission is done, ensure local IO is flushed out.
2136 */
2137static void io_submit_state_end(struct io_submit_state *state)
2138{
2139 blk_finish_plug(&state->plug);
Jens Axboe3d6770f2019-04-13 11:50:54 -06002140 io_file_put(state);
Jens Axboe2579f912019-01-09 09:10:43 -07002141 if (state->free_reqs)
2142 kmem_cache_free_bulk(req_cachep, state->free_reqs,
2143 &state->reqs[state->cur_req]);
Jens Axboe9a56a232019-01-09 09:06:50 -07002144}
2145
2146/*
2147 * Start submission side cache.
2148 */
2149static void io_submit_state_start(struct io_submit_state *state,
2150 struct io_ring_ctx *ctx, unsigned max_ios)
2151{
2152 blk_start_plug(&state->plug);
Jens Axboe2579f912019-01-09 09:10:43 -07002153 state->free_reqs = 0;
Jens Axboe9a56a232019-01-09 09:06:50 -07002154 state->file = NULL;
2155 state->ios_left = max_ios;
2156}
2157
Jens Axboe2b188cc2019-01-07 10:46:33 -07002158static void io_commit_sqring(struct io_ring_ctx *ctx)
2159{
2160 struct io_sq_ring *ring = ctx->sq_ring;
2161
2162 if (ctx->cached_sq_head != READ_ONCE(ring->r.head)) {
2163 /*
2164 * Ensure any loads from the SQEs are done at this point,
2165 * since once we write the new head, the application could
2166 * write new data to them.
2167 */
2168 smp_store_release(&ring->r.head, ctx->cached_sq_head);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002169 }
2170}
2171
2172/*
Jens Axboe2b188cc2019-01-07 10:46:33 -07002173 * Fetch an sqe, if one is available. Note that s->sqe will point to memory
2174 * that is mapped by userspace. This means that care needs to be taken to
2175 * ensure that reads are stable, as we cannot rely on userspace always
2176 * being a good citizen. If members of the sqe are validated and then later
2177 * used, it's important that those reads are done through READ_ONCE() to
2178 * prevent a re-load down the line.
2179 */
2180static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
2181{
2182 struct io_sq_ring *ring = ctx->sq_ring;
2183 unsigned head;
2184
2185 /*
2186 * The cached sq head (or cq tail) serves two purposes:
2187 *
2188 * 1) allows us to batch the cost of updating the user visible
2189 * head.
2190 * 2) allows the kernel side to track the head on its own, even
2191 * though the application is the one updating it.
2192 */
2193 head = ctx->cached_sq_head;
Stefan Bühlere523a292019-04-19 11:57:44 +02002194 /* make sure SQ entry isn't read before tail */
2195 if (head == smp_load_acquire(&ring->r.tail))
Jens Axboe2b188cc2019-01-07 10:46:33 -07002196 return false;
2197
2198 head = READ_ONCE(ring->array[head & ctx->sq_mask]);
2199 if (head < ctx->sq_entries) {
2200 s->index = head;
2201 s->sqe = &ctx->sq_sqes[head];
2202 ctx->cached_sq_head++;
2203 return true;
2204 }
2205
2206 /* drop invalid entries */
2207 ctx->cached_sq_head++;
2208 ring->dropped++;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002209 return false;
2210}
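
/*
 * A minimal sketch of the producer side that io_get_sqring() and
 * io_commit_sqring() consume. Kernel-style barrier names are used for
 * brevity; userspace would use equivalent acquire/release atomics. The
 * names sq_head, sq_tail, sq_array, sq_mask, ring_entries, sqes and
 * fill_sqe() stand for the mmap'ed ring fields and are assumptions of this
 * sketch.
 *
 *	unsigned tail = *sq_tail;			// only the app writes the tail
 *	unsigned head = smp_load_acquire(sq_head);	// pairs with io_commit_sqring()
 *
 *	if (tail - head < ring_entries) {
 *		unsigned index = tail & sq_mask;
 *
 *		fill_sqe(&sqes[index]);			// hypothetical helper
 *		sq_array[index] = index;
 *		smp_store_release(sq_tail, tail + 1);	// pairs with io_get_sqring()
 *	}
 */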
2211
Jens Axboe6c271ce2019-01-10 11:22:30 -07002212static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
2213 unsigned int nr, bool has_user, bool mm_fault)
2214{
2215 struct io_submit_state state, *statep = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002216 struct io_kiocb *link = NULL;
2217 bool prev_was_link = false;
2218 int i, submitted = 0;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002219
2220 if (nr > IO_PLUG_THRESHOLD) {
2221 io_submit_state_start(&state, ctx, nr);
2222 statep = &state;
2223 }
2224
2225 for (i = 0; i < nr; i++) {
Jens Axboe9e645e112019-05-10 16:07:28 -06002226 /*
2227 * If previous wasn't linked and we have a linked command,
2228 * that's the end of the chain. Submit the previous link.
2229 */
2230 if (!prev_was_link && link) {
2231 io_queue_sqe(ctx, link, &link->submit);
2232 link = NULL;
2233 }
2234 prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
2235
Jens Axboe6c271ce2019-01-10 11:22:30 -07002236 if (unlikely(mm_fault)) {
Jens Axboe9e645e112019-05-10 16:07:28 -06002237 io_cqring_add_event(ctx, sqes[i].sqe->user_data,
2238 -EFAULT);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002239 } else {
2240 sqes[i].has_user = has_user;
2241 sqes[i].needs_lock = true;
2242 sqes[i].needs_fixed_file = true;
Jens Axboe9e645e112019-05-10 16:07:28 -06002243 io_submit_sqe(ctx, &sqes[i], statep, &link);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002244 submitted++;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002245 }
Jens Axboe6c271ce2019-01-10 11:22:30 -07002246 }
2247
Jens Axboe9e645e112019-05-10 16:07:28 -06002248 if (link)
2249 io_queue_sqe(ctx, link, &link->submit);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002250 if (statep)
2251 io_submit_state_end(&state);
2252
2253 return submitted;
2254}
2255
2256static int io_sq_thread(void *data)
2257{
2258 struct sqe_submit sqes[IO_IOPOLL_BATCH];
2259 struct io_ring_ctx *ctx = data;
2260 struct mm_struct *cur_mm = NULL;
2261 mm_segment_t old_fs;
2262 DEFINE_WAIT(wait);
2263 unsigned inflight;
2264 unsigned long timeout;
2265
Jackie Liua4c0b3d2019-07-08 13:41:12 +08002266 complete(&ctx->sqo_thread_started);
2267
Jens Axboe6c271ce2019-01-10 11:22:30 -07002268 old_fs = get_fs();
2269 set_fs(USER_DS);
2270
2271 timeout = inflight = 0;
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002272 while (!kthread_should_park()) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07002273 bool all_fixed, mm_fault = false;
2274 int i;
2275
2276 if (inflight) {
2277 unsigned nr_events = 0;
2278
2279 if (ctx->flags & IORING_SETUP_IOPOLL) {
2280 /*
2281 * We disallow the app entering submit/complete
2282 * with polling, but we still need to lock the
2283 * ring to prevent racing with polled issue
2284 * that got punted to a workqueue.
2285 */
2286 mutex_lock(&ctx->uring_lock);
2287 io_iopoll_check(ctx, &nr_events, 0);
2288 mutex_unlock(&ctx->uring_lock);
2289 } else {
2290 /*
2291 * Normal IO, just pretend everything completed.
2292 * We don't have to poll completions for that.
2293 */
2294 nr_events = inflight;
2295 }
2296
2297 inflight -= nr_events;
2298 if (!inflight)
2299 timeout = jiffies + ctx->sq_thread_idle;
2300 }
2301
2302 if (!io_get_sqring(ctx, &sqes[0])) {
2303 /*
2304 * We're polling. If we're within the defined idle
2305 * period, then let us spin without work before going
2306 * to sleep.
2307 */
2308 if (inflight || !time_after(jiffies, timeout)) {
2309 cpu_relax();
2310 continue;
2311 }
2312
2313 /*
2314 * Drop cur_mm before scheduling, we can't hold it for
2315 * long periods (or over schedule()). Do this before
2316 * adding ourselves to the waitqueue, as the unuse/drop
2317 * may sleep.
2318 */
2319 if (cur_mm) {
2320 unuse_mm(cur_mm);
2321 mmput(cur_mm);
2322 cur_mm = NULL;
2323 }
2324
2325 prepare_to_wait(&ctx->sqo_wait, &wait,
2326 TASK_INTERRUPTIBLE);
2327
2328 /* Tell userspace we may need a wakeup call */
2329 ctx->sq_ring->flags |= IORING_SQ_NEED_WAKEUP;
Stefan Bühler0d7bae62019-04-19 11:57:45 +02002330 /* make sure to read SQ tail after writing flags */
2331 smp_mb();
Jens Axboe6c271ce2019-01-10 11:22:30 -07002332
2333 if (!io_get_sqring(ctx, &sqes[0])) {
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002334 if (kthread_should_park()) {
Jens Axboe6c271ce2019-01-10 11:22:30 -07002335 finish_wait(&ctx->sqo_wait, &wait);
2336 break;
2337 }
2338 if (signal_pending(current))
2339 flush_signals(current);
2340 schedule();
2341 finish_wait(&ctx->sqo_wait, &wait);
2342
2343 ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002344 continue;
2345 }
2346 finish_wait(&ctx->sqo_wait, &wait);
2347
2348 ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002349 }
2350
2351 i = 0;
2352 all_fixed = true;
2353 do {
2354 if (all_fixed && io_sqe_needs_user(sqes[i].sqe))
2355 all_fixed = false;
2356
2357 i++;
2358 if (i == ARRAY_SIZE(sqes))
2359 break;
2360 } while (io_get_sqring(ctx, &sqes[i]));
2361
2362 /* Unless all new commands are FIXED regions, grab mm */
2363 if (!all_fixed && !cur_mm) {
2364 mm_fault = !mmget_not_zero(ctx->sqo_mm);
2365 if (!mm_fault) {
2366 use_mm(ctx->sqo_mm);
2367 cur_mm = ctx->sqo_mm;
2368 }
2369 }
2370
2371 inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL,
2372 mm_fault);
2373
2374 /* Commit SQ ring head once we've consumed all SQEs */
2375 io_commit_sqring(ctx);
2376 }
2377
2378 set_fs(old_fs);
2379 if (cur_mm) {
2380 unuse_mm(cur_mm);
2381 mmput(cur_mm);
2382 }
Jens Axboe06058632019-04-13 09:26:03 -06002383
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002384 kthread_parkme();
Jens Axboe06058632019-04-13 09:26:03 -06002385
Jens Axboe6c271ce2019-01-10 11:22:30 -07002386 return 0;
2387}
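
/*
 * A minimal userspace sketch of the IORING_SQ_NEED_WAKEUP dance above when
 * IORING_SETUP_SQPOLL is used. The barrier mirrors the smp_mb() the thread
 * issues between setting the flag and re-reading the tail; sq_flags,
 * ring_fd and to_submit are assumptions of this sketch.
 *
 *	// ... application has just published a new SQ tail ...
 *	smp_mb();					// order tail store vs. flags load
 *	if (READ_ONCE(*sq_flags) & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 */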
2388
Jens Axboe2b188cc2019-01-07 10:46:33 -07002389static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
2390{
Jens Axboe9a56a232019-01-09 09:06:50 -07002391 struct io_submit_state state, *statep = NULL;
Jens Axboe9e645e112019-05-10 16:07:28 -06002392 struct io_kiocb *link = NULL;
2393 bool prev_was_link = false;
Jens Axboe5c8b0b52019-04-30 10:16:07 -06002394 int i, submit = 0;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002395
Jens Axboe9a56a232019-01-09 09:06:50 -07002396 if (to_submit > IO_PLUG_THRESHOLD) {
2397 io_submit_state_start(&state, ctx, to_submit);
2398 statep = &state;
2399 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07002400
2401 for (i = 0; i < to_submit; i++) {
2402 struct sqe_submit s;
2403
2404 if (!io_get_sqring(ctx, &s))
2405 break;
2406
Jens Axboe9e645e112019-05-10 16:07:28 -06002407 /*
2408 * If previous wasn't linked and we have a linked command,
2409 * that's the end of the chain. Submit the previous link.
2410 */
2411 if (!prev_was_link && link) {
2412 io_queue_sqe(ctx, link, &link->submit);
2413 link = NULL;
2414 }
2415 prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
2416
Jens Axboe2b188cc2019-01-07 10:46:33 -07002417 s.has_user = true;
Jens Axboedef596e2019-01-09 08:59:42 -07002418 s.needs_lock = false;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002419 s.needs_fixed_file = false;
Jens Axboe5c8b0b52019-04-30 10:16:07 -06002420 submit++;
Jens Axboe9e645e112019-05-10 16:07:28 -06002421 io_submit_sqe(ctx, &s, statep, &link);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002422 }
2423 io_commit_sqring(ctx);
2424
Jens Axboe9e645e112019-05-10 16:07:28 -06002425 if (link)
2426 io_queue_sqe(ctx, link, &link->submit);
Jens Axboe9a56a232019-01-09 09:06:50 -07002427 if (statep)
2428 io_submit_state_end(statep);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002429
Jens Axboe5c8b0b52019-04-30 10:16:07 -06002430 return submit;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002431}
2432
2433static unsigned io_cqring_events(struct io_cq_ring *ring)
2434{
Jackie Liudc6ce4b2019-05-16 11:46:30 +08002435 /* See comment at the top of this file */
2436 smp_rmb();
Jens Axboe2b188cc2019-01-07 10:46:33 -07002437 return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
2438}
2439
2440/*
2441 * Wait until events become available, if we don't already have some. The
2442 * application must reap them itself, as they reside on the shared cq ring.
2443 */
2444static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2445 const sigset_t __user *sig, size_t sigsz)
2446{
2447 struct io_cq_ring *ring = ctx->cq_ring;
2448 sigset_t ksigmask, sigsaved;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002449 int ret;
2450
Jens Axboe2b188cc2019-01-07 10:46:33 -07002451 if (io_cqring_events(ring) >= min_events)
2452 return 0;
2453
2454 if (sig) {
Arnd Bergmann9e75ad52019-03-25 15:34:53 +01002455#ifdef CONFIG_COMPAT
2456 if (in_compat_syscall())
2457 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
2458 &ksigmask, &sigsaved, sigsz);
2459 else
2460#endif
2461 ret = set_user_sigmask(sig, &ksigmask,
2462 &sigsaved, sigsz);
2463
Jens Axboe2b188cc2019-01-07 10:46:33 -07002464 if (ret)
2465 return ret;
2466 }
2467
Jackie Liufdb288a2019-05-16 11:46:31 +08002468 ret = wait_event_interruptible(ctx->wait, io_cqring_events(ring) >= min_events);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002469
2470 if (sig)
Oleg Nesterov97abc882019-06-28 12:06:50 -07002471 restore_user_sigmask(sig, &sigsaved, ret == -ERESTARTSYS);
2472
2473 if (ret == -ERESTARTSYS)
2474 ret = -EINTR;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002475
2476 return READ_ONCE(ring->r.head) == READ_ONCE(ring->r.tail) ? ret : 0;
2477}
2478
Jens Axboe6b063142019-01-10 22:13:58 -07002479static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
2480{
2481#if defined(CONFIG_UNIX)
2482 if (ctx->ring_sock) {
2483 struct sock *sock = ctx->ring_sock->sk;
2484 struct sk_buff *skb;
2485
2486 while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
2487 kfree_skb(skb);
2488 }
2489#else
2490 int i;
2491
2492 for (i = 0; i < ctx->nr_user_files; i++)
2493 fput(ctx->user_files[i]);
2494#endif
2495}
2496
2497static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
2498{
2499 if (!ctx->user_files)
2500 return -ENXIO;
2501
2502 __io_sqe_files_unregister(ctx);
2503 kfree(ctx->user_files);
2504 ctx->user_files = NULL;
2505 ctx->nr_user_files = 0;
2506 return 0;
2507}
2508
Jens Axboe6c271ce2019-01-10 11:22:30 -07002509static void io_sq_thread_stop(struct io_ring_ctx *ctx)
2510{
2511 if (ctx->sqo_thread) {
Jackie Liua4c0b3d2019-07-08 13:41:12 +08002512 wait_for_completion(&ctx->sqo_thread_started);
Roman Penyaev2bbcd6d2019-05-16 10:53:57 +02002513 /*
2514 * The park is a bit of a work-around, without it we get
2515 * warning spews on shutdown with SQPOLL set and affinity
2516 * set to a single CPU.
2517 */
Jens Axboe06058632019-04-13 09:26:03 -06002518 kthread_park(ctx->sqo_thread);
Jens Axboe6c271ce2019-01-10 11:22:30 -07002519 kthread_stop(ctx->sqo_thread);
2520 ctx->sqo_thread = NULL;
2521 }
2522}
2523
Jens Axboe6b063142019-01-10 22:13:58 -07002524static void io_finish_async(struct io_ring_ctx *ctx)
2525{
Jens Axboe6c271ce2019-01-10 11:22:30 -07002526 io_sq_thread_stop(ctx);
2527
Jens Axboe6b063142019-01-10 22:13:58 -07002528 if (ctx->sqo_wq) {
2529 destroy_workqueue(ctx->sqo_wq);
2530 ctx->sqo_wq = NULL;
2531 }
2532}
2533
2534#if defined(CONFIG_UNIX)
2535static void io_destruct_skb(struct sk_buff *skb)
2536{
2537 struct io_ring_ctx *ctx = skb->sk->sk_user_data;
2538
2539 io_finish_async(ctx);
2540 unix_destruct_scm(skb);
2541}
2542
2543/*
2544 * Ensure the UNIX gc is aware of our file set, so we are certain that
2545 * the io_uring can be safely unregistered on process exit, even if we have
2546 * loops in the file referencing.
2547 */
2548static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
2549{
2550 struct sock *sk = ctx->ring_sock->sk;
2551 struct scm_fp_list *fpl;
2552 struct sk_buff *skb;
2553 int i;
2554
2555 if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
2556 unsigned long inflight = ctx->user->unix_inflight + nr;
2557
2558 if (inflight > task_rlimit(current, RLIMIT_NOFILE))
2559 return -EMFILE;
2560 }
2561
2562 fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
2563 if (!fpl)
2564 return -ENOMEM;
2565
2566 skb = alloc_skb(0, GFP_KERNEL);
2567 if (!skb) {
2568 kfree(fpl);
2569 return -ENOMEM;
2570 }
2571
2572 skb->sk = sk;
2573 skb->destructor = io_destruct_skb;
2574
2575 fpl->user = get_uid(ctx->user);
2576 for (i = 0; i < nr; i++) {
2577 fpl->fp[i] = get_file(ctx->user_files[i + offset]);
2578 unix_inflight(fpl->user, fpl->fp[i]);
2579 }
2580
2581 fpl->max = fpl->count = nr;
2582 UNIXCB(skb).fp = fpl;
2583 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2584 skb_queue_head(&sk->sk_receive_queue, skb);
2585
2586 for (i = 0; i < nr; i++)
2587 fput(fpl->fp[i]);
2588
2589 return 0;
2590}
2591
2592/*
2593 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
2594 * causes regular reference counting to break down. We rely on the UNIX
2595 * garbage collection to take care of this problem for us.
2596 */
2597static int io_sqe_files_scm(struct io_ring_ctx *ctx)
2598{
2599 unsigned left, total;
2600 int ret = 0;
2601
2602 total = 0;
2603 left = ctx->nr_user_files;
2604 while (left) {
2605 unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
Jens Axboe6b063142019-01-10 22:13:58 -07002606
2607 ret = __io_sqe_files_scm(ctx, this_files, total);
2608 if (ret)
2609 break;
2610 left -= this_files;
2611 total += this_files;
2612 }
2613
2614 if (!ret)
2615 return 0;
2616
2617 while (total < ctx->nr_user_files) {
2618 fput(ctx->user_files[total]);
2619 total++;
2620 }
2621
2622 return ret;
2623}
2624#else
2625static int io_sqe_files_scm(struct io_ring_ctx *ctx)
2626{
2627 return 0;
2628}
2629#endif
2630
2631static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
2632 unsigned nr_args)
2633{
2634 __s32 __user *fds = (__s32 __user *) arg;
2635 int fd, ret = 0;
2636 unsigned i;
2637
2638 if (ctx->user_files)
2639 return -EBUSY;
2640 if (!nr_args)
2641 return -EINVAL;
2642 if (nr_args > IORING_MAX_FIXED_FILES)
2643 return -EMFILE;
2644
2645 ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL);
2646 if (!ctx->user_files)
2647 return -ENOMEM;
2648
2649 for (i = 0; i < nr_args; i++) {
2650 ret = -EFAULT;
2651 if (copy_from_user(&fd, &fds[i], sizeof(fd)))
2652 break;
2653
2654 ctx->user_files[i] = fget(fd);
2655
2656 ret = -EBADF;
2657 if (!ctx->user_files[i])
2658 break;
2659 /*
2660 * Don't allow io_uring instances to be registered. If UNIX
2661 * isn't enabled, then this causes a reference cycle and this
2662 * instance can never get freed. If UNIX is enabled we'll
2663 * handle it just fine, but there's still no point in allowing
2664 * a ring fd as it doesn't support regular read/write anyway.
2665 */
2666 if (ctx->user_files[i]->f_op == &io_uring_fops) {
2667 fput(ctx->user_files[i]);
2668 break;
2669 }
2670 ctx->nr_user_files++;
2671 ret = 0;
2672 }
2673
2674 if (ret) {
2675 for (i = 0; i < ctx->nr_user_files; i++)
2676 fput(ctx->user_files[i]);
2677
2678 kfree(ctx->user_files);
Jens Axboe25adf502019-04-03 09:52:40 -06002679 ctx->user_files = NULL;
Jens Axboe6b063142019-01-10 22:13:58 -07002680 ctx->nr_user_files = 0;
2681 return ret;
2682 }
2683
2684 ret = io_sqe_files_scm(ctx);
2685 if (ret)
2686 io_sqe_files_unregister(ctx);
2687
2688 return ret;
2689}
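
/*
 * A minimal userspace sketch of registering a fixed file set and then
 * referencing it by index, assuming the io_uring_register(2) path that
 * calls io_sqe_files_register(). get_sqe() and iov are hypothetical.
 *
 *	int fds[2] = { open("a", O_RDONLY), open("b", O_RDONLY) };
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES, fds, 2);
 *
 *	struct io_uring_sqe *sqe = get_sqe();
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_READV;
 *	sqe->flags |= IOSQE_FIXED_FILE;	// fd below is an index, not a descriptor
 *	sqe->fd	    = 1;		// ctx->user_files[1], i.e. "b"
 *	sqe->addr   = (unsigned long) &iov;
 *	sqe->len    = 1;
 */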
2690
Jens Axboe6c271ce2019-01-10 11:22:30 -07002691static int io_sq_offload_start(struct io_ring_ctx *ctx,
2692 struct io_uring_params *p)
Jens Axboe2b188cc2019-01-07 10:46:33 -07002693{
2694 int ret;
2695
Jens Axboe6c271ce2019-01-10 11:22:30 -07002696 init_waitqueue_head(&ctx->sqo_wait);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002697 mmgrab(current->mm);
2698 ctx->sqo_mm = current->mm;
2699
Jens Axboe6c271ce2019-01-10 11:22:30 -07002700 if (ctx->flags & IORING_SETUP_SQPOLL) {
Jens Axboe3ec482d2019-04-08 10:51:01 -06002701 ret = -EPERM;
2702 if (!capable(CAP_SYS_ADMIN))
2703 goto err;
2704
Jens Axboe917257d2019-04-13 09:28:55 -06002705 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
2706 if (!ctx->sq_thread_idle)
2707 ctx->sq_thread_idle = HZ;
2708
Jens Axboe6c271ce2019-01-10 11:22:30 -07002709 if (p->flags & IORING_SETUP_SQ_AFF) {
Jens Axboe44a9bd12019-05-14 20:00:30 -06002710 int cpu = p->sq_thread_cpu;
Jens Axboe6c271ce2019-01-10 11:22:30 -07002711
Jens Axboe917257d2019-04-13 09:28:55 -06002712 ret = -EINVAL;
Jens Axboe44a9bd12019-05-14 20:00:30 -06002713 if (cpu >= nr_cpu_ids)
2714 goto err;
Shenghui Wang7889f442019-05-07 16:03:19 +08002715 if (!cpu_online(cpu))
Jens Axboe917257d2019-04-13 09:28:55 -06002716 goto err;
2717
Jens Axboe6c271ce2019-01-10 11:22:30 -07002718 ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
2719 ctx, cpu,
2720 "io_uring-sq");
2721 } else {
2722 ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
2723 "io_uring-sq");
2724 }
2725 if (IS_ERR(ctx->sqo_thread)) {
2726 ret = PTR_ERR(ctx->sqo_thread);
2727 ctx->sqo_thread = NULL;
2728 goto err;
2729 }
2730 wake_up_process(ctx->sqo_thread);
2731 } else if (p->flags & IORING_SETUP_SQ_AFF) {
2732 /* Can't have SQ_AFF without SQPOLL */
2733 ret = -EINVAL;
2734 goto err;
2735 }
2736
Jens Axboe2b188cc2019-01-07 10:46:33 -07002737 /* Do QD, or 2 * CPUS, whatever is smallest */
2738 ctx->sqo_wq = alloc_workqueue("io_ring-wq", WQ_UNBOUND | WQ_FREEZABLE,
2739 min(ctx->sq_entries - 1, 2 * num_online_cpus()));
2740 if (!ctx->sqo_wq) {
2741 ret = -ENOMEM;
2742 goto err;
2743 }
2744
2745 return 0;
2746err:
Jens Axboe6c271ce2019-01-10 11:22:30 -07002747 io_sq_thread_stop(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002748 mmdrop(ctx->sqo_mm);
2749 ctx->sqo_mm = NULL;
2750 return ret;
2751}
2752
2753static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
2754{
2755 atomic_long_sub(nr_pages, &user->locked_vm);
2756}
2757
2758static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
2759{
2760 unsigned long page_limit, cur_pages, new_pages;
2761
2762 /* Don't allow more pages than we can safely lock */
2763 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
2764
2765 do {
2766 cur_pages = atomic_long_read(&user->locked_vm);
2767 new_pages = cur_pages + nr_pages;
2768 if (new_pages > page_limit)
2769 return -ENOMEM;
2770 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
2771 new_pages) != cur_pages);
2772
2773 return 0;
2774}
2775
2776static void io_mem_free(void *ptr)
2777{
Mark Rutland52e04ef2019-04-30 17:30:21 +01002778 struct page *page;
Jens Axboe2b188cc2019-01-07 10:46:33 -07002779
Mark Rutland52e04ef2019-04-30 17:30:21 +01002780 if (!ptr)
2781 return;
2782
2783 page = virt_to_head_page(ptr);
Jens Axboe2b188cc2019-01-07 10:46:33 -07002784 if (put_page_testzero(page))
2785 free_compound_page(page);
2786}
2787
2788static void *io_mem_alloc(size_t size)
2789{
2790 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
2791 __GFP_NORETRY;
2792
2793 return (void *) __get_free_pages(gfp_flags, get_order(size));
2794}
2795
2796static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
2797{
2798 struct io_sq_ring *sq_ring;
2799 struct io_cq_ring *cq_ring;
2800 size_t bytes;
2801
2802 bytes = struct_size(sq_ring, array, sq_entries);
2803 bytes += array_size(sizeof(struct io_uring_sqe), sq_entries);
2804 bytes += struct_size(cq_ring, cqes, cq_entries);
2805
2806 return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
2807}
2808
Jens Axboeedafcce2019-01-09 09:16:05 -07002809static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
2810{
2811 int i, j;
2812
2813 if (!ctx->user_bufs)
2814 return -ENXIO;
2815
2816 for (i = 0; i < ctx->nr_user_bufs; i++) {
2817 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
2818
2819 for (j = 0; j < imu->nr_bvecs; j++)
2820 put_page(imu->bvec[j].bv_page);
2821
2822 if (ctx->account_mem)
2823 io_unaccount_mem(ctx->user, imu->nr_bvecs);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01002824 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07002825 imu->nr_bvecs = 0;
2826 }
2827
2828 kfree(ctx->user_bufs);
2829 ctx->user_bufs = NULL;
2830 ctx->nr_user_bufs = 0;
2831 return 0;
2832}
2833
2834static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
2835 void __user *arg, unsigned index)
2836{
2837 struct iovec __user *src;
2838
2839#ifdef CONFIG_COMPAT
2840 if (ctx->compat) {
2841 struct compat_iovec __user *ciovs;
2842 struct compat_iovec ciov;
2843
2844 ciovs = (struct compat_iovec __user *) arg;
2845 if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
2846 return -EFAULT;
2847
2848 dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
2849 dst->iov_len = ciov.iov_len;
2850 return 0;
2851 }
2852#endif
2853 src = (struct iovec __user *) arg;
2854 if (copy_from_user(dst, &src[index], sizeof(*dst)))
2855 return -EFAULT;
2856 return 0;
2857}
2858
2859static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
2860 unsigned nr_args)
2861{
2862 struct vm_area_struct **vmas = NULL;
2863 struct page **pages = NULL;
2864 int i, j, got_pages = 0;
2865 int ret = -EINVAL;
2866
2867 if (ctx->user_bufs)
2868 return -EBUSY;
2869 if (!nr_args || nr_args > UIO_MAXIOV)
2870 return -EINVAL;
2871
2872 ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
2873 GFP_KERNEL);
2874 if (!ctx->user_bufs)
2875 return -ENOMEM;
2876
2877 for (i = 0; i < nr_args; i++) {
2878 struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
2879 unsigned long off, start, end, ubuf;
2880 int pret, nr_pages;
2881 struct iovec iov;
2882 size_t size;
2883
2884 ret = io_copy_iov(ctx, &iov, arg, i);
2885 if (ret)
Pavel Begunkova2786822019-05-26 12:35:47 +03002886 goto err;
Jens Axboeedafcce2019-01-09 09:16:05 -07002887
2888 /*
2889 * Don't impose further limits on the size and buffer
2890 * constraints here, we'll -EINVAL later when IO is
2891 * submitted if they are wrong.
2892 */
2893 ret = -EFAULT;
2894 if (!iov.iov_base || !iov.iov_len)
2895 goto err;
2896
2897 /* arbitrary limit, but we need something */
2898 if (iov.iov_len > SZ_1G)
2899 goto err;
2900
2901 ubuf = (unsigned long) iov.iov_base;
2902 end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2903 start = ubuf >> PAGE_SHIFT;
2904 nr_pages = end - start;
2905
2906 if (ctx->account_mem) {
2907 ret = io_account_mem(ctx->user, nr_pages);
2908 if (ret)
2909 goto err;
2910 }
2911
2912 ret = 0;
2913 if (!pages || nr_pages > got_pages) {
2914 kfree(vmas);
2915 kfree(pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01002916 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
Jens Axboeedafcce2019-01-09 09:16:05 -07002917 GFP_KERNEL);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01002918 vmas = kvmalloc_array(nr_pages,
Jens Axboeedafcce2019-01-09 09:16:05 -07002919 sizeof(struct vm_area_struct *),
2920 GFP_KERNEL);
2921 if (!pages || !vmas) {
2922 ret = -ENOMEM;
2923 if (ctx->account_mem)
2924 io_unaccount_mem(ctx->user, nr_pages);
2925 goto err;
2926 }
2927 got_pages = nr_pages;
2928 }
2929
Mark Rutlandd4ef6472019-05-01 16:59:16 +01002930 imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
Jens Axboeedafcce2019-01-09 09:16:05 -07002931 GFP_KERNEL);
2932 ret = -ENOMEM;
2933 if (!imu->bvec) {
2934 if (ctx->account_mem)
2935 io_unaccount_mem(ctx->user, nr_pages);
2936 goto err;
2937 }
2938
2939 ret = 0;
2940 down_read(&current->mm->mmap_sem);
Ira Weiny932f4a62019-05-13 17:17:03 -07002941 pret = get_user_pages(ubuf, nr_pages,
2942 FOLL_WRITE | FOLL_LONGTERM,
2943 pages, vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07002944 if (pret == nr_pages) {
2945 /* don't support file backed memory */
2946 for (j = 0; j < nr_pages; j++) {
2947 struct vm_area_struct *vma = vmas[j];
2948
2949 if (vma->vm_file &&
2950 !is_file_hugepages(vma->vm_file)) {
2951 ret = -EOPNOTSUPP;
2952 break;
2953 }
2954 }
2955 } else {
2956 ret = pret < 0 ? pret : -EFAULT;
2957 }
2958 up_read(&current->mm->mmap_sem);
2959 if (ret) {
2960 /*
2961 * if we did partial map, or found file backed vmas,
2962 * release any pages we did get
2963 */
2964 if (pret > 0) {
2965 for (j = 0; j < pret; j++)
2966 put_page(pages[j]);
2967 }
2968 if (ctx->account_mem)
2969 io_unaccount_mem(ctx->user, nr_pages);
Mark Rutlandd4ef6472019-05-01 16:59:16 +01002970 kvfree(imu->bvec);
Jens Axboeedafcce2019-01-09 09:16:05 -07002971 goto err;
2972 }
2973
2974 off = ubuf & ~PAGE_MASK;
2975 size = iov.iov_len;
2976 for (j = 0; j < nr_pages; j++) {
2977 size_t vec_len;
2978
2979 vec_len = min_t(size_t, size, PAGE_SIZE - off);
2980 imu->bvec[j].bv_page = pages[j];
2981 imu->bvec[j].bv_len = vec_len;
2982 imu->bvec[j].bv_offset = off;
2983 off = 0;
2984 size -= vec_len;
2985 }
2986 /* store original address for later verification */
2987 imu->ubuf = ubuf;
2988 imu->len = iov.iov_len;
2989 imu->nr_bvecs = nr_pages;
2990
2991 ctx->nr_user_bufs++;
2992 }
Mark Rutlandd4ef6472019-05-01 16:59:16 +01002993 kvfree(pages);
2994 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07002995 return 0;
2996err:
Mark Rutlandd4ef6472019-05-01 16:59:16 +01002997 kvfree(pages);
2998 kvfree(vmas);
Jens Axboeedafcce2019-01-09 09:16:05 -07002999 io_sqe_buffer_unregister(ctx);
3000 return ret;
3001}
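
/*
 * A minimal userspace sketch of registering fixed buffers and issuing a
 * fixed read against them, assuming the io_uring_register(2) path that
 * calls io_sqe_buffer_register(). buf, file_fd and get_sqe() are
 * hypothetical.
 *
 *	struct iovec reg = { .iov_base = buf, .iov_len = 4096 };
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_BUFFERS, &reg, 1);
 *
 *	struct io_uring_sqe *sqe = get_sqe();
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode	= IORING_OP_READ_FIXED;
 *	sqe->fd		= file_fd;
 *	sqe->addr	= (unsigned long) buf;	// must fall inside the registered buffer
 *	sqe->len	= 4096;
 *	sqe->buf_index	= 0;			// index into ctx->user_bufs
 */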
3002
Jens Axboe9b402842019-04-11 11:45:41 -06003003static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
3004{
3005 __s32 __user *fds = arg;
3006 int fd;
3007
3008 if (ctx->cq_ev_fd)
3009 return -EBUSY;
3010
3011 if (copy_from_user(&fd, fds, sizeof(*fds)))
3012 return -EFAULT;
3013
3014 ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
3015 if (IS_ERR(ctx->cq_ev_fd)) {
3016 int ret = PTR_ERR(ctx->cq_ev_fd);
3017 ctx->cq_ev_fd = NULL;
3018 return ret;
3019 }
3020
3021 return 0;
3022}
3023
3024static int io_eventfd_unregister(struct io_ring_ctx *ctx)
3025{
3026 if (ctx->cq_ev_fd) {
3027 eventfd_ctx_put(ctx->cq_ev_fd);
3028 ctx->cq_ev_fd = NULL;
3029 return 0;
3030 }
3031
3032 return -ENXIO;
3033}
3034
Jens Axboe2b188cc2019-01-07 10:46:33 -07003035static void io_ring_ctx_free(struct io_ring_ctx *ctx)
3036{
Jens Axboe6b063142019-01-10 22:13:58 -07003037 io_finish_async(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003038 if (ctx->sqo_mm)
3039 mmdrop(ctx->sqo_mm);
Jens Axboedef596e2019-01-09 08:59:42 -07003040
3041 io_iopoll_reap_events(ctx);
Jens Axboeedafcce2019-01-09 09:16:05 -07003042 io_sqe_buffer_unregister(ctx);
Jens Axboe6b063142019-01-10 22:13:58 -07003043 io_sqe_files_unregister(ctx);
Jens Axboe9b402842019-04-11 11:45:41 -06003044 io_eventfd_unregister(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -07003045
Jens Axboe2b188cc2019-01-07 10:46:33 -07003046#if defined(CONFIG_UNIX)
Eric Biggers355e8d22019-06-12 14:58:43 -07003047 if (ctx->ring_sock) {
3048 ctx->ring_sock->file = NULL; /* so that iput() is called */
Jens Axboe2b188cc2019-01-07 10:46:33 -07003049 sock_release(ctx->ring_sock);
Eric Biggers355e8d22019-06-12 14:58:43 -07003050 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003051#endif
3052
3053 io_mem_free(ctx->sq_ring);
3054 io_mem_free(ctx->sq_sqes);
3055 io_mem_free(ctx->cq_ring);
3056
3057 percpu_ref_exit(&ctx->refs);
3058 if (ctx->account_mem)
3059 io_unaccount_mem(ctx->user,
3060 ring_pages(ctx->sq_entries, ctx->cq_entries));
3061 free_uid(ctx->user);
3062 kfree(ctx);
3063}
3064
3065static __poll_t io_uring_poll(struct file *file, poll_table *wait)
3066{
3067 struct io_ring_ctx *ctx = file->private_data;
3068 __poll_t mask = 0;
3069
3070 poll_wait(file, &ctx->cq_wait, wait);
Stefan Bühler4f7067c2019-04-24 23:54:17 +02003071 /*
3072 * synchronizes with barrier from wq_has_sleeper call in
3073 * io_commit_cqring
3074 */
Jens Axboe2b188cc2019-01-07 10:46:33 -07003075 smp_rmb();
Stefan Bühlerfb775fa2019-04-19 11:57:46 +02003076 if (READ_ONCE(ctx->sq_ring->r.tail) - ctx->cached_sq_head !=
3077 ctx->sq_ring->ring_entries)
Jens Axboe2b188cc2019-01-07 10:46:33 -07003078 mask |= EPOLLOUT | EPOLLWRNORM;
3079 if (READ_ONCE(ctx->cq_ring->r.head) != ctx->cached_cq_tail)
3080 mask |= EPOLLIN | EPOLLRDNORM;
3081
3082 return mask;
3083}
3084
3085static int io_uring_fasync(int fd, struct file *file, int on)
3086{
3087 struct io_ring_ctx *ctx = file->private_data;
3088
3089 return fasync_helper(fd, file, on, &ctx->cq_fasync);
3090}
3091
3092static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
3093{
3094 mutex_lock(&ctx->uring_lock);
3095 percpu_ref_kill(&ctx->refs);
3096 mutex_unlock(&ctx->uring_lock);
3097
Jens Axboe221c5eb2019-01-17 09:41:58 -07003098 io_poll_remove_all(ctx);
Jens Axboedef596e2019-01-09 08:59:42 -07003099 io_iopoll_reap_events(ctx);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003100 wait_for_completion(&ctx->ctx_done);
3101 io_ring_ctx_free(ctx);
3102}
3103
3104static int io_uring_release(struct inode *inode, struct file *file)
3105{
3106 struct io_ring_ctx *ctx = file->private_data;
3107
3108 file->private_data = NULL;
3109 io_ring_ctx_wait_and_kill(ctx);
3110 return 0;
3111}
3112
3113static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3114{
3115 loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
3116 unsigned long sz = vma->vm_end - vma->vm_start;
3117 struct io_ring_ctx *ctx = file->private_data;
3118 unsigned long pfn;
3119 struct page *page;
3120 void *ptr;
3121
3122 switch (offset) {
3123 case IORING_OFF_SQ_RING:
3124 ptr = ctx->sq_ring;
3125 break;
3126 case IORING_OFF_SQES:
3127 ptr = ctx->sq_sqes;
3128 break;
3129 case IORING_OFF_CQ_RING:
3130 ptr = ctx->cq_ring;
3131 break;
3132 default:
3133 return -EINVAL;
3134 }
3135
3136 page = virt_to_head_page(ptr);
3137 if (sz > (PAGE_SIZE << compound_order(page)))
3138 return -EINVAL;
3139
3140 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
3141 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
3142}
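
/*
 * A minimal userspace sketch of mapping the three regions handled above,
 * using the offsets returned by io_uring_setup() in struct io_uring_params p.
 * ring_fd and p are assumed to come from that earlier setup call.
 *
 *	void *sq_ptr = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			    ring_fd, IORING_OFF_SQ_RING);
 *	void *sqes   = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			    ring_fd, IORING_OFF_SQES);
 *	void *cq_ptr = mmap(NULL, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
 *			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			    ring_fd, IORING_OFF_CQ_RING);
 */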
3143
3144SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3145 u32, min_complete, u32, flags, const sigset_t __user *, sig,
3146 size_t, sigsz)
3147{
3148 struct io_ring_ctx *ctx;
3149 long ret = -EBADF;
3150 int submitted = 0;
3151 struct fd f;
3152
Jens Axboe6c271ce2019-01-10 11:22:30 -07003153 if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
Jens Axboe2b188cc2019-01-07 10:46:33 -07003154 return -EINVAL;
3155
3156 f = fdget(fd);
3157 if (!f.file)
3158 return -EBADF;
3159
3160 ret = -EOPNOTSUPP;
3161 if (f.file->f_op != &io_uring_fops)
3162 goto out_fput;
3163
3164 ret = -ENXIO;
3165 ctx = f.file->private_data;
3166 if (!percpu_ref_tryget(&ctx->refs))
3167 goto out_fput;
3168
Jens Axboe6c271ce2019-01-10 11:22:30 -07003169 /*
3170 * For SQ polling, the thread will do all submissions and completions.
3171 * Just return the requested submit count, and wake the thread if
3172 * we were asked to.
3173 */
3174 if (ctx->flags & IORING_SETUP_SQPOLL) {
3175 if (flags & IORING_ENTER_SQ_WAKEUP)
3176 wake_up(&ctx->sqo_wait);
3177 submitted = to_submit;
3178 goto out_ctx;
3179 }
3180
Jens Axboe2b188cc2019-01-07 10:46:33 -07003181 ret = 0;
3182 if (to_submit) {
3183 to_submit = min(to_submit, ctx->sq_entries);
3184
3185 mutex_lock(&ctx->uring_lock);
3186 submitted = io_ring_submit(ctx, to_submit);
3187 mutex_unlock(&ctx->uring_lock);
Jens Axboe2b188cc2019-01-07 10:46:33 -07003188 }
3189 if (flags & IORING_ENTER_GETEVENTS) {
Jens Axboedef596e2019-01-09 08:59:42 -07003190 unsigned nr_events = 0;
3191
Jens Axboe2b188cc2019-01-07 10:46:33 -07003192 min_complete = min(min_complete, ctx->cq_entries);
3193
Jens Axboedef596e2019-01-09 08:59:42 -07003194 if (ctx->flags & IORING_SETUP_IOPOLL) {
3195 mutex_lock(&ctx->uring_lock);
3196 ret = io_iopoll_check(ctx, &nr_events, min_complete);
3197 mutex_unlock(&ctx->uring_lock);
3198 } else {
3199 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
3200 }
Jens Axboe2b188cc2019-01-07 10:46:33 -07003201 }
3202
3203out_ctx:
3204 io_ring_drop_ctx_refs(ctx, 1);
3205out_fput:
3206 fdput(f);
3207 return submitted ? submitted : ret;
3208}
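
/*
 * A minimal sketch of a typical submit-and-wait call into the syscall
 * above: submit everything queued in the SQ ring and block until at least
 * one completion is posted. The raw syscall form is shown; liburing
 * provides wrappers for the same thing.
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *			  IORING_ENTER_GETEVENTS, NULL, 0);
 *	if (ret < 0)
 *		perror("io_uring_enter");
 */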
3209
3210static const struct file_operations io_uring_fops = {
3211 .release = io_uring_release,
3212 .mmap = io_uring_mmap,
3213 .poll = io_uring_poll,
3214 .fasync = io_uring_fasync,
3215};
3216
static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
                                  struct io_uring_params *p)
{
        struct io_sq_ring *sq_ring;
        struct io_cq_ring *cq_ring;
        size_t size;

        sq_ring = io_mem_alloc(struct_size(sq_ring, array, p->sq_entries));
        if (!sq_ring)
                return -ENOMEM;

        ctx->sq_ring = sq_ring;
        sq_ring->ring_mask = p->sq_entries - 1;
        sq_ring->ring_entries = p->sq_entries;
        ctx->sq_mask = sq_ring->ring_mask;
        ctx->sq_entries = sq_ring->ring_entries;

        size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
        if (size == SIZE_MAX)
                return -EOVERFLOW;

        ctx->sq_sqes = io_mem_alloc(size);
        if (!ctx->sq_sqes)
                return -ENOMEM;

        cq_ring = io_mem_alloc(struct_size(cq_ring, cqes, p->cq_entries));
        if (!cq_ring)
                return -ENOMEM;

        ctx->cq_ring = cq_ring;
        cq_ring->ring_mask = p->cq_entries - 1;
        cq_ring->ring_entries = p->cq_entries;
        ctx->cq_mask = cq_ring->ring_mask;
        ctx->cq_entries = cq_ring->ring_entries;
        return 0;
}
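/*
 * Sizing note (assuming the flexible-array ring layouts declared earlier in
 * this file: the SQ ring's u32 index array and the CQ ring's cqes[] array):
 * struct_size()/array_size() compute the allocation lengths overflow-safely.
 * For example, with sq_entries == 128 the SQ ring allocation above is
 * sizeof(*sq_ring) + 128 * sizeof(u32) bytes.
 */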

/*
 * Allocate an anonymous fd; this is what constitutes the application-visible
 * backing of an io_uring instance. The application mmaps this fd to gain
 * access to the SQ/CQ ring details. If UNIX sockets are enabled, we have to
 * tie this fd to a socket for file garbage collection purposes.
 */
static int io_uring_get_fd(struct io_ring_ctx *ctx)
{
        struct file *file;
        int ret;

#if defined(CONFIG_UNIX)
        ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
                               &ctx->ring_sock);
        if (ret)
                return ret;
#endif

        ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
        if (ret < 0)
                goto err;

        file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
                                        O_RDWR | O_CLOEXEC);
        if (IS_ERR(file)) {
                put_unused_fd(ret);
                ret = PTR_ERR(file);
                goto err;
        }

#if defined(CONFIG_UNIX)
        ctx->ring_sock->file = file;
        ctx->ring_sock->sk->sk_user_data = ctx;
#endif
        fd_install(ret, file);
        return ret;
err:
#if defined(CONFIG_UNIX)
        sock_release(ctx->ring_sock);
        ctx->ring_sock = NULL;
#endif
        return ret;
}

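/*
 * Core setup path: size the rings, account the locked memory against the
 * caller unless it has CAP_IPC_LOCK, allocate the ctx and rings, start SQ
 * submission offload, and hand back an fd plus the ring offsets that the
 * application needs for mmap().
 */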
static int io_uring_create(unsigned entries, struct io_uring_params *p)
{
        struct user_struct *user = NULL;
        struct io_ring_ctx *ctx;
        bool account_mem;
        int ret;

        if (!entries || entries > IORING_MAX_ENTRIES)
                return -EINVAL;

        /*
         * Use twice as many entries for the CQ ring. It's possible for the
         * application to drive a higher depth than the size of the SQ ring,
         * since the sqes are only used at submission time. This allows some
         * flexibility in overcommitting.
         */
        p->sq_entries = roundup_pow_of_two(entries);
        p->cq_entries = 2 * p->sq_entries;
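        /*
         * For example, a request for entries == 100 yields sq_entries == 128
         * (the next power of two) and cq_entries == 256.
         */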

        user = get_uid(current_user());
        account_mem = !capable(CAP_IPC_LOCK);

        if (account_mem) {
                ret = io_account_mem(user,
                                ring_pages(p->sq_entries, p->cq_entries));
                if (ret) {
                        free_uid(user);
                        return ret;
                }
        }

        ctx = io_ring_ctx_alloc(p);
        if (!ctx) {
                if (account_mem)
                        io_unaccount_mem(user, ring_pages(p->sq_entries,
                                                                p->cq_entries));
                free_uid(user);
                return -ENOMEM;
        }
        ctx->compat = in_compat_syscall();
        ctx->account_mem = account_mem;
        ctx->user = user;

        ret = io_allocate_scq_urings(ctx, p);
        if (ret)
                goto err;

        ret = io_sq_offload_start(ctx, p);
        if (ret)
                goto err;

        ret = io_uring_get_fd(ctx);
        if (ret < 0)
                goto err;

        memset(&p->sq_off, 0, sizeof(p->sq_off));
        p->sq_off.head = offsetof(struct io_sq_ring, r.head);
        p->sq_off.tail = offsetof(struct io_sq_ring, r.tail);
        p->sq_off.ring_mask = offsetof(struct io_sq_ring, ring_mask);
        p->sq_off.ring_entries = offsetof(struct io_sq_ring, ring_entries);
        p->sq_off.flags = offsetof(struct io_sq_ring, flags);
        p->sq_off.dropped = offsetof(struct io_sq_ring, dropped);
        p->sq_off.array = offsetof(struct io_sq_ring, array);

        memset(&p->cq_off, 0, sizeof(p->cq_off));
        p->cq_off.head = offsetof(struct io_cq_ring, r.head);
        p->cq_off.tail = offsetof(struct io_cq_ring, r.tail);
        p->cq_off.ring_mask = offsetof(struct io_cq_ring, ring_mask);
        p->cq_off.ring_entries = offsetof(struct io_cq_ring, ring_entries);
        p->cq_off.overflow = offsetof(struct io_cq_ring, overflow);
        p->cq_off.cqes = offsetof(struct io_cq_ring, cqes);
        return ret;
err:
        io_ring_ctx_wait_and_kill(ctx);
        return ret;
}

/*
 * Sets up an io_uring context and returns the fd. The application asks for
 * a ring size; we return the actual sq/cq ring sizes (among other things)
 * in the params structure passed in.
 */
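/*
 * For illustration only (userspace side, not part of this file): a minimal
 * sketch of the setup call plus locating the SQ head/tail through the
 * returned offsets. ring_fd is a placeholder, error handling is omitted,
 * and the mapping size assumes the u32 index array the SQ ring exposes at
 * sq_off.array:
 *
 *      struct io_uring_params p;
 *      unsigned char *sq;
 *      unsigned *sq_head, *sq_tail;
 *      int ring_fd;
 *
 *      memset(&p, 0, sizeof(p));
 *      ring_fd = syscall(__NR_io_uring_setup, 128, &p);
 *      sq = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *                PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *                ring_fd, IORING_OFF_SQ_RING);
 *      sq_head = (unsigned *)(sq + p.sq_off.head);
 *      sq_tail = (unsigned *)(sq + p.sq_off.tail);
 */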
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
        struct io_uring_params p;
        long ret;
        int i;

        if (copy_from_user(&p, params, sizeof(p)))
                return -EFAULT;
        for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
                if (p.resv[i])
                        return -EINVAL;
        }

        if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
                        IORING_SETUP_SQ_AFF))
                return -EINVAL;

        ret = io_uring_create(entries, &p);
        if (ret < 0)
                return ret;

        if (copy_to_user(params, &p, sizeof(p)))
                return -EFAULT;

        return ret;
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
                struct io_uring_params __user *, params)
{
        return io_uring_setup(entries, params);
}

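/*
 * Caller must hold ctx->uring_lock. The ring is quiesced by killing the
 * percpu ref and waiting for it to drain (the lock is dropped while
 * waiting), the requested opcode is carried out, and the ref is then
 * revived.
 */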
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                               void __user *arg, unsigned nr_args)
        __releases(ctx->uring_lock)
        __acquires(ctx->uring_lock)
{
        int ret;

        /*
         * We're inside the ring mutex; if the ref is already dying, then
         * someone else killed the ctx or is already going through
         * io_uring_register().
         */
        if (percpu_ref_is_dying(&ctx->refs))
                return -ENXIO;

        percpu_ref_kill(&ctx->refs);

        /*
         * Drop uring mutex before waiting for references to exit. If another
         * thread is currently inside io_uring_enter() it might need to grab
         * the uring_lock to make progress. If we hold it here across the drain
         * wait, then we can deadlock. It's safe to drop the mutex here, since
         * no new references will come in after we've killed the percpu ref.
         */
        mutex_unlock(&ctx->uring_lock);
        wait_for_completion(&ctx->ctx_done);
        mutex_lock(&ctx->uring_lock);

        switch (opcode) {
        case IORING_REGISTER_BUFFERS:
                ret = io_sqe_buffer_register(ctx, arg, nr_args);
                break;
        case IORING_UNREGISTER_BUFFERS:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
                ret = io_sqe_buffer_unregister(ctx);
                break;
        case IORING_REGISTER_FILES:
                ret = io_sqe_files_register(ctx, arg, nr_args);
                break;
        case IORING_UNREGISTER_FILES:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
                ret = io_sqe_files_unregister(ctx);
                break;
        case IORING_REGISTER_EVENTFD:
                ret = -EINVAL;
                if (nr_args != 1)
                        break;
                ret = io_eventfd_register(ctx, arg);
                break;
        case IORING_UNREGISTER_EVENTFD:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
                ret = io_eventfd_unregister(ctx);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        /* bring the ctx back to life */
        reinit_completion(&ctx->ctx_done);
        percpu_ref_reinit(&ctx->refs);
        return ret;
}
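/*
 * For illustration only (userspace side, not part of this file): a minimal
 * sketch of registering and later unregistering fixed buffers, matching the
 * argument checks above. ring_fd, iovecs and nr are placeholders:
 *
 *      ret = syscall(__NR_io_uring_register, ring_fd,
 *                    IORING_REGISTER_BUFFERS, iovecs, nr);
 *      ...
 *      ret = syscall(__NR_io_uring_register, ring_fd,
 *                    IORING_UNREGISTER_BUFFERS, NULL, 0);
 */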

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
                void __user *, arg, unsigned int, nr_args)
{
        struct io_ring_ctx *ctx;
        long ret = -EBADF;
        struct fd f;

        f = fdget(fd);
        if (!f.file)
                return -EBADF;

        ret = -EOPNOTSUPP;
        if (f.file->f_op != &io_uring_fops)
                goto out_fput;

        ctx = f.file->private_data;

        mutex_lock(&ctx->uring_lock);
        ret = __io_uring_register(ctx, opcode, arg, nr_args);
        mutex_unlock(&ctx->uring_lock);
out_fput:
        fdput(f);
        return ret;
}

static int __init io_uring_init(void)
{
        req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
        return 0;
}
__initcall(io_uring_init);