 1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
5 *
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side. When the application reads the CQ ring
8 * tail, it must use an appropriate smp_rmb() to order with the smp_wmb()
9 * the kernel uses after writing the tail. Failure to do so could cause a
 10 * delay in when the application notices that completion events are available.
11 * This isn't a fatal condition. Likewise, the application must use an
12 * appropriate smp_wmb() both before writing the SQ tail, and after writing
13 * the SQ tail. The first one orders the sqe writes with the tail write, and
14 * the latter is paired with the smp_rmb() the kernel will issue before
15 * reading the SQ tail on submission.
16 *
17 * Also see the examples in the liburing library:
18 *
19 * git://git.kernel.dk/liburing
20 *
21 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
22 * from data shared between the kernel and application. This is done both
 23 * for ordering purposes and to ensure that once a value is loaded from
24 * data that the application could potentially modify, it remains stable.
25 *
26 * Copyright (C) 2018-2019 Jens Axboe
 27 * Copyright (c) 2018-2019 Christoph Hellwig
 28 */
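/*
 * Illustrative only: a minimal userspace sketch of the CQ reap side of the
 * protocol described above, assuming a liburing-style mmap of the CQ ring.
 * read_barrier()/write_barrier() are stand-ins for whatever compiler/CPU
 * barriers the application uses (liburing supplies its own); none of this
 * is compiled here.
 */
#if 0
static int reap_cqe(unsigned *khead, const unsigned *ktail, unsigned mask,
		    const struct io_uring_cqe *cqes, struct io_uring_cqe *out)
{
	unsigned head = *khead;

	/* pairs with the smp_wmb() the kernel issues after writing the tail */
	read_barrier();
	if (head == *ktail)
		return 0;			/* no completions available */

	*out = cqes[head & mask];		/* consume before publishing head */

	/* order the cqe read above with the head update the kernel will see */
	write_barrier();
	*khead = head + 1;
	return 1;
}
#endif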
29#include <linux/kernel.h>
30#include <linux/init.h>
31#include <linux/errno.h>
32#include <linux/syscalls.h>
33#include <linux/compat.h>
34#include <linux/refcount.h>
35#include <linux/uio.h>
36
37#include <linux/sched/signal.h>
38#include <linux/fs.h>
39#include <linux/file.h>
40#include <linux/fdtable.h>
41#include <linux/mm.h>
42#include <linux/mman.h>
43#include <linux/mmu_context.h>
44#include <linux/percpu.h>
45#include <linux/slab.h>
46#include <linux/workqueue.h>
47#include <linux/blkdev.h>
48#include <linux/net.h>
49#include <net/sock.h>
50#include <net/af_unix.h>
51#include <linux/anon_inodes.h>
52#include <linux/sched/mm.h>
53#include <linux/uaccess.h>
54#include <linux/nospec.h>
55
56#include <uapi/linux/io_uring.h>
57
58#include "internal.h"
59
60#define IORING_MAX_ENTRIES 4096
61
62struct io_uring {
63 u32 head ____cacheline_aligned_in_smp;
64 u32 tail ____cacheline_aligned_in_smp;
65};
66
67struct io_sq_ring {
68 struct io_uring r;
69 u32 ring_mask;
70 u32 ring_entries;
71 u32 dropped;
72 u32 flags;
73 u32 array[];
74};
75
76struct io_cq_ring {
77 struct io_uring r;
78 u32 ring_mask;
79 u32 ring_entries;
80 u32 overflow;
81 struct io_uring_cqe cqes[];
82};
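/*
 * Sketch (illustrative only) of how an application publishes work through
 * the SQ ring laid out above: fill an sqe slot, point array[] at it, then
 * bump the tail with the two barriers the comment at the top of this file
 * asks for. write_barrier() is a stand-in for the application's own store
 * barrier; not compiled here.
 */
#if 0
static void publish_sqe(struct io_sq_ring *sq, unsigned sqe_index)
{
	unsigned tail = sq->r.tail;

	/* the sqe at sqe_index has already been filled in by the caller */
	sq->array[tail & sq->ring_mask] = sqe_index;

	/* first barrier: order the sqe/array stores with the tail store */
	write_barrier();
	sq->r.tail = tail + 1;
	/* second barrier: make the new tail visible before io_uring_enter() */
	write_barrier();
}
#endif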
83
84struct io_ring_ctx {
85 struct {
86 struct percpu_ref refs;
87 } ____cacheline_aligned_in_smp;
88
89 struct {
90 unsigned int flags;
91 bool compat;
92 bool account_mem;
93
94 /* SQ ring */
95 struct io_sq_ring *sq_ring;
96 unsigned cached_sq_head;
97 unsigned sq_entries;
98 unsigned sq_mask;
99 struct io_uring_sqe *sq_sqes;
100 } ____cacheline_aligned_in_smp;
101
102 /* IO offload */
103 struct workqueue_struct *sqo_wq;
104 struct mm_struct *sqo_mm;
105
106 struct {
107 /* CQ ring */
108 struct io_cq_ring *cq_ring;
109 unsigned cached_cq_tail;
110 unsigned cq_entries;
111 unsigned cq_mask;
112 struct wait_queue_head cq_wait;
113 struct fasync_struct *cq_fasync;
114 } ____cacheline_aligned_in_smp;
115
116 struct user_struct *user;
117
118 struct completion ctx_done;
119
120 struct {
121 struct mutex uring_lock;
122 wait_queue_head_t wait;
123 } ____cacheline_aligned_in_smp;
124
125 struct {
126 spinlock_t completion_lock;
 127	bool			poll_multi_file;
128 /*
129 * ->poll_list is protected by the ctx->uring_lock for
130 * io_uring instances that don't use IORING_SETUP_SQPOLL.
131 * For SQPOLL, only the single threaded io_sq_thread() will
132 * manipulate the list, hence no extra locking is needed there.
133 */
134 struct list_head poll_list;
 135	} ____cacheline_aligned_in_smp;
136
137#if defined(CONFIG_UNIX)
138 struct socket *ring_sock;
139#endif
140};
141
142struct sqe_submit {
143 const struct io_uring_sqe *sqe;
144 unsigned short index;
145 bool has_user;
 146	bool				needs_lock;
 147};
148
149struct io_kiocb {
150 struct kiocb rw;
151
152 struct sqe_submit submit;
153
154 struct io_ring_ctx *ctx;
155 struct list_head list;
156 unsigned int flags;
157#define REQ_F_FORCE_NONBLOCK 1 /* inline submission attempt */
 158#define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
 159	u64			user_data;
 160	u64			error;
 161
162 struct work_struct work;
163};
164
165#define IO_PLUG_THRESHOLD 2
 166#define IO_IOPOLL_BATCH		8
 167
168static struct kmem_cache *req_cachep;
169
170static const struct file_operations io_uring_fops;
171
172struct sock *io_uring_get_socket(struct file *file)
173{
174#if defined(CONFIG_UNIX)
175 if (file->f_op == &io_uring_fops) {
176 struct io_ring_ctx *ctx = file->private_data;
177
178 return ctx->ring_sock->sk;
179 }
180#endif
181 return NULL;
182}
183EXPORT_SYMBOL(io_uring_get_socket);
184
185static void io_ring_ctx_ref_free(struct percpu_ref *ref)
186{
187 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
188
189 complete(&ctx->ctx_done);
190}
191
192static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
193{
194 struct io_ring_ctx *ctx;
195
196 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
197 if (!ctx)
198 return NULL;
199
200 if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, 0, GFP_KERNEL)) {
201 kfree(ctx);
202 return NULL;
203 }
204
205 ctx->flags = p->flags;
206 init_waitqueue_head(&ctx->cq_wait);
207 init_completion(&ctx->ctx_done);
208 mutex_init(&ctx->uring_lock);
209 init_waitqueue_head(&ctx->wait);
210 spin_lock_init(&ctx->completion_lock);
 211	INIT_LIST_HEAD(&ctx->poll_list);
 212	return ctx;
213}
214
215static void io_commit_cqring(struct io_ring_ctx *ctx)
216{
217 struct io_cq_ring *ring = ctx->cq_ring;
218
219 if (ctx->cached_cq_tail != READ_ONCE(ring->r.tail)) {
220 /* order cqe stores with ring update */
221 smp_store_release(&ring->r.tail, ctx->cached_cq_tail);
222
223 /*
 224		 * Write side barrier of tail update, app has read side. See
225 * comment at the top of this file.
226 */
227 smp_wmb();
228
229 if (wq_has_sleeper(&ctx->cq_wait)) {
230 wake_up_interruptible(&ctx->cq_wait);
231 kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
232 }
233 }
234}
235
236static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
237{
238 struct io_cq_ring *ring = ctx->cq_ring;
239 unsigned tail;
240
241 tail = ctx->cached_cq_tail;
242 /* See comment at the top of the file */
243 smp_rmb();
244 if (tail + 1 == READ_ONCE(ring->r.head))
245 return NULL;
246
247 ctx->cached_cq_tail++;
248 return &ring->cqes[tail & ctx->cq_mask];
249}
250
251static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
252 long res, unsigned ev_flags)
253{
254 struct io_uring_cqe *cqe;
255
256 /*
257 * If we can't get a cq entry, userspace overflowed the
258 * submission (by quite a lot). Increment the overflow count in
259 * the ring.
260 */
261 cqe = io_get_cqring(ctx);
262 if (cqe) {
263 WRITE_ONCE(cqe->user_data, ki_user_data);
264 WRITE_ONCE(cqe->res, res);
265 WRITE_ONCE(cqe->flags, ev_flags);
266 } else {
267 unsigned overflow = READ_ONCE(ctx->cq_ring->overflow);
268
269 WRITE_ONCE(ctx->cq_ring->overflow, overflow + 1);
270 }
271}
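/*
 * Note on the overflow path above: the application can detect lost
 * completions by watching the overflow counter in its mapped CQ ring,
 * e.g. (illustrative sketch, handle_dropped_completions() being whatever
 * recovery the application chooses):
 *
 *	if (*cq_overflow_ptr != last_seen_overflow)
 *		handle_dropped_completions();
 *
 * sq_ring->dropped plays the same role for invalid sqe indices skipped
 * in io_get_sqring().
 */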
272
273static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 ki_user_data,
274 long res, unsigned ev_flags)
275{
276 unsigned long flags;
277
278 spin_lock_irqsave(&ctx->completion_lock, flags);
279 io_cqring_fill_event(ctx, ki_user_data, res, ev_flags);
280 io_commit_cqring(ctx);
281 spin_unlock_irqrestore(&ctx->completion_lock, flags);
282
283 if (waitqueue_active(&ctx->wait))
284 wake_up(&ctx->wait);
285}
286
287static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
288{
289 percpu_ref_put_many(&ctx->refs, refs);
290
291 if (waitqueue_active(&ctx->wait))
292 wake_up(&ctx->wait);
293}
294
295static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx)
296{
297 struct io_kiocb *req;
298
299 if (!percpu_ref_tryget(&ctx->refs))
300 return NULL;
301
302 req = kmem_cache_alloc(req_cachep, __GFP_NOWARN);
303 if (req) {
304 req->ctx = ctx;
305 req->flags = 0;
306 return req;
307 }
308
309 io_ring_drop_ctx_refs(ctx, 1);
310 return NULL;
311}
312
 313static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
314{
315 if (*nr) {
316 kmem_cache_free_bulk(req_cachep, *nr, reqs);
317 io_ring_drop_ctx_refs(ctx, *nr);
318 *nr = 0;
319 }
320}
321
 322static void io_free_req(struct io_kiocb *req)
323{
324 io_ring_drop_ctx_refs(req->ctx, 1);
325 kmem_cache_free(req_cachep, req);
326}
327
 328/*
329 * Find and free completed poll iocbs
330 */
331static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
332 struct list_head *done)
333{
334 void *reqs[IO_IOPOLL_BATCH];
335 struct io_kiocb *req;
336 int to_free = 0;
337
338 while (!list_empty(done)) {
339 req = list_first_entry(done, struct io_kiocb, list);
340 list_del(&req->list);
341
342 io_cqring_fill_event(ctx, req->user_data, req->error, 0);
343
344 reqs[to_free++] = req;
345 (*nr_events)++;
346
347 fput(req->rw.ki_filp);
348 if (to_free == ARRAY_SIZE(reqs))
349 io_free_req_many(ctx, reqs, &to_free);
350 }
351 io_commit_cqring(ctx);
352
353 io_free_req_many(ctx, reqs, &to_free);
354}
355
356static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
357 long min)
358{
359 struct io_kiocb *req, *tmp;
360 LIST_HEAD(done);
361 bool spin;
362 int ret;
363
364 /*
365 * Only spin for completions if we don't have multiple devices hanging
366 * off our complete list, and we're under the requested amount.
367 */
368 spin = !ctx->poll_multi_file && *nr_events < min;
369
370 ret = 0;
371 list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
372 struct kiocb *kiocb = &req->rw;
373
374 /*
375 * Move completed entries to our local list. If we find a
376 * request that requires polling, break out and complete
377 * the done list first, if we have entries there.
378 */
379 if (req->flags & REQ_F_IOPOLL_COMPLETED) {
380 list_move_tail(&req->list, &done);
381 continue;
382 }
383 if (!list_empty(&done))
384 break;
385
386 ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
387 if (ret < 0)
388 break;
389
390 if (ret && spin)
391 spin = false;
392 ret = 0;
393 }
394
395 if (!list_empty(&done))
396 io_iopoll_complete(ctx, nr_events, &done);
397
398 return ret;
399}
400
401/*
 402 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
403 * non-spinning poll check - we'll still enter the driver poll loop, but only
404 * as a non-spinning completion check.
405 */
406static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
407 long min)
408{
409 while (!list_empty(&ctx->poll_list)) {
410 int ret;
411
412 ret = io_do_iopoll(ctx, nr_events, min);
413 if (ret < 0)
414 return ret;
415 if (!min || *nr_events >= min)
416 return 0;
417 }
418
419 return 1;
420}
421
422/*
423 * We can't just wait for polled events to come to us, we have to actively
424 * find and complete them.
425 */
426static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
427{
428 if (!(ctx->flags & IORING_SETUP_IOPOLL))
429 return;
430
431 mutex_lock(&ctx->uring_lock);
432 while (!list_empty(&ctx->poll_list)) {
433 unsigned int nr_events = 0;
434
435 io_iopoll_getevents(ctx, &nr_events, 1);
436 }
437 mutex_unlock(&ctx->uring_lock);
438}
439
440static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
441 long min)
442{
443 int ret = 0;
444
445 do {
446 int tmin = 0;
447
448 if (*nr_events < min)
449 tmin = min - *nr_events;
450
451 ret = io_iopoll_getevents(ctx, nr_events, tmin);
452 if (ret <= 0)
453 break;
454 ret = 0;
455 } while (min && !*nr_events && !need_resched());
456
457 return ret;
458}
459
 460static void kiocb_end_write(struct kiocb *kiocb)
461{
462 if (kiocb->ki_flags & IOCB_WRITE) {
463 struct inode *inode = file_inode(kiocb->ki_filp);
464
465 /*
466 * Tell lockdep we inherited freeze protection from submission
467 * thread.
468 */
469 if (S_ISREG(inode->i_mode))
470 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
471 file_end_write(kiocb->ki_filp);
472 }
473}
474
475static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
476{
477 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
478
479 kiocb_end_write(kiocb);
480
481 fput(kiocb->ki_filp);
482 io_cqring_add_event(req->ctx, req->user_data, res, 0);
483 io_free_req(req);
484}
485
 486static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
487{
488 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
489
490 kiocb_end_write(kiocb);
491
492 req->error = res;
493 if (res != -EAGAIN)
494 req->flags |= REQ_F_IOPOLL_COMPLETED;
495}
496
497/*
498 * After the iocb has been issued, it's safe to be found on the poll list.
499 * Adding the kiocb to the list AFTER submission ensures that we don't
 500 * find it from an io_iopoll_getevents() thread before the issuer is done
501 * accessing the kiocb cookie.
502 */
503static void io_iopoll_req_issued(struct io_kiocb *req)
504{
505 struct io_ring_ctx *ctx = req->ctx;
506
507 /*
508 * Track whether we have multiple files in our lists. This will impact
509 * how we do polling eventually, not spinning if we're on potentially
510 * different devices.
511 */
512 if (list_empty(&ctx->poll_list)) {
513 ctx->poll_multi_file = false;
514 } else if (!ctx->poll_multi_file) {
515 struct io_kiocb *list_req;
516
517 list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
518 list);
519 if (list_req->rw.ki_filp != req->rw.ki_filp)
520 ctx->poll_multi_file = true;
521 }
522
523 /*
524 * For fast devices, IO may have already completed. If it has, add
525 * it to the front so we find it first.
526 */
527 if (req->flags & REQ_F_IOPOLL_COMPLETED)
528 list_add(&req->list, &ctx->poll_list);
529 else
530 list_add_tail(&req->list, &ctx->poll_list);
531}
532
 533/*
534 * If we tracked the file through the SCM inflight mechanism, we could support
535 * any file. For now, just ensure that anything potentially problematic is done
536 * inline.
537 */
538static bool io_file_supports_async(struct file *file)
539{
540 umode_t mode = file_inode(file)->i_mode;
541
542 if (S_ISBLK(mode) || S_ISCHR(mode))
543 return true;
544 if (S_ISREG(mode) && file->f_op != &io_uring_fops)
545 return true;
546
547 return false;
548}
549
550static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
551 bool force_nonblock)
552{
 553	struct io_ring_ctx *ctx = req->ctx;
 554	struct kiocb *kiocb = &req->rw;
555 unsigned ioprio;
556 int fd, ret;
557
558 /* For -EAGAIN retry, everything is already prepped */
559 if (kiocb->ki_filp)
560 return 0;
561
562 fd = READ_ONCE(sqe->fd);
563 kiocb->ki_filp = fget(fd);
564 if (unlikely(!kiocb->ki_filp))
565 return -EBADF;
566 if (force_nonblock && !io_file_supports_async(kiocb->ki_filp))
567 force_nonblock = false;
568 kiocb->ki_pos = READ_ONCE(sqe->off);
569 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
570 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
571
572 ioprio = READ_ONCE(sqe->ioprio);
573 if (ioprio) {
574 ret = ioprio_check_cap(ioprio);
575 if (ret)
576 goto out_fput;
577
578 kiocb->ki_ioprio = ioprio;
579 } else
580 kiocb->ki_ioprio = get_current_ioprio();
581
582 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
583 if (unlikely(ret))
584 goto out_fput;
585 if (force_nonblock) {
586 kiocb->ki_flags |= IOCB_NOWAIT;
587 req->flags |= REQ_F_FORCE_NONBLOCK;
588 }
 589	if (ctx->flags & IORING_SETUP_IOPOLL) {
590 ret = -EOPNOTSUPP;
591 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
592 !kiocb->ki_filp->f_op->iopoll)
593 goto out_fput;
 594
 595		req->error = 0;
596 kiocb->ki_flags |= IOCB_HIPRI;
597 kiocb->ki_complete = io_complete_rw_iopoll;
598 } else {
599 if (kiocb->ki_flags & IOCB_HIPRI) {
600 ret = -EINVAL;
601 goto out_fput;
602 }
603 kiocb->ki_complete = io_complete_rw;
604 }
 605	return 0;
606out_fput:
607 fput(kiocb->ki_filp);
608 return ret;
609}
610
611static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
612{
613 switch (ret) {
614 case -EIOCBQUEUED:
615 break;
616 case -ERESTARTSYS:
617 case -ERESTARTNOINTR:
618 case -ERESTARTNOHAND:
619 case -ERESTART_RESTARTBLOCK:
620 /*
621 * We can't just restart the syscall, since previously
622 * submitted sqes may already be in progress. Just fail this
623 * IO with EINTR.
624 */
625 ret = -EINTR;
626 /* fall through */
627 default:
628 kiocb->ki_complete(kiocb, ret, 0);
629 }
630}
631
632static int io_import_iovec(struct io_ring_ctx *ctx, int rw,
633 const struct sqe_submit *s, struct iovec **iovec,
634 struct iov_iter *iter)
635{
636 const struct io_uring_sqe *sqe = s->sqe;
637 void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
638 size_t sqe_len = READ_ONCE(sqe->len);
639
640 if (!s->has_user)
641 return -EFAULT;
642
643#ifdef CONFIG_COMPAT
644 if (ctx->compat)
645 return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
646 iovec, iter);
647#endif
648
649 return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
650}
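/*
 * For IORING_OP_READV/WRITEV the import above means the sqe carries a
 * pointer to an iovec array in ->addr and the iovec count in ->len.
 * Userspace side, as a sketch:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = buf_len };
 *
 *	sqe->opcode = IORING_OP_READV;
 *	sqe->fd = fd;
 *	sqe->off = file_offset;
 *	sqe->addr = (unsigned long) &iov;
 *	sqe->len = 1;
 */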
651
652static ssize_t io_read(struct io_kiocb *req, const struct sqe_submit *s,
653 bool force_nonblock)
654{
655 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
656 struct kiocb *kiocb = &req->rw;
657 struct iov_iter iter;
658 struct file *file;
659 ssize_t ret;
660
661 ret = io_prep_rw(req, s->sqe, force_nonblock);
662 if (ret)
663 return ret;
664 file = kiocb->ki_filp;
665
666 ret = -EBADF;
667 if (unlikely(!(file->f_mode & FMODE_READ)))
668 goto out_fput;
669 ret = -EINVAL;
670 if (unlikely(!file->f_op->read_iter))
671 goto out_fput;
672
673 ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
674 if (ret)
675 goto out_fput;
676
677 ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_iter_count(&iter));
678 if (!ret) {
679 ssize_t ret2;
680
681 /* Catch -EAGAIN return for forced non-blocking submission */
682 ret2 = call_read_iter(file, kiocb, &iter);
683 if (!force_nonblock || ret2 != -EAGAIN)
684 io_rw_done(kiocb, ret2);
685 else
686 ret = -EAGAIN;
687 }
688 kfree(iovec);
689out_fput:
690 /* Hold on to the file for -EAGAIN */
691 if (unlikely(ret && ret != -EAGAIN))
692 fput(file);
693 return ret;
694}
695
696static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s,
697 bool force_nonblock)
698{
699 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
700 struct kiocb *kiocb = &req->rw;
701 struct iov_iter iter;
702 struct file *file;
703 ssize_t ret;
704
705 ret = io_prep_rw(req, s->sqe, force_nonblock);
706 if (ret)
707 return ret;
708 /* Hold on to the file for -EAGAIN */
709 if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT))
710 return -EAGAIN;
711
712 ret = -EBADF;
713 file = kiocb->ki_filp;
714 if (unlikely(!(file->f_mode & FMODE_WRITE)))
715 goto out_fput;
716 ret = -EINVAL;
717 if (unlikely(!file->f_op->write_iter))
718 goto out_fput;
719
720 ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
721 if (ret)
722 goto out_fput;
723
724 ret = rw_verify_area(WRITE, file, &kiocb->ki_pos,
725 iov_iter_count(&iter));
726 if (!ret) {
727 /*
728 * Open-code file_start_write here to grab freeze protection,
729 * which will be released by another thread in
730 * io_complete_rw(). Fool lockdep by telling it the lock got
731 * released so that it doesn't complain about the held lock when
732 * we return to userspace.
733 */
734 if (S_ISREG(file_inode(file)->i_mode)) {
735 __sb_start_write(file_inode(file)->i_sb,
736 SB_FREEZE_WRITE, true);
737 __sb_writers_release(file_inode(file)->i_sb,
738 SB_FREEZE_WRITE);
739 }
740 kiocb->ki_flags |= IOCB_WRITE;
741 io_rw_done(kiocb, call_write_iter(file, kiocb, &iter));
742 }
743 kfree(iovec);
744out_fput:
745 if (unlikely(ret))
746 fput(file);
747 return ret;
748}
749
750/*
751 * IORING_OP_NOP just posts a completion event, nothing else.
752 */
753static int io_nop(struct io_kiocb *req, u64 user_data)
754{
755 struct io_ring_ctx *ctx = req->ctx;
756 long err = 0;
757
 758	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
759 return -EINVAL;
760
 761	/*
762 * Twilight zone - it's possible that someone issued an opcode that
763 * has a file attached, then got -EAGAIN on submission, and changed
764 * the sqe before we retried it from async context. Avoid dropping
765 * a file reference for this malicious case, and flag the error.
766 */
767 if (req->rw.ki_filp) {
768 err = -EBADF;
769 fput(req->rw.ki_filp);
770 }
771 io_cqring_add_event(ctx, user_data, err, 0);
772 io_free_req(req);
773 return 0;
774}
775
 776static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
777{
778 int fd;
779
780 /* Prep already done */
781 if (req->rw.ki_filp)
782 return 0;
783
 784	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
785 return -EINVAL;
 786	if (unlikely(sqe->addr || sqe->ioprio))
787 return -EINVAL;
788
789 fd = READ_ONCE(sqe->fd);
790 req->rw.ki_filp = fget(fd);
791 if (unlikely(!req->rw.ki_filp))
792 return -EBADF;
793
794 return 0;
795}
796
797static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
798 bool force_nonblock)
799{
800 loff_t sqe_off = READ_ONCE(sqe->off);
801 loff_t sqe_len = READ_ONCE(sqe->len);
802 loff_t end = sqe_off + sqe_len;
803 unsigned fsync_flags;
804 int ret;
805
806 fsync_flags = READ_ONCE(sqe->fsync_flags);
807 if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
808 return -EINVAL;
809
810 ret = io_prep_fsync(req, sqe);
811 if (ret)
812 return ret;
813
814 /* fsync always requires a blocking context */
815 if (force_nonblock)
816 return -EAGAIN;
817
818 ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
819 end > 0 ? end : LLONG_MAX,
820 fsync_flags & IORING_FSYNC_DATASYNC);
821
822 fput(req->rw.ki_filp);
823 io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
824 io_free_req(req);
825 return 0;
826}
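/*
 * Userspace side of IORING_OP_FSYNC, matching the checks above (sketch).
 * An off/len of 0 syncs to EOF (the end offset becomes LLONG_MAX),
 * fsync_flags may be IORING_FSYNC_DATASYNC, and addr/ioprio must be left
 * at 0 per io_prep_fsync():
 *
 *	sqe->opcode = IORING_OP_FSYNC;
 *	sqe->fd = fd;
 *	sqe->off = 0;
 *	sqe->len = 0;
 *	sqe->fsync_flags = 0;
 *	sqe->addr = 0;
 *	sqe->ioprio = 0;
 */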
827
 828static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
829 const struct sqe_submit *s, bool force_nonblock)
830{
831 ssize_t ret;
832 int opcode;
833
834 if (unlikely(s->index >= ctx->sq_entries))
835 return -EINVAL;
836 req->user_data = READ_ONCE(s->sqe->user_data);
837
838 opcode = READ_ONCE(s->sqe->opcode);
839 switch (opcode) {
840 case IORING_OP_NOP:
841 ret = io_nop(req, req->user_data);
842 break;
843 case IORING_OP_READV:
844 ret = io_read(req, s, force_nonblock);
845 break;
846 case IORING_OP_WRITEV:
847 ret = io_write(req, s, force_nonblock);
848 break;
 849	case IORING_OP_FSYNC:
850 ret = io_fsync(req, s->sqe, force_nonblock);
851 break;
 852	default:
853 ret = -EINVAL;
854 break;
855 }
856
 857	if (ret)
858 return ret;
859
860 if (ctx->flags & IORING_SETUP_IOPOLL) {
861 if (req->error == -EAGAIN)
862 return -EAGAIN;
863
864 /* workqueue context doesn't hold uring_lock, grab it now */
865 if (s->needs_lock)
866 mutex_lock(&ctx->uring_lock);
867 io_iopoll_req_issued(req);
868 if (s->needs_lock)
869 mutex_unlock(&ctx->uring_lock);
870 }
871
872 return 0;
 873}
874
875static void io_sq_wq_submit_work(struct work_struct *work)
876{
877 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
878 struct sqe_submit *s = &req->submit;
879 const struct io_uring_sqe *sqe = s->sqe;
880 struct io_ring_ctx *ctx = req->ctx;
881 mm_segment_t old_fs = get_fs();
882 int ret;
883
884 /* Ensure we clear previously set forced non-block flag */
885 req->flags &= ~REQ_F_FORCE_NONBLOCK;
886 req->rw.ki_flags &= ~IOCB_NOWAIT;
887
888 if (!mmget_not_zero(ctx->sqo_mm)) {
889 ret = -EFAULT;
890 goto err;
891 }
892
893 use_mm(ctx->sqo_mm);
894 set_fs(USER_DS);
895 s->has_user = true;
 896	s->needs_lock = true;
 897
 898	do {
899 ret = __io_submit_sqe(ctx, req, s, false);
900 /*
901 * We can get EAGAIN for polled IO even though we're forcing
902 * a sync submission from here, since we can't wait for
903 * request slots on the block side.
904 */
905 if (ret != -EAGAIN)
906 break;
907 cond_resched();
908 } while (1);
 909
910 set_fs(old_fs);
911 unuse_mm(ctx->sqo_mm);
912 mmput(ctx->sqo_mm);
913err:
914 if (ret) {
915 io_cqring_add_event(ctx, sqe->user_data, ret, 0);
916 io_free_req(req);
917 }
918
919 /* async context always use a copy of the sqe */
920 kfree(sqe);
921}
922
923static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s)
924{
925 struct io_kiocb *req;
926 ssize_t ret;
927
928 /* enforce forwards compatibility on users */
929 if (unlikely(s->sqe->flags))
930 return -EINVAL;
931
932 req = io_get_req(ctx);
933 if (unlikely(!req))
934 return -EAGAIN;
935
936 req->rw.ki_filp = NULL;
937
938 ret = __io_submit_sqe(ctx, req, s, true);
939 if (ret == -EAGAIN) {
940 struct io_uring_sqe *sqe_copy;
941
942 sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
943 if (sqe_copy) {
944 memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
945 s->sqe = sqe_copy;
946
947 memcpy(&req->submit, s, sizeof(*s));
948 INIT_WORK(&req->work, io_sq_wq_submit_work);
949 queue_work(ctx->sqo_wq, &req->work);
950 ret = 0;
951 }
952 }
953 if (ret)
954 io_free_req(req);
955
956 return ret;
957}
958
959static void io_commit_sqring(struct io_ring_ctx *ctx)
960{
961 struct io_sq_ring *ring = ctx->sq_ring;
962
963 if (ctx->cached_sq_head != READ_ONCE(ring->r.head)) {
964 /*
965 * Ensure any loads from the SQEs are done at this point,
966 * since once we write the new head, the application could
967 * write new data to them.
968 */
969 smp_store_release(&ring->r.head, ctx->cached_sq_head);
970
971 /*
 972		 * Write side barrier of head update, app has read side. See
 973		 * comment at the top of this file.
974 */
975 smp_wmb();
976 }
977}
978
979/*
980 * Undo last io_get_sqring()
981 */
982static void io_drop_sqring(struct io_ring_ctx *ctx)
983{
984 ctx->cached_sq_head--;
985}
986
987/*
988 * Fetch an sqe, if one is available. Note that s->sqe will point to memory
989 * that is mapped by userspace. This means that care needs to be taken to
990 * ensure that reads are stable, as we cannot rely on userspace always
991 * being a good citizen. If members of the sqe are validated and then later
992 * used, it's important that those reads are done through READ_ONCE() to
993 * prevent a re-load down the line.
994 */
995static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
996{
997 struct io_sq_ring *ring = ctx->sq_ring;
998 unsigned head;
999
1000 /*
1001 * The cached sq head (or cq tail) serves two purposes:
1002 *
 1003	 * 1) allows us to batch the cost of the user visible head
 1004	 *    updates.
1005 * 2) allows the kernel side to track the head on its own, even
1006 * though the application is the one updating it.
1007 */
1008 head = ctx->cached_sq_head;
1009 /* See comment at the top of this file */
1010 smp_rmb();
1011 if (head == READ_ONCE(ring->r.tail))
1012 return false;
1013
1014 head = READ_ONCE(ring->array[head & ctx->sq_mask]);
1015 if (head < ctx->sq_entries) {
1016 s->index = head;
1017 s->sqe = &ctx->sq_sqes[head];
1018 ctx->cached_sq_head++;
1019 return true;
1020 }
1021
1022 /* drop invalid entries */
1023 ctx->cached_sq_head++;
1024 ring->dropped++;
1025 /* See comment at the top of this file */
1026 smp_wmb();
1027 return false;
1028}
1029
1030static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
1031{
1032 int i, ret = 0, submit = 0;
1033 struct blk_plug plug;
1034
1035 if (to_submit > IO_PLUG_THRESHOLD)
1036 blk_start_plug(&plug);
1037
1038 for (i = 0; i < to_submit; i++) {
1039 struct sqe_submit s;
1040
1041 if (!io_get_sqring(ctx, &s))
1042 break;
1043
1044 s.has_user = true;
 1045		s.needs_lock = false;
1046
 1047		ret = io_submit_sqe(ctx, &s);
1048 if (ret) {
1049 io_drop_sqring(ctx);
1050 break;
1051 }
1052
1053 submit++;
1054 }
1055 io_commit_sqring(ctx);
1056
1057 if (to_submit > IO_PLUG_THRESHOLD)
1058 blk_finish_plug(&plug);
1059
1060 return submit ? submit : ret;
1061}
1062
1063static unsigned io_cqring_events(struct io_cq_ring *ring)
1064{
1065 return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
1066}
1067
1068/*
1069 * Wait until events become available, if we don't already have some. The
1070 * application must reap them itself, as they reside on the shared cq ring.
1071 */
1072static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
1073 const sigset_t __user *sig, size_t sigsz)
1074{
1075 struct io_cq_ring *ring = ctx->cq_ring;
1076 sigset_t ksigmask, sigsaved;
1077 DEFINE_WAIT(wait);
1078 int ret;
1079
1080 /* See comment at the top of this file */
1081 smp_rmb();
1082 if (io_cqring_events(ring) >= min_events)
1083 return 0;
1084
1085 if (sig) {
1086 ret = set_user_sigmask(sig, &ksigmask, &sigsaved, sigsz);
1087 if (ret)
1088 return ret;
1089 }
1090
1091 do {
1092 prepare_to_wait(&ctx->wait, &wait, TASK_INTERRUPTIBLE);
1093
1094 ret = 0;
1095 /* See comment at the top of this file */
1096 smp_rmb();
1097 if (io_cqring_events(ring) >= min_events)
1098 break;
1099
1100 schedule();
1101
1102 ret = -EINTR;
1103 if (signal_pending(current))
1104 break;
1105 } while (1);
1106
1107 finish_wait(&ctx->wait, &wait);
1108
1109 if (sig)
1110 restore_user_sigmask(sig, &sigsaved);
1111
1112 return READ_ONCE(ring->r.head) == READ_ONCE(ring->r.tail) ? ret : 0;
1113}
1114
1115static int io_sq_offload_start(struct io_ring_ctx *ctx)
1116{
1117 int ret;
1118
1119 mmgrab(current->mm);
1120 ctx->sqo_mm = current->mm;
1121
1122 /* Do QD, or 2 * CPUS, whatever is smallest */
1123 ctx->sqo_wq = alloc_workqueue("io_ring-wq", WQ_UNBOUND | WQ_FREEZABLE,
1124 min(ctx->sq_entries - 1, 2 * num_online_cpus()));
1125 if (!ctx->sqo_wq) {
1126 ret = -ENOMEM;
1127 goto err;
1128 }
1129
1130 return 0;
1131err:
1132 mmdrop(ctx->sqo_mm);
1133 ctx->sqo_mm = NULL;
1134 return ret;
1135}
1136
1137static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
1138{
1139 atomic_long_sub(nr_pages, &user->locked_vm);
1140}
1141
1142static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
1143{
1144 unsigned long page_limit, cur_pages, new_pages;
1145
1146 /* Don't allow more pages than we can safely lock */
1147 page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1148
1149 do {
1150 cur_pages = atomic_long_read(&user->locked_vm);
1151 new_pages = cur_pages + nr_pages;
1152 if (new_pages > page_limit)
1153 return -ENOMEM;
1154 } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
1155 new_pages) != cur_pages);
1156
1157 return 0;
1158}
1159
1160static void io_mem_free(void *ptr)
1161{
1162 struct page *page = virt_to_head_page(ptr);
1163
1164 if (put_page_testzero(page))
1165 free_compound_page(page);
1166}
1167
1168static void *io_mem_alloc(size_t size)
1169{
1170 gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
1171 __GFP_NORETRY;
1172
1173 return (void *) __get_free_pages(gfp_flags, get_order(size));
1174}
1175
1176static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
1177{
1178 struct io_sq_ring *sq_ring;
1179 struct io_cq_ring *cq_ring;
1180 size_t bytes;
1181
1182 bytes = struct_size(sq_ring, array, sq_entries);
1183 bytes += array_size(sizeof(struct io_uring_sqe), sq_entries);
1184 bytes += struct_size(cq_ring, cqes, cq_entries);
1185
1186 return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
1187}
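/*
 * Worked example of the sizing above, assuming 4 KiB pages, 64-byte sqes
 * and 16-byte cqes: at the IORING_MAX_ENTRIES limit of 4096 sq entries
 * (and therefore 8192 cq entries), the sq ring plus its array is ~16 KiB,
 * the sqe array is 4096 * 64 = 256 KiB and the cq ring is about
 * 8192 * 16 = 128 KiB, i.e. roughly 100 pages charged against
 * RLIMIT_MEMLOCK when the caller lacks CAP_IPC_LOCK.
 */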
1188
1189static void io_ring_ctx_free(struct io_ring_ctx *ctx)
1190{
1191 if (ctx->sqo_wq)
1192 destroy_workqueue(ctx->sqo_wq);
1193 if (ctx->sqo_mm)
1194 mmdrop(ctx->sqo_mm);
 1195
1196 io_iopoll_reap_events(ctx);
1197
 1198#if defined(CONFIG_UNIX)
1199 if (ctx->ring_sock)
1200 sock_release(ctx->ring_sock);
1201#endif
1202
1203 io_mem_free(ctx->sq_ring);
1204 io_mem_free(ctx->sq_sqes);
1205 io_mem_free(ctx->cq_ring);
1206
1207 percpu_ref_exit(&ctx->refs);
1208 if (ctx->account_mem)
1209 io_unaccount_mem(ctx->user,
1210 ring_pages(ctx->sq_entries, ctx->cq_entries));
1211 free_uid(ctx->user);
1212 kfree(ctx);
1213}
1214
1215static __poll_t io_uring_poll(struct file *file, poll_table *wait)
1216{
1217 struct io_ring_ctx *ctx = file->private_data;
1218 __poll_t mask = 0;
1219
1220 poll_wait(file, &ctx->cq_wait, wait);
1221 /* See comment at the top of this file */
1222 smp_rmb();
1223 if (READ_ONCE(ctx->sq_ring->r.tail) + 1 != ctx->cached_sq_head)
1224 mask |= EPOLLOUT | EPOLLWRNORM;
1225 if (READ_ONCE(ctx->cq_ring->r.head) != ctx->cached_cq_tail)
1226 mask |= EPOLLIN | EPOLLRDNORM;
1227
1228 return mask;
1229}
1230
1231static int io_uring_fasync(int fd, struct file *file, int on)
1232{
1233 struct io_ring_ctx *ctx = file->private_data;
1234
1235 return fasync_helper(fd, file, on, &ctx->cq_fasync);
1236}
1237
1238static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
1239{
1240 mutex_lock(&ctx->uring_lock);
1241 percpu_ref_kill(&ctx->refs);
1242 mutex_unlock(&ctx->uring_lock);
1243
 1244	io_iopoll_reap_events(ctx);
 1245	wait_for_completion(&ctx->ctx_done);
1246 io_ring_ctx_free(ctx);
1247}
1248
1249static int io_uring_release(struct inode *inode, struct file *file)
1250{
1251 struct io_ring_ctx *ctx = file->private_data;
1252
1253 file->private_data = NULL;
1254 io_ring_ctx_wait_and_kill(ctx);
1255 return 0;
1256}
1257
1258static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
1259{
1260 loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
1261 unsigned long sz = vma->vm_end - vma->vm_start;
1262 struct io_ring_ctx *ctx = file->private_data;
1263 unsigned long pfn;
1264 struct page *page;
1265 void *ptr;
1266
1267 switch (offset) {
1268 case IORING_OFF_SQ_RING:
1269 ptr = ctx->sq_ring;
1270 break;
1271 case IORING_OFF_SQES:
1272 ptr = ctx->sq_sqes;
1273 break;
1274 case IORING_OFF_CQ_RING:
1275 ptr = ctx->cq_ring;
1276 break;
1277 default:
1278 return -EINVAL;
1279 }
1280
1281 page = virt_to_head_page(ptr);
1282 if (sz > (PAGE_SIZE << compound_order(page)))
1283 return -EINVAL;
1284
1285 pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
1286 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
1287}
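/*
 * Matching userspace setup, as a sketch (error handling omitted;
 * io_uring_setup() stands for a syscall(2) wrapper and QUEUE_DEPTH is the
 * application's requested entry count):
 */
#if 0
	struct io_uring_params p = { };
	int fd = io_uring_setup(QUEUE_DEPTH, &p);

	void *sq_ring = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
			     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			     fd, IORING_OFF_SQ_RING);
	void *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
			  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			  fd, IORING_OFF_SQES);
	void *cq_ring = mmap(NULL, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
			     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			     fd, IORING_OFF_CQ_RING);
#endif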
1288
1289SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
1290 u32, min_complete, u32, flags, const sigset_t __user *, sig,
1291 size_t, sigsz)
1292{
1293 struct io_ring_ctx *ctx;
1294 long ret = -EBADF;
1295 int submitted = 0;
1296 struct fd f;
1297
1298 if (flags & ~IORING_ENTER_GETEVENTS)
1299 return -EINVAL;
1300
1301 f = fdget(fd);
1302 if (!f.file)
1303 return -EBADF;
1304
1305 ret = -EOPNOTSUPP;
1306 if (f.file->f_op != &io_uring_fops)
1307 goto out_fput;
1308
1309 ret = -ENXIO;
1310 ctx = f.file->private_data;
1311 if (!percpu_ref_tryget(&ctx->refs))
1312 goto out_fput;
1313
1314 ret = 0;
1315 if (to_submit) {
1316 to_submit = min(to_submit, ctx->sq_entries);
1317
1318 mutex_lock(&ctx->uring_lock);
1319 submitted = io_ring_submit(ctx, to_submit);
1320 mutex_unlock(&ctx->uring_lock);
1321
1322 if (submitted < 0)
1323 goto out_ctx;
1324 }
1325 if (flags & IORING_ENTER_GETEVENTS) {
 1326		unsigned nr_events = 0;
1327
 1328		min_complete = min(min_complete, ctx->cq_entries);
1329
1330 /*
1331 * The application could have included the 'to_submit' count
1332 * in how many events it wanted to wait for. If we failed to
1333 * submit the desired count, we may need to adjust the number
1334 * of events to poll/wait for.
1335 */
1336 if (submitted < to_submit)
1337 min_complete = min_t(unsigned, submitted, min_complete);
1338
 1339		if (ctx->flags & IORING_SETUP_IOPOLL) {
1340 mutex_lock(&ctx->uring_lock);
1341 ret = io_iopoll_check(ctx, &nr_events, min_complete);
1342 mutex_unlock(&ctx->uring_lock);
1343 } else {
1344 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
1345 }
 1346	}
1347
1348out_ctx:
1349 io_ring_drop_ctx_refs(ctx, 1);
1350out_fput:
1351 fdput(f);
1352 return submitted ? submitted : ret;
1353}
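/*
 * Typical call from userspace, as a sketch (io_uring_enter() standing in
 * for a raw syscall wrapper): submit everything queued in the SQ ring and
 * wait for at least one completion in the same call.
 */
#if 0
	int ret = io_uring_enter(ring_fd, to_submit, 1 /* min_complete */,
				 IORING_ENTER_GETEVENTS, NULL, 0);
	if (ret < 0)
		perror("io_uring_enter");
#endif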
1354
1355static const struct file_operations io_uring_fops = {
1356 .release = io_uring_release,
1357 .mmap = io_uring_mmap,
1358 .poll = io_uring_poll,
1359 .fasync = io_uring_fasync,
1360};
1361
1362static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
1363 struct io_uring_params *p)
1364{
1365 struct io_sq_ring *sq_ring;
1366 struct io_cq_ring *cq_ring;
1367 size_t size;
1368
1369 sq_ring = io_mem_alloc(struct_size(sq_ring, array, p->sq_entries));
1370 if (!sq_ring)
1371 return -ENOMEM;
1372
1373 ctx->sq_ring = sq_ring;
1374 sq_ring->ring_mask = p->sq_entries - 1;
1375 sq_ring->ring_entries = p->sq_entries;
1376 ctx->sq_mask = sq_ring->ring_mask;
1377 ctx->sq_entries = sq_ring->ring_entries;
1378
1379 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
1380 if (size == SIZE_MAX)
1381 return -EOVERFLOW;
1382
1383 ctx->sq_sqes = io_mem_alloc(size);
1384 if (!ctx->sq_sqes) {
1385 io_mem_free(ctx->sq_ring);
1386 return -ENOMEM;
1387 }
1388
1389 cq_ring = io_mem_alloc(struct_size(cq_ring, cqes, p->cq_entries));
1390 if (!cq_ring) {
1391 io_mem_free(ctx->sq_ring);
1392 io_mem_free(ctx->sq_sqes);
1393 return -ENOMEM;
1394 }
1395
1396 ctx->cq_ring = cq_ring;
1397 cq_ring->ring_mask = p->cq_entries - 1;
1398 cq_ring->ring_entries = p->cq_entries;
1399 ctx->cq_mask = cq_ring->ring_mask;
1400 ctx->cq_entries = cq_ring->ring_entries;
1401 return 0;
1402}
1403
1404/*
 1405 * Allocate an anonymous fd; this is what constitutes the application
1406 * visible backing of an io_uring instance. The application mmaps this
1407 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
1408 * we have to tie this fd to a socket for file garbage collection purposes.
1409 */
1410static int io_uring_get_fd(struct io_ring_ctx *ctx)
1411{
1412 struct file *file;
1413 int ret;
1414
1415#if defined(CONFIG_UNIX)
1416 ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
1417 &ctx->ring_sock);
1418 if (ret)
1419 return ret;
1420#endif
1421
1422 ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
1423 if (ret < 0)
1424 goto err;
1425
1426 file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
1427 O_RDWR | O_CLOEXEC);
1428 if (IS_ERR(file)) {
1429 put_unused_fd(ret);
1430 ret = PTR_ERR(file);
1431 goto err;
1432 }
1433
1434#if defined(CONFIG_UNIX)
1435 ctx->ring_sock->file = file;
1436#endif
1437 fd_install(ret, file);
1438 return ret;
1439err:
1440#if defined(CONFIG_UNIX)
1441 sock_release(ctx->ring_sock);
1442 ctx->ring_sock = NULL;
1443#endif
1444 return ret;
1445}
1446
1447static int io_uring_create(unsigned entries, struct io_uring_params *p)
1448{
1449 struct user_struct *user = NULL;
1450 struct io_ring_ctx *ctx;
1451 bool account_mem;
1452 int ret;
1453
1454 if (!entries || entries > IORING_MAX_ENTRIES)
1455 return -EINVAL;
1456
1457 /*
1458 * Use twice as many entries for the CQ ring. It's possible for the
1459 * application to drive a higher depth than the size of the SQ ring,
1460 * since the sqes are only used at submission time. This allows for
1461 * some flexibility in overcommitting a bit.
1462 */
1463 p->sq_entries = roundup_pow_of_two(entries);
1464 p->cq_entries = 2 * p->sq_entries;
1465
1466 user = get_uid(current_user());
1467 account_mem = !capable(CAP_IPC_LOCK);
1468
1469 if (account_mem) {
1470 ret = io_account_mem(user,
1471 ring_pages(p->sq_entries, p->cq_entries));
1472 if (ret) {
1473 free_uid(user);
1474 return ret;
1475 }
1476 }
1477
1478 ctx = io_ring_ctx_alloc(p);
1479 if (!ctx) {
1480 if (account_mem)
1481 io_unaccount_mem(user, ring_pages(p->sq_entries,
1482 p->cq_entries));
1483 free_uid(user);
1484 return -ENOMEM;
1485 }
1486 ctx->compat = in_compat_syscall();
1487 ctx->account_mem = account_mem;
1488 ctx->user = user;
1489
1490 ret = io_allocate_scq_urings(ctx, p);
1491 if (ret)
1492 goto err;
1493
1494 ret = io_sq_offload_start(ctx);
1495 if (ret)
1496 goto err;
1497
1498 ret = io_uring_get_fd(ctx);
1499 if (ret < 0)
1500 goto err;
1501
1502 memset(&p->sq_off, 0, sizeof(p->sq_off));
1503 p->sq_off.head = offsetof(struct io_sq_ring, r.head);
1504 p->sq_off.tail = offsetof(struct io_sq_ring, r.tail);
1505 p->sq_off.ring_mask = offsetof(struct io_sq_ring, ring_mask);
1506 p->sq_off.ring_entries = offsetof(struct io_sq_ring, ring_entries);
1507 p->sq_off.flags = offsetof(struct io_sq_ring, flags);
1508 p->sq_off.dropped = offsetof(struct io_sq_ring, dropped);
1509 p->sq_off.array = offsetof(struct io_sq_ring, array);
1510
1511 memset(&p->cq_off, 0, sizeof(p->cq_off));
1512 p->cq_off.head = offsetof(struct io_cq_ring, r.head);
1513 p->cq_off.tail = offsetof(struct io_cq_ring, r.tail);
1514 p->cq_off.ring_mask = offsetof(struct io_cq_ring, ring_mask);
1515 p->cq_off.ring_entries = offsetof(struct io_cq_ring, ring_entries);
1516 p->cq_off.overflow = offsetof(struct io_cq_ring, overflow);
1517 p->cq_off.cqes = offsetof(struct io_cq_ring, cqes);
1518 return ret;
1519err:
1520 io_ring_ctx_wait_and_kill(ctx);
1521 return ret;
1522}
1523
1524/*
 1525 * Sets up an io_uring context and returns the fd. The application asks for a
 1526 * ring size; we return the actual sq/cq ring sizes (among other things) in the
1527 * params structure passed in.
1528 */
1529static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
1530{
1531 struct io_uring_params p;
1532 long ret;
1533 int i;
1534
1535 if (copy_from_user(&p, params, sizeof(p)))
1536 return -EFAULT;
1537 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
1538 if (p.resv[i])
1539 return -EINVAL;
1540 }
1541
 1542	if (p.flags & ~IORING_SETUP_IOPOLL)
 1543		return -EINVAL;
1544
1545 ret = io_uring_create(entries, &p);
1546 if (ret < 0)
1547 return ret;
1548
1549 if (copy_to_user(params, &p, sizeof(p)))
1550 return -EFAULT;
1551
1552 return ret;
1553}
1554
1555SYSCALL_DEFINE2(io_uring_setup, u32, entries,
1556 struct io_uring_params __user *, params)
1557{
1558 return io_uring_setup(entries, params);
1559}
1560
1561static int __init io_uring_init(void)
1562{
1563 req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
1564 return 0;
1565};
1566__initcall(io_uring_init);