Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Shared application/kernel submission and completion ring pairs, for |
| 4 | * supporting fast/efficient IO. |
| 5 | * |
| 6 | * A note on the read/write ordering memory barriers that are matched between |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 7 | * the application and kernel side. |
| 8 | * |
| 9 | * After the application reads the CQ ring tail, it must use an |
| 10 | * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses |
| 11 | * before writing the tail (using smp_load_acquire to read the tail will |
| 12 | * do). It also needs a smp_mb() before updating CQ head (ordering the |
| 13 | * entry load(s) with the head store), pairing with an implicit barrier |
| 14 | * through a control-dependency in io_get_cqring (smp_store_release to |
| 15 | * store head will do). Failure to do so could lead to reading invalid |
| 16 | * CQ entries. |
| 17 | * |
| 18 | * Likewise, the application must use an appropriate smp_wmb() before |
| 19 | * writing the SQ tail (ordering SQ entry stores with the tail store), |
| 20 | * which pairs with smp_load_acquire in io_get_sqring (smp_store_release |
| 21 | * to store the tail will do). And it needs a barrier ordering the SQ |
| 22 | * head load before writing new SQ entries (smp_load_acquire to read |
| 23 | * head will do). |
| 24 | * |
| 25 | * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application |
| 26 | * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after* |
| 27 | * updating the SQ tail; a full memory barrier smp_mb() is needed |
| 28 | * between. |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 29 | * |
| 30 | * Also see the examples in the liburing library: |
| 31 | * |
| 32 | * git://git.kernel.dk/liburing |
| 33 | * |
| 34 | * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens |
| 35 | * from data shared between the kernel and application. This is done both |
| 36 | * for ordering purposes and to ensure that once a value is loaded from |
| 37 | * data that the application could potentially modify, it remains stable. |
| 38 | * |
| 39 | * Copyright (C) 2018-2019 Jens Axboe |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 40 | * Copyright (c) 2018-2019 Christoph Hellwig |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 41 | */ |
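A minimal userspace sketch of the CQ-side pairing described above. It is not
part of this file: struct app_cq and its field names are assumptions standing
in for pointers an application derives from mmap()ing the CQ ring and the
io_cqring_offsets returned by io_uring_setup().

#include <linux/io_uring.h>

struct app_cq {
	unsigned *khead;		/* head, written by the application */
	unsigned *ktail;		/* tail, written by the kernel */
	unsigned mask;			/* cq_ring_mask, constant */
	struct io_uring_cqe *cqes;
};

/* Reap one CQE: acquire-load the tail before reading the entry, then
 * release-store the head once the entry has been consumed. */
static int app_reap_cqe(struct app_cq *cq, struct io_uring_cqe *out)
{
	unsigned head = *cq->khead;
	unsigned tail = __atomic_load_n(cq->ktail, __ATOMIC_ACQUIRE);

	if (head == tail)
		return 0;		/* ring is empty */
	*out = cq->cqes[head & cq->mask];
	__atomic_store_n(cq->khead, head + 1, __ATOMIC_RELEASE);
	return 1;
}

The SQ side mirrors this: store the SQE(s), then release-store the SQ tail,
which the kernel consumes with an acquire-load in io_get_sqring().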
| 42 | #include <linux/kernel.h> |
| 43 | #include <linux/init.h> |
| 44 | #include <linux/errno.h> |
| 45 | #include <linux/syscalls.h> |
| 46 | #include <linux/compat.h> |
| 47 | #include <linux/refcount.h> |
| 48 | #include <linux/uio.h> |
Pavel Begunkov | 6b47ee6 | 2020-01-18 20:22:41 +0300 | [diff] [blame] | 49 | #include <linux/bits.h> |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 50 | |
| 51 | #include <linux/sched/signal.h> |
| 52 | #include <linux/fs.h> |
| 53 | #include <linux/file.h> |
| 54 | #include <linux/fdtable.h> |
| 55 | #include <linux/mm.h> |
| 56 | #include <linux/mman.h> |
| 57 | #include <linux/mmu_context.h> |
| 58 | #include <linux/percpu.h> |
| 59 | #include <linux/slab.h> |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 60 | #include <linux/kthread.h> |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 61 | #include <linux/blkdev.h> |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 62 | #include <linux/bvec.h> |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 63 | #include <linux/net.h> |
| 64 | #include <net/sock.h> |
| 65 | #include <net/af_unix.h> |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 66 | #include <net/scm.h> |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 67 | #include <linux/anon_inodes.h> |
| 68 | #include <linux/sched/mm.h> |
| 69 | #include <linux/uaccess.h> |
| 70 | #include <linux/nospec.h> |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 71 | #include <linux/sizes.h> |
| 72 | #include <linux/hugetlb.h> |
Jens Axboe | aa4c396 | 2019-11-29 10:14:00 -0700 | [diff] [blame] | 73 | #include <linux/highmem.h> |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 74 | #include <linux/namei.h> |
| 75 | #include <linux/fsnotify.h> |
Jens Axboe | 4840e41 | 2019-12-25 22:03:45 -0700 | [diff] [blame] | 76 | #include <linux/fadvise.h> |
Jens Axboe | 3e4827b | 2020-01-08 15:18:09 -0700 | [diff] [blame] | 77 | #include <linux/eventpoll.h> |
Jens Axboe | ff002b3 | 2020-02-07 16:05:21 -0700 | [diff] [blame] | 78 | #include <linux/fs_struct.h> |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 79 | #include <linux/splice.h> |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 80 | #include <linux/task_work.h> |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 81 | |
Dmitrii Dolgov | c826bd7 | 2019-10-15 19:02:01 +0200 | [diff] [blame] | 82 | #define CREATE_TRACE_POINTS |
| 83 | #include <trace/events/io_uring.h> |
| 84 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 85 | #include <uapi/linux/io_uring.h> |
| 86 | |
| 87 | #include "internal.h" |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 88 | #include "io-wq.h" |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 89 | |
Daniel Xu | 5277dea | 2019-09-14 14:23:45 -0700 | [diff] [blame] | 90 | #define IORING_MAX_ENTRIES 32768 |
Jens Axboe | 33a107f | 2019-10-04 12:10:03 -0600 | [diff] [blame] | 91 | #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES) |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 92 | |
| 93 | /* |
| 94 | * Shift of 9 is 512 entries, or exactly one page on 64-bit archs |
| 95 | */ |
| 96 | #define IORING_FILE_TABLE_SHIFT 9 |
| 97 | #define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT) |
| 98 | #define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1) |
| 99 | #define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE) |
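As a quick illustration of how these constants combine (a hypothetical helper,
not one defined in this file), a fixed-file index splits into a table number
and a slot within that table; one table of 512 file pointers is 512 * 8 ==
4096 bytes, i.e. exactly one page on 64-bit.

static inline void example_split_fixed_index(unsigned index,
					     unsigned *table_nr, unsigned *slot)
{
	/* Illustrative only: index 1000 -> table 1 (1000 >> 9), slot 488
	 * (1000 & 511). */
	*table_nr = index >> IORING_FILE_TABLE_SHIFT;
	*slot = index & IORING_FILE_TABLE_MASK;
}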
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 100 | |
| 101 | struct io_uring { |
| 102 | u32 head ____cacheline_aligned_in_smp; |
| 103 | u32 tail ____cacheline_aligned_in_smp; |
| 104 | }; |
| 105 | |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 106 | /* |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 107 | * This data is shared with the application through the mmap at offsets |
| 108 | * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING. |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 109 | * |
| 110 | * The offsets to the member fields are published through struct |
| 111 | * io_sqring_offsets when calling io_uring_setup. |
| 112 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 113 | struct io_rings { |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 114 | /* |
| 115 | * Head and tail offsets into the ring; the offsets need to be |
| 116 | * masked to get valid indices. |
| 117 | * |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 118 | * The kernel controls the head of the sq ring and the tail of the cq |
| 119 | * ring, and the application controls the tail of the sq ring and the |
| 120 | * head of the cq ring. |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 121 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 122 | struct io_uring sq, cq; |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 123 | /* |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 124 | * Bitmasks to apply to head and tail offsets (constant, equals |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 125 | * ring_entries - 1) |
| 126 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 127 | u32 sq_ring_mask, cq_ring_mask; |
| 128 | /* Ring sizes (constant, power of 2) */ |
| 129 | u32 sq_ring_entries, cq_ring_entries; |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 130 | /* |
| 131 | * Number of invalid entries dropped by the kernel due to |
| 132 | * invalid index stored in array |
| 133 | * |
| 134 | * Written by the kernel, shouldn't be modified by the |
| 135 | * application (i.e. get number of "new events" by comparing to |
| 136 | * cached value). |
| 137 | * |
| 138 | * After a new SQ head value was read by the application this |
| 139 | * counter includes all submissions that were dropped reaching |
| 140 | * the new SQ head (and possibly more). |
| 141 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 142 | u32 sq_dropped; |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 143 | /* |
| 144 | * Runtime flags |
| 145 | * |
| 146 | * Written by the kernel, shouldn't be modified by the |
| 147 | * application. |
| 148 | * |
| 149 | * The application needs a full memory barrier before checking |
| 150 | * for IORING_SQ_NEED_WAKEUP after updating the sq tail. |
| 151 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 152 | u32 sq_flags; |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 153 | /* |
| 154 | * Number of completion events lost because the queue was full; |
| 155 | * this should be avoided by the application by making sure |
LimingWu | 0b4295b | 2019-12-05 20:18:18 +0800 | [diff] [blame] | 156 | * there are not more requests pending than there is space in |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 157 | * the completion queue. |
| 158 | * |
| 159 | * Written by the kernel, shouldn't be modified by the |
| 160 | * application (i.e. get number of "new events" by comparing to |
| 161 | * cached value). |
| 162 | * |
| 163 | * As completion events come in out of order this counter is not |
| 164 | * ordered with any other data. |
| 165 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 166 | u32 cq_overflow; |
Stefan Bühler | 1e84b97 | 2019-04-24 23:54:16 +0200 | [diff] [blame] | 167 | /* |
| 168 | * Ring buffer of completion events. |
| 169 | * |
| 170 | * The kernel writes completion events fresh every time they are |
| 171 | * produced, so the application is allowed to modify pending |
| 172 | * entries. |
| 173 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 174 | struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 175 | }; |
| 176 | |
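A small sketch of how the free-running head/tail counters and the masks above
are used. These helpers are illustrative only, not part of this file, and they
omit the READ_ONCE()/acquire handling that the header comment requires for
fields shared with the application.

/* Ready CQEs: the counters only grow, so the unsigned difference is the
 * fill level even across wrap-around. */
static inline unsigned example_cq_ready(const struct io_rings *rings)
{
	return rings->cq.tail - rings->cq.head;
}

/* Slot of the next CQE to read: ring sizes are powers of 2, so masking
 * with ring_entries - 1 replaces a modulo. */
static inline unsigned example_cq_index(const struct io_rings *rings)
{
	return rings->cq.head & rings->cq_ring_mask;
}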
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 177 | struct io_mapped_ubuf { |
| 178 | u64 ubuf; |
| 179 | size_t len; |
| 180 | struct bio_vec *bvec; |
| 181 | unsigned int nr_bvecs; |
| 182 | }; |
| 183 | |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 184 | struct fixed_file_table { |
| 185 | struct file **files; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 186 | }; |
| 187 | |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 188 | struct fixed_file_data { |
| 189 | struct fixed_file_table *table; |
| 190 | struct io_ring_ctx *ctx; |
| 191 | |
| 192 | struct percpu_ref refs; |
| 193 | struct llist_head put_llist; |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 194 | struct work_struct ref_work; |
| 195 | struct completion done; |
| 196 | }; |
| 197 | |
Jens Axboe | 5a2e745 | 2020-02-23 16:23:11 -0700 | [diff] [blame] | 198 | struct io_buffer { |
| 199 | struct list_head list; |
| 200 | __u64 addr; |
| 201 | __s32 len; |
| 202 | __u16 bid; |
| 203 | }; |
| 204 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 205 | struct io_ring_ctx { |
| 206 | struct { |
| 207 | struct percpu_ref refs; |
| 208 | } ____cacheline_aligned_in_smp; |
| 209 | |
| 210 | struct { |
| 211 | unsigned int flags; |
Randy Dunlap | e1d8533 | 2020-02-05 20:57:10 -0800 | [diff] [blame] | 212 | unsigned int compat: 1; |
| 213 | unsigned int account_mem: 1; |
| 214 | unsigned int cq_overflow_flushed: 1; |
| 215 | unsigned int drain_next: 1; |
| 216 | unsigned int eventfd_async: 1; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 217 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 218 | /* |
| 219 | * Ring buffer of indices into array of io_uring_sqe, which is |
| 220 | * mmapped by the application using the IORING_OFF_SQES offset. |
| 221 | * |
| 222 | * This indirection could e.g. be used to assign fixed |
| 223 | * io_uring_sqe entries to operations and only submit them to |
| 224 | * the queue when needed. |
| 225 | * |
| 226 | * The kernel modifies neither the indices array nor the entries |
| 227 | * array. |
| 228 | */ |
| 229 | u32 *sq_array; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 230 | unsigned cached_sq_head; |
| 231 | unsigned sq_entries; |
| 232 | unsigned sq_mask; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 233 | unsigned sq_thread_idle; |
Jens Axboe | 498ccd9 | 2019-10-25 10:04:25 -0600 | [diff] [blame] | 234 | unsigned cached_sq_dropped; |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 235 | atomic_t cached_cq_overflow; |
Jens Axboe | ad3eb2c | 2019-12-18 17:12:20 -0700 | [diff] [blame] | 236 | unsigned long sq_check_overflow; |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 237 | |
| 238 | struct list_head defer_list; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 239 | struct list_head timeout_list; |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 240 | struct list_head cq_overflow_list; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 241 | |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 242 | wait_queue_head_t inflight_wait; |
Jens Axboe | ad3eb2c | 2019-12-18 17:12:20 -0700 | [diff] [blame] | 243 | struct io_uring_sqe *sq_sqes; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 244 | } ____cacheline_aligned_in_smp; |
| 245 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 246 | struct io_rings *rings; |
| 247 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 248 | /* IO offload */ |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 249 | struct io_wq *io_wq; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 250 | struct task_struct *sqo_thread; /* if using sq thread polling */ |
| 251 | struct mm_struct *sqo_mm; |
| 252 | wait_queue_head_t sqo_wait; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 253 | |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 254 | /* |
| 255 | * If used, fixed file set. Writers must ensure that ->refs is dead, |
| 256 | * readers must ensure that ->refs is alive as long as the file* is |
| 257 | * used. Only updated through io_uring_register(2). |
| 258 | */ |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 259 | struct fixed_file_data *file_data; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 260 | unsigned nr_user_files; |
Pavel Begunkov | b14cca0 | 2020-01-17 04:45:59 +0300 | [diff] [blame] | 261 | int ring_fd; |
| 262 | struct file *ring_file; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 263 | |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 264 | /* if used, fixed mapped user buffers */ |
| 265 | unsigned nr_user_bufs; |
| 266 | struct io_mapped_ubuf *user_bufs; |
| 267 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 268 | struct user_struct *user; |
| 269 | |
Jens Axboe | 0b8c0ec | 2019-12-02 08:50:00 -0700 | [diff] [blame] | 270 | const struct cred *creds; |
Jens Axboe | 181e448 | 2019-11-25 08:52:30 -0700 | [diff] [blame] | 271 | |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 272 | /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */ |
| 273 | struct completion *completions; |
| 274 | |
Jens Axboe | 0ddf92e | 2019-11-08 08:52:53 -0700 | [diff] [blame] | 275 | /* if all else fails... */ |
| 276 | struct io_kiocb *fallback_req; |
| 277 | |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 278 | #if defined(CONFIG_UNIX) |
| 279 | struct socket *ring_sock; |
| 280 | #endif |
| 281 | |
Jens Axboe | 5a2e745 | 2020-02-23 16:23:11 -0700 | [diff] [blame] | 282 | struct idr io_buffer_idr; |
| 283 | |
Jens Axboe | 071698e | 2020-01-28 10:04:42 -0700 | [diff] [blame] | 284 | struct idr personality_idr; |
| 285 | |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 286 | struct { |
| 287 | unsigned cached_cq_tail; |
| 288 | unsigned cq_entries; |
| 289 | unsigned cq_mask; |
| 290 | atomic_t cq_timeouts; |
Jens Axboe | ad3eb2c | 2019-12-18 17:12:20 -0700 | [diff] [blame] | 291 | unsigned long cq_check_overflow; |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 292 | struct wait_queue_head cq_wait; |
| 293 | struct fasync_struct *cq_fasync; |
| 294 | struct eventfd_ctx *cq_ev_fd; |
| 295 | } ____cacheline_aligned_in_smp; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 296 | |
| 297 | struct { |
| 298 | struct mutex uring_lock; |
| 299 | wait_queue_head_t wait; |
| 300 | } ____cacheline_aligned_in_smp; |
| 301 | |
| 302 | struct { |
| 303 | spinlock_t completion_lock; |
Jens Axboe | e94f141 | 2019-12-19 12:06:02 -0700 | [diff] [blame] | 304 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 305 | /* |
| 306 | * ->poll_list is protected by the ctx->uring_lock for |
| 307 | * io_uring instances that don't use IORING_SETUP_SQPOLL. |
| 308 | * For SQPOLL, only the single threaded io_sq_thread() will |
| 309 | * manipulate the list, hence no extra locking is needed there. |
| 310 | */ |
| 311 | struct list_head poll_list; |
Jens Axboe | 78076bb | 2019-12-04 19:56:40 -0700 | [diff] [blame] | 312 | struct hlist_head *cancel_hash; |
| 313 | unsigned cancel_hash_bits; |
Jens Axboe | e94f141 | 2019-12-19 12:06:02 -0700 | [diff] [blame] | 314 | bool poll_multi_file; |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 315 | |
| 316 | spinlock_t inflight_lock; |
| 317 | struct list_head inflight_list; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 318 | } ____cacheline_aligned_in_smp; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 319 | }; |
| 320 | |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 321 | /* |
| 322 | * First field must be the file pointer in all the |
| 323 | * iocb unions! See also 'struct kiocb' in <linux/fs.h> |
| 324 | */ |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 325 | struct io_poll_iocb { |
| 326 | struct file *file; |
Jens Axboe | 0969e78 | 2019-12-17 18:40:57 -0700 | [diff] [blame] | 327 | union { |
| 328 | struct wait_queue_head *head; |
| 329 | u64 addr; |
| 330 | }; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 331 | __poll_t events; |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 332 | bool done; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 333 | bool canceled; |
Jens Axboe | 392edb4 | 2019-12-09 17:52:20 -0700 | [diff] [blame] | 334 | struct wait_queue_entry wait; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 335 | }; |
| 336 | |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 337 | struct io_close { |
| 338 | struct file *file; |
| 339 | struct file *put_file; |
| 340 | int fd; |
| 341 | }; |
| 342 | |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 343 | struct io_timeout_data { |
| 344 | struct io_kiocb *req; |
| 345 | struct hrtimer timer; |
| 346 | struct timespec64 ts; |
| 347 | enum hrtimer_mode mode; |
Pavel Begunkov | cc42e0a | 2019-11-25 23:14:38 +0300 | [diff] [blame] | 348 | u32 seq_offset; |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 349 | }; |
| 350 | |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 351 | struct io_accept { |
| 352 | struct file *file; |
| 353 | struct sockaddr __user *addr; |
| 354 | int __user *addr_len; |
| 355 | int flags; |
| 356 | }; |
| 357 | |
| 358 | struct io_sync { |
| 359 | struct file *file; |
| 360 | loff_t len; |
| 361 | loff_t off; |
| 362 | int flags; |
Jens Axboe | d63d1b5 | 2019-12-10 10:38:56 -0700 | [diff] [blame] | 363 | int mode; |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 364 | }; |
| 365 | |
Jens Axboe | fbf2384 | 2019-12-17 18:45:56 -0700 | [diff] [blame] | 366 | struct io_cancel { |
| 367 | struct file *file; |
| 368 | u64 addr; |
| 369 | }; |
| 370 | |
Jens Axboe | b29472e | 2019-12-17 18:50:29 -0700 | [diff] [blame] | 371 | struct io_timeout { |
| 372 | struct file *file; |
| 373 | u64 addr; |
| 374 | int flags; |
Jens Axboe | 26a6167 | 2019-12-20 09:02:01 -0700 | [diff] [blame] | 375 | unsigned count; |
Jens Axboe | b29472e | 2019-12-17 18:50:29 -0700 | [diff] [blame] | 376 | }; |
| 377 | |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 378 | struct io_rw { |
| 379 | /* NOTE: kiocb has the file as the first member, so don't do it here */ |
| 380 | struct kiocb kiocb; |
| 381 | u64 addr; |
| 382 | u64 len; |
| 383 | }; |
| 384 | |
Jens Axboe | 3fbb51c | 2019-12-20 08:51:52 -0700 | [diff] [blame] | 385 | struct io_connect { |
| 386 | struct file *file; |
| 387 | struct sockaddr __user *addr; |
| 388 | int addr_len; |
| 389 | }; |
| 390 | |
Jens Axboe | e47293f | 2019-12-20 08:58:21 -0700 | [diff] [blame] | 391 | struct io_sr_msg { |
| 392 | struct file *file; |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 393 | union { |
| 394 | struct user_msghdr __user *msg; |
| 395 | void __user *buf; |
| 396 | }; |
Jens Axboe | e47293f | 2019-12-20 08:58:21 -0700 | [diff] [blame] | 397 | int msg_flags; |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 398 | size_t len; |
Jens Axboe | e47293f | 2019-12-20 08:58:21 -0700 | [diff] [blame] | 399 | }; |
| 400 | |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 401 | struct io_open { |
| 402 | struct file *file; |
| 403 | int dfd; |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 404 | union { |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 405 | unsigned mask; |
| 406 | }; |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 407 | struct filename *filename; |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 408 | struct statx __user *buffer; |
Jens Axboe | c12cedf | 2020-01-08 17:41:21 -0700 | [diff] [blame] | 409 | struct open_how how; |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 410 | }; |
| 411 | |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 412 | struct io_files_update { |
| 413 | struct file *file; |
| 414 | u64 arg; |
| 415 | u32 nr_args; |
| 416 | u32 offset; |
| 417 | }; |
| 418 | |
Jens Axboe | 4840e41 | 2019-12-25 22:03:45 -0700 | [diff] [blame] | 419 | struct io_fadvise { |
| 420 | struct file *file; |
| 421 | u64 offset; |
| 422 | u32 len; |
| 423 | u32 advice; |
| 424 | }; |
| 425 | |
Jens Axboe | c1ca757 | 2019-12-25 22:18:28 -0700 | [diff] [blame] | 426 | struct io_madvise { |
| 427 | struct file *file; |
| 428 | u64 addr; |
| 429 | u32 len; |
| 430 | u32 advice; |
| 431 | }; |
| 432 | |
Jens Axboe | 3e4827b | 2020-01-08 15:18:09 -0700 | [diff] [blame] | 433 | struct io_epoll { |
| 434 | struct file *file; |
| 435 | int epfd; |
| 436 | int op; |
| 437 | int fd; |
| 438 | struct epoll_event event; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 439 | }; |
| 440 | |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 441 | struct io_splice { |
| 442 | struct file *file_out; |
| 443 | struct file *file_in; |
| 444 | loff_t off_out; |
| 445 | loff_t off_in; |
| 446 | u64 len; |
| 447 | unsigned int flags; |
| 448 | }; |
| 449 | |
Jens Axboe | ddf0322d | 2020-02-23 16:41:33 -0700 | [diff] [blame^] | 450 | struct io_provide_buf { |
| 451 | struct file *file; |
| 452 | __u64 addr; |
| 453 | __s32 len; |
| 454 | __u32 bgid; |
| 455 | __u16 nbufs; |
| 456 | __u16 bid; |
| 457 | }; |
| 458 | |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 459 | struct io_async_connect { |
| 460 | struct sockaddr_storage address; |
| 461 | }; |
| 462 | |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 463 | struct io_async_msghdr { |
| 464 | struct iovec fast_iov[UIO_FASTIOV]; |
| 465 | struct iovec *iov; |
| 466 | struct sockaddr __user *uaddr; |
| 467 | struct msghdr msg; |
Jens Axboe | b537916 | 2020-02-09 11:29:15 -0700 | [diff] [blame] | 468 | struct sockaddr_storage addr; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 469 | }; |
| 470 | |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 471 | struct io_async_rw { |
| 472 | struct iovec fast_iov[UIO_FASTIOV]; |
| 473 | struct iovec *iov; |
| 474 | ssize_t nr_segs; |
| 475 | ssize_t size; |
| 476 | }; |
| 477 | |
Jens Axboe | 1a6b74f | 2019-12-02 10:33:15 -0700 | [diff] [blame] | 478 | struct io_async_ctx { |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 479 | union { |
| 480 | struct io_async_rw rw; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 481 | struct io_async_msghdr msg; |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 482 | struct io_async_connect connect; |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 483 | struct io_timeout_data timeout; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 484 | }; |
Jens Axboe | 1a6b74f | 2019-12-02 10:33:15 -0700 | [diff] [blame] | 485 | }; |
| 486 | |
Pavel Begunkov | 6b47ee6 | 2020-01-18 20:22:41 +0300 | [diff] [blame] | 487 | enum { |
| 488 | REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT, |
| 489 | REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT, |
| 490 | REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT, |
| 491 | REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT, |
| 492 | REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT, |
| 493 | |
| 494 | REQ_F_LINK_NEXT_BIT, |
| 495 | REQ_F_FAIL_LINK_BIT, |
| 496 | REQ_F_INFLIGHT_BIT, |
| 497 | REQ_F_CUR_POS_BIT, |
| 498 | REQ_F_NOWAIT_BIT, |
| 499 | REQ_F_IOPOLL_COMPLETED_BIT, |
| 500 | REQ_F_LINK_TIMEOUT_BIT, |
| 501 | REQ_F_TIMEOUT_BIT, |
| 502 | REQ_F_ISREG_BIT, |
| 503 | REQ_F_MUST_PUNT_BIT, |
| 504 | REQ_F_TIMEOUT_NOSEQ_BIT, |
| 505 | REQ_F_COMP_LOCKED_BIT, |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 506 | REQ_F_NEED_CLEANUP_BIT, |
Jens Axboe | 2ca1025 | 2020-02-13 17:17:35 -0700 | [diff] [blame] | 507 | REQ_F_OVERFLOW_BIT, |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 508 | REQ_F_POLLED_BIT, |
Pavel Begunkov | 6b47ee6 | 2020-01-18 20:22:41 +0300 | [diff] [blame] | 509 | }; |
| 510 | |
| 511 | enum { |
| 512 | /* ctx owns file */ |
| 513 | REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT), |
| 514 | /* drain existing IO first */ |
| 515 | REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT), |
| 516 | /* linked sqes */ |
| 517 | REQ_F_LINK = BIT(REQ_F_LINK_BIT), |
| 518 | /* doesn't sever on completion < 0 */ |
| 519 | REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT), |
| 520 | /* IOSQE_ASYNC */ |
| 521 | REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT), |
| 522 | |
| 523 | /* already grabbed next link */ |
| 524 | REQ_F_LINK_NEXT = BIT(REQ_F_LINK_NEXT_BIT), |
| 525 | /* fail rest of links */ |
| 526 | REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT), |
| 527 | /* on inflight list */ |
| 528 | REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT), |
| 529 | /* read/write uses file position */ |
| 530 | REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT), |
| 531 | /* must not punt to workers */ |
| 532 | REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT), |
| 533 | /* polled IO has completed */ |
| 534 | REQ_F_IOPOLL_COMPLETED = BIT(REQ_F_IOPOLL_COMPLETED_BIT), |
| 535 | /* has linked timeout */ |
| 536 | REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT), |
| 537 | /* timeout request */ |
| 538 | REQ_F_TIMEOUT = BIT(REQ_F_TIMEOUT_BIT), |
| 539 | /* regular file */ |
| 540 | REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), |
| 541 | /* must be punted even for NONBLOCK */ |
| 542 | REQ_F_MUST_PUNT = BIT(REQ_F_MUST_PUNT_BIT), |
| 543 | /* no timeout sequence */ |
| 544 | REQ_F_TIMEOUT_NOSEQ = BIT(REQ_F_TIMEOUT_NOSEQ_BIT), |
| 545 | /* completion under lock */ |
| 546 | REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT), |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 547 | /* needs cleanup */ |
| 548 | REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT), |
Jens Axboe | 2ca1025 | 2020-02-13 17:17:35 -0700 | [diff] [blame] | 549 | /* in overflow list */ |
| 550 | REQ_F_OVERFLOW = BIT(REQ_F_OVERFLOW_BIT), |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 551 | /* already went through poll handler */ |
| 552 | REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), |
| 553 | }; |
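Because the first five REQ_F_* bits are defined directly from the matching
IOSQE_*_BIT values, the user-visible SQE flags can be carried into req->flags
with a plain mask. A hypothetical illustration, not a helper from this file:

static inline unsigned int example_sqe_to_req_flags(unsigned int sqe_flags)
{
	/* Illustrative only: the IOSQE_* bits and the first REQ_F_* bits
	 * line up, so the user-visible flags copy over unchanged. */
	return sqe_flags & (IOSQE_FIXED_FILE | IOSQE_IO_DRAIN | IOSQE_IO_LINK |
			    IOSQE_IO_HARDLINK | IOSQE_ASYNC);
}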
| 554 | |
| 555 | struct async_poll { |
| 556 | struct io_poll_iocb poll; |
| 557 | struct io_wq_work work; |
Pavel Begunkov | 6b47ee6 | 2020-01-18 20:22:41 +0300 | [diff] [blame] | 558 | }; |
| 559 | |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 560 | /* |
| 561 | * NOTE! Each of the iocb union members has the file pointer |
| 562 | * as the first entry in their struct definition. So you can |
| 563 | * access the file pointer through any of the sub-structs, |
| 564 | * or directly as just 'file' in this struct. |
| 565 | */ |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 566 | struct io_kiocb { |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 567 | union { |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 568 | struct file *file; |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 569 | struct io_rw rw; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 570 | struct io_poll_iocb poll; |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 571 | struct io_accept accept; |
| 572 | struct io_sync sync; |
Jens Axboe | fbf2384 | 2019-12-17 18:45:56 -0700 | [diff] [blame] | 573 | struct io_cancel cancel; |
Jens Axboe | b29472e | 2019-12-17 18:50:29 -0700 | [diff] [blame] | 574 | struct io_timeout timeout; |
Jens Axboe | 3fbb51c | 2019-12-20 08:51:52 -0700 | [diff] [blame] | 575 | struct io_connect connect; |
Jens Axboe | e47293f | 2019-12-20 08:58:21 -0700 | [diff] [blame] | 576 | struct io_sr_msg sr_msg; |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 577 | struct io_open open; |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 578 | struct io_close close; |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 579 | struct io_files_update files_update; |
Jens Axboe | 4840e41 | 2019-12-25 22:03:45 -0700 | [diff] [blame] | 580 | struct io_fadvise fadvise; |
Jens Axboe | c1ca757 | 2019-12-25 22:18:28 -0700 | [diff] [blame] | 581 | struct io_madvise madvise; |
Jens Axboe | 3e4827b | 2020-01-08 15:18:09 -0700 | [diff] [blame] | 582 | struct io_epoll epoll; |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 583 | struct io_splice splice; |
Jens Axboe | ddf0322d | 2020-02-23 16:41:33 -0700 | [diff] [blame^] | 584 | struct io_provide_buf pbuf; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 585 | }; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 586 | |
Jens Axboe | 1a6b74f | 2019-12-02 10:33:15 -0700 | [diff] [blame] | 587 | struct io_async_ctx *io; |
Pavel Begunkov | cf6fd4b | 2019-11-25 23:14:39 +0300 | [diff] [blame] | 588 | bool needs_fixed_file; |
Jens Axboe | d625c6e | 2019-12-17 19:53:05 -0700 | [diff] [blame] | 589 | u8 opcode; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 590 | |
| 591 | struct io_ring_ctx *ctx; |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 592 | struct list_head list; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 593 | unsigned int flags; |
Jens Axboe | c16361c | 2019-01-17 08:39:48 -0700 | [diff] [blame] | 594 | refcount_t refs; |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 595 | struct task_struct *task; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 596 | u64 user_data; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 597 | u32 result; |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 598 | u32 sequence; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 599 | |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 600 | struct list_head link_list; |
| 601 | |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 602 | struct list_head inflight_entry; |
| 603 | |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 604 | union { |
| 605 | /* |
| 606 | * Only commands that never go async can use the below fields, |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 607 | * obviously. Right now only IORING_OP_POLL_ADD uses them, and |
| 608 | * async armed poll handlers for regular commands. The latter |
| 609 | * restore the work, if needed. |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 610 | */ |
| 611 | struct { |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 612 | struct callback_head task_work; |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 613 | struct hlist_node hash_node; |
| 614 | struct async_poll *apoll; |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 615 | }; |
| 616 | struct io_wq_work work; |
| 617 | }; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 618 | }; |
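As the NOTE above struct io_kiocb says, every union member starts with the
struct file pointer, so the same storage is reachable through any of them.
A hypothetical illustration, not part of this file:

static inline bool example_file_alias(const struct io_kiocb *req)
{
	/* Illustrative only: 'file' and 'poll.file' name the same bytes of
	 * the union, so this always holds. */
	return req->file == req->poll.file;
}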
| 619 | |
| 620 | #define IO_PLUG_THRESHOLD 2 |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 621 | #define IO_IOPOLL_BATCH 8 |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 622 | |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 623 | struct io_submit_state { |
| 624 | struct blk_plug plug; |
| 625 | |
| 626 | /* |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 627 | * io_kiocb alloc cache |
| 628 | */ |
| 629 | void *reqs[IO_IOPOLL_BATCH]; |
Pavel Begunkov | 6c8a313 | 2020-02-01 03:58:00 +0300 | [diff] [blame] | 630 | unsigned int free_reqs; |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 631 | |
| 632 | /* |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 633 | * File reference cache |
| 634 | */ |
| 635 | struct file *file; |
| 636 | unsigned int fd; |
| 637 | unsigned int has_refs; |
| 638 | unsigned int used_refs; |
| 639 | unsigned int ios_left; |
| 640 | }; |
| 641 | |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 642 | struct io_op_def { |
| 643 | /* needs req->io allocated for deferral/async */ |
| 644 | unsigned async_ctx : 1; |
| 645 | /* needs current->mm setup, does mm access */ |
| 646 | unsigned needs_mm : 1; |
| 647 | /* needs req->file assigned */ |
| 648 | unsigned needs_file : 1; |
| 649 | /* needs req->file assigned IFF fd is >= 0 */ |
| 650 | unsigned fd_non_neg : 1; |
| 651 | /* hash wq insertion if file is a regular file */ |
| 652 | unsigned hash_reg_file : 1; |
| 653 | /* unbound wq insertion if file is a non-regular file */ |
| 654 | unsigned unbound_nonreg_file : 1; |
Jens Axboe | 66f4af9 | 2020-01-16 15:36:52 -0700 | [diff] [blame] | 655 | /* opcode is not supported by this kernel */ |
| 656 | unsigned not_supported : 1; |
Jens Axboe | f86cd20 | 2020-01-29 13:46:44 -0700 | [diff] [blame] | 657 | /* needs file table */ |
| 658 | unsigned file_table : 1; |
Jens Axboe | ff002b3 | 2020-02-07 16:05:21 -0700 | [diff] [blame] | 659 | /* needs ->fs */ |
| 660 | unsigned needs_fs : 1; |
Jens Axboe | 8a72758 | 2020-02-20 09:59:44 -0700 | [diff] [blame] | 661 | /* set if opcode supports polled "wait" */ |
| 662 | unsigned pollin : 1; |
| 663 | unsigned pollout : 1; |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 664 | }; |
| 665 | |
| 666 | static const struct io_op_def io_op_defs[] = { |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 667 | [IORING_OP_NOP] = {}, |
| 668 | [IORING_OP_READV] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 669 | .async_ctx = 1, |
| 670 | .needs_mm = 1, |
| 671 | .needs_file = 1, |
| 672 | .unbound_nonreg_file = 1, |
Jens Axboe | 8a72758 | 2020-02-20 09:59:44 -0700 | [diff] [blame] | 673 | .pollin = 1, |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 674 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 675 | [IORING_OP_WRITEV] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 676 | .async_ctx = 1, |
| 677 | .needs_mm = 1, |
| 678 | .needs_file = 1, |
| 679 | .hash_reg_file = 1, |
| 680 | .unbound_nonreg_file = 1, |
Jens Axboe | 8a72758 | 2020-02-20 09:59:44 -0700 | [diff] [blame] | 681 | .pollout = 1, |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 682 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 683 | [IORING_OP_FSYNC] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 684 | .needs_file = 1, |
| 685 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 686 | [IORING_OP_READ_FIXED] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 687 | .needs_file = 1, |
| 688 | .unbound_nonreg_file = 1, |
Jens Axboe | 8a72758 | 2020-02-20 09:59:44 -0700 | [diff] [blame] | 689 | .pollin = 1, |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 690 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 691 | [IORING_OP_WRITE_FIXED] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 692 | .needs_file = 1, |
| 693 | .hash_reg_file = 1, |
| 694 | .unbound_nonreg_file = 1, |
Jens Axboe | 8a72758 | 2020-02-20 09:59:44 -0700 | [diff] [blame] | 695 | .pollout = 1, |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 696 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 697 | [IORING_OP_POLL_ADD] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 698 | .needs_file = 1, |
| 699 | .unbound_nonreg_file = 1, |
| 700 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 701 | [IORING_OP_POLL_REMOVE] = {}, |
| 702 | [IORING_OP_SYNC_FILE_RANGE] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 703 | .needs_file = 1, |
| 704 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 705 | [IORING_OP_SENDMSG] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 706 | .async_ctx = 1, |
| 707 | .needs_mm = 1, |
| 708 | .needs_file = 1, |
| 709 | .unbound_nonreg_file = 1, |
Jens Axboe | ff002b3 | 2020-02-07 16:05:21 -0700 | [diff] [blame] | 710 | .needs_fs = 1, |
Jens Axboe | 8a72758 | 2020-02-20 09:59:44 -0700 | [diff] [blame] | 711 | .pollout = 1, |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 712 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 713 | [IORING_OP_RECVMSG] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 714 | .async_ctx = 1, |
| 715 | .needs_mm = 1, |
| 716 | .needs_file = 1, |
| 717 | .unbound_nonreg_file = 1, |
Jens Axboe | ff002b3 | 2020-02-07 16:05:21 -0700 | [diff] [blame] | 718 | .needs_fs = 1, |
Jens Axboe | 8a72758 | 2020-02-20 09:59:44 -0700 | [diff] [blame] | 719 | .pollin = 1, |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 720 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 721 | [IORING_OP_TIMEOUT] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 722 | .async_ctx = 1, |
| 723 | .needs_mm = 1, |
| 724 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 725 | [IORING_OP_TIMEOUT_REMOVE] = {}, |
| 726 | [IORING_OP_ACCEPT] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 727 | .needs_mm = 1, |
| 728 | .needs_file = 1, |
| 729 | .unbound_nonreg_file = 1, |
Jens Axboe | f86cd20 | 2020-01-29 13:46:44 -0700 | [diff] [blame] | 730 | .file_table = 1, |
Jens Axboe | 8a72758 | 2020-02-20 09:59:44 -0700 | [diff] [blame] | 731 | .pollin = 1, |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 732 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 733 | [IORING_OP_ASYNC_CANCEL] = {}, |
| 734 | [IORING_OP_LINK_TIMEOUT] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 735 | .async_ctx = 1, |
| 736 | .needs_mm = 1, |
| 737 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 738 | [IORING_OP_CONNECT] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 739 | .async_ctx = 1, |
| 740 | .needs_mm = 1, |
| 741 | .needs_file = 1, |
| 742 | .unbound_nonreg_file = 1, |
Jens Axboe | 8a72758 | 2020-02-20 09:59:44 -0700 | [diff] [blame] | 743 | .pollout = 1, |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 744 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 745 | [IORING_OP_FALLOCATE] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 746 | .needs_file = 1, |
| 747 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 748 | [IORING_OP_OPENAT] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 749 | .needs_file = 1, |
| 750 | .fd_non_neg = 1, |
Jens Axboe | f86cd20 | 2020-01-29 13:46:44 -0700 | [diff] [blame] | 751 | .file_table = 1, |
Jens Axboe | ff002b3 | 2020-02-07 16:05:21 -0700 | [diff] [blame] | 752 | .needs_fs = 1, |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 753 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 754 | [IORING_OP_CLOSE] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 755 | .needs_file = 1, |
Jens Axboe | f86cd20 | 2020-01-29 13:46:44 -0700 | [diff] [blame] | 756 | .file_table = 1, |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 757 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 758 | [IORING_OP_FILES_UPDATE] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 759 | .needs_mm = 1, |
Jens Axboe | f86cd20 | 2020-01-29 13:46:44 -0700 | [diff] [blame] | 760 | .file_table = 1, |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 761 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 762 | [IORING_OP_STATX] = { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 763 | .needs_mm = 1, |
| 764 | .needs_file = 1, |
| 765 | .fd_non_neg = 1, |
Jens Axboe | ff002b3 | 2020-02-07 16:05:21 -0700 | [diff] [blame] | 766 | .needs_fs = 1, |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 767 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 768 | [IORING_OP_READ] = { |
Jens Axboe | 3a6820f | 2019-12-22 15:19:35 -0700 | [diff] [blame] | 769 | .needs_mm = 1, |
| 770 | .needs_file = 1, |
| 771 | .unbound_nonreg_file = 1, |
Jens Axboe | 8a72758 | 2020-02-20 09:59:44 -0700 | [diff] [blame] | 772 | .pollin = 1, |
Jens Axboe | 3a6820f | 2019-12-22 15:19:35 -0700 | [diff] [blame] | 773 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 774 | [IORING_OP_WRITE] = { |
Jens Axboe | 3a6820f | 2019-12-22 15:19:35 -0700 | [diff] [blame] | 775 | .needs_mm = 1, |
| 776 | .needs_file = 1, |
| 777 | .unbound_nonreg_file = 1, |
Jens Axboe | 8a72758 | 2020-02-20 09:59:44 -0700 | [diff] [blame] | 778 | .pollout = 1, |
Jens Axboe | 3a6820f | 2019-12-22 15:19:35 -0700 | [diff] [blame] | 779 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 780 | [IORING_OP_FADVISE] = { |
Jens Axboe | 4840e41 | 2019-12-25 22:03:45 -0700 | [diff] [blame] | 781 | .needs_file = 1, |
| 782 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 783 | [IORING_OP_MADVISE] = { |
Jens Axboe | c1ca757 | 2019-12-25 22:18:28 -0700 | [diff] [blame] | 784 | .needs_mm = 1, |
| 785 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 786 | [IORING_OP_SEND] = { |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 787 | .needs_mm = 1, |
| 788 | .needs_file = 1, |
| 789 | .unbound_nonreg_file = 1, |
Jens Axboe | 8a72758 | 2020-02-20 09:59:44 -0700 | [diff] [blame] | 790 | .pollout = 1, |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 791 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 792 | [IORING_OP_RECV] = { |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 793 | .needs_mm = 1, |
| 794 | .needs_file = 1, |
| 795 | .unbound_nonreg_file = 1, |
Jens Axboe | 8a72758 | 2020-02-20 09:59:44 -0700 | [diff] [blame] | 796 | .pollin = 1, |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 797 | }, |
Pavel Begunkov | 0463b6c | 2020-01-18 21:35:38 +0300 | [diff] [blame] | 798 | [IORING_OP_OPENAT2] = { |
Jens Axboe | cebdb98 | 2020-01-08 17:59:24 -0700 | [diff] [blame] | 799 | .needs_file = 1, |
| 800 | .fd_non_neg = 1, |
Jens Axboe | f86cd20 | 2020-01-29 13:46:44 -0700 | [diff] [blame] | 801 | .file_table = 1, |
Jens Axboe | ff002b3 | 2020-02-07 16:05:21 -0700 | [diff] [blame] | 802 | .needs_fs = 1, |
Jens Axboe | cebdb98 | 2020-01-08 17:59:24 -0700 | [diff] [blame] | 803 | }, |
Jens Axboe | 3e4827b | 2020-01-08 15:18:09 -0700 | [diff] [blame] | 804 | [IORING_OP_EPOLL_CTL] = { |
| 805 | .unbound_nonreg_file = 1, |
| 806 | .file_table = 1, |
| 807 | }, |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 808 | [IORING_OP_SPLICE] = { |
| 809 | .needs_file = 1, |
| 810 | .hash_reg_file = 1, |
| 811 | .unbound_nonreg_file = 1, |
Jens Axboe | ddf0322d | 2020-02-23 16:41:33 -0700 | [diff] [blame^] | 812 | }, |
| 813 | [IORING_OP_PROVIDE_BUFFERS] = {}, |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 814 | }; |
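The table is indexed directly by the SQE opcode, letting submission paths test
per-op properties without a switch statement. A minimal sketch of such a
lookup (hypothetical helper, with the bounds check an untrusted opcode needs):

static inline bool example_op_needs_file(u8 opcode)
{
	/* Illustrative only: range-check before trusting a user-supplied
	 * opcode as an index. */
	if (opcode >= ARRAY_SIZE(io_op_defs))
		return false;
	return io_op_defs[opcode].needs_file;
}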
| 815 | |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 816 | static void io_wq_submit_work(struct io_wq_work **workptr); |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 817 | static void io_cqring_fill_event(struct io_kiocb *req, long res); |
Jackie Liu | ec9c02a | 2019-11-08 23:50:36 +0800 | [diff] [blame] | 818 | static void io_put_req(struct io_kiocb *req); |
Jens Axboe | 978db57 | 2019-11-14 22:39:04 -0700 | [diff] [blame] | 819 | static void __io_double_put_req(struct io_kiocb *req); |
Jens Axboe | 94ae5e7 | 2019-11-14 19:39:52 -0700 | [diff] [blame] | 820 | static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req); |
| 821 | static void io_queue_linked_timeout(struct io_kiocb *req); |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 822 | static int __io_sqe_files_update(struct io_ring_ctx *ctx, |
| 823 | struct io_uring_files_update *ip, |
| 824 | unsigned nr_args); |
Jens Axboe | f86cd20 | 2020-01-29 13:46:44 -0700 | [diff] [blame] | 825 | static int io_grab_files(struct io_kiocb *req); |
Jens Axboe | 2faf852 | 2020-02-04 19:54:55 -0700 | [diff] [blame] | 826 | static void io_ring_file_ref_flush(struct fixed_file_data *data); |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 827 | static void io_cleanup_req(struct io_kiocb *req); |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 828 | static int io_file_get(struct io_submit_state *state, struct io_kiocb *req, |
| 829 | int fd, struct file **out_file, bool fixed); |
| 830 | static void __io_queue_sqe(struct io_kiocb *req, |
| 831 | const struct io_uring_sqe *sqe); |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 832 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 833 | static struct kmem_cache *req_cachep; |
| 834 | |
| 835 | static const struct file_operations io_uring_fops; |
| 836 | |
| 837 | struct sock *io_uring_get_socket(struct file *file) |
| 838 | { |
| 839 | #if defined(CONFIG_UNIX) |
| 840 | if (file->f_op == &io_uring_fops) { |
| 841 | struct io_ring_ctx *ctx = file->private_data; |
| 842 | |
| 843 | return ctx->ring_sock->sk; |
| 844 | } |
| 845 | #endif |
| 846 | return NULL; |
| 847 | } |
| 848 | EXPORT_SYMBOL(io_uring_get_socket); |
| 849 | |
| 850 | static void io_ring_ctx_ref_free(struct percpu_ref *ref) |
| 851 | { |
| 852 | struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); |
| 853 | |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 854 | complete(&ctx->completions[0]); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 855 | } |
| 856 | |
| 857 | static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) |
| 858 | { |
| 859 | struct io_ring_ctx *ctx; |
Jens Axboe | 78076bb | 2019-12-04 19:56:40 -0700 | [diff] [blame] | 860 | int hash_bits; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 861 | |
| 862 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
| 863 | if (!ctx) |
| 864 | return NULL; |
| 865 | |
Jens Axboe | 0ddf92e | 2019-11-08 08:52:53 -0700 | [diff] [blame] | 866 | ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL); |
| 867 | if (!ctx->fallback_req) |
| 868 | goto err; |
| 869 | |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 870 | ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL); |
| 871 | if (!ctx->completions) |
| 872 | goto err; |
| 873 | |
Jens Axboe | 78076bb | 2019-12-04 19:56:40 -0700 | [diff] [blame] | 874 | /* |
| 875 | * Use 5 bits less than the max cq entries, that should give us around |
| 876 | * 32 entries per hash list if totally full and uniformly spread. |
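	 * E.g. with p->cq_entries == 4096, ilog2() returns 12, so hash_bits
	 * becomes 7 and the table gets 128 buckets of roughly 32 entries
	 * when completely full.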
| 877 | */ |
| 878 | hash_bits = ilog2(p->cq_entries); |
| 879 | hash_bits -= 5; |
| 880 | if (hash_bits <= 0) |
| 881 | hash_bits = 1; |
| 882 | ctx->cancel_hash_bits = hash_bits; |
| 883 | ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head), |
| 884 | GFP_KERNEL); |
| 885 | if (!ctx->cancel_hash) |
| 886 | goto err; |
| 887 | __hash_init(ctx->cancel_hash, 1U << hash_bits); |
| 888 | |
Roman Gushchin | 2148289 | 2019-05-07 10:01:48 -0700 | [diff] [blame] | 889 | if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 890 | PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) |
| 891 | goto err; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 892 | |
| 893 | ctx->flags = p->flags; |
| 894 | init_waitqueue_head(&ctx->cq_wait); |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 895 | INIT_LIST_HEAD(&ctx->cq_overflow_list); |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 896 | init_completion(&ctx->completions[0]); |
| 897 | init_completion(&ctx->completions[1]); |
Jens Axboe | 5a2e745 | 2020-02-23 16:23:11 -0700 | [diff] [blame] | 898 | idr_init(&ctx->io_buffer_idr); |
Jens Axboe | 071698e | 2020-01-28 10:04:42 -0700 | [diff] [blame] | 899 | idr_init(&ctx->personality_idr); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 900 | mutex_init(&ctx->uring_lock); |
| 901 | init_waitqueue_head(&ctx->wait); |
| 902 | spin_lock_init(&ctx->completion_lock); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 903 | INIT_LIST_HEAD(&ctx->poll_list); |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 904 | INIT_LIST_HEAD(&ctx->defer_list); |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 905 | INIT_LIST_HEAD(&ctx->timeout_list); |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 906 | init_waitqueue_head(&ctx->inflight_wait); |
| 907 | spin_lock_init(&ctx->inflight_lock); |
| 908 | INIT_LIST_HEAD(&ctx->inflight_list); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 909 | return ctx; |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 910 | err: |
Jens Axboe | 0ddf92e | 2019-11-08 08:52:53 -0700 | [diff] [blame] | 911 | if (ctx->fallback_req) |
| 912 | kmem_cache_free(req_cachep, ctx->fallback_req); |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 913 | kfree(ctx->completions); |
Jens Axboe | 78076bb | 2019-12-04 19:56:40 -0700 | [diff] [blame] | 914 | kfree(ctx->cancel_hash); |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 915 | kfree(ctx); |
| 916 | return NULL; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 917 | } |
| 918 | |
Bob Liu | 9d858b2 | 2019-11-13 18:06:25 +0800 | [diff] [blame] | 919 | static inline bool __req_need_defer(struct io_kiocb *req) |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 920 | { |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 921 | struct io_ring_ctx *ctx = req->ctx; |
| 922 | |
Jens Axboe | 498ccd9 | 2019-10-25 10:04:25 -0600 | [diff] [blame] | 923 | return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped |
| 924 | + atomic_read(&ctx->cached_cq_overflow); |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 925 | } |
| 926 | |
Bob Liu | 9d858b2 | 2019-11-13 18:06:25 +0800 | [diff] [blame] | 927 | static inline bool req_need_defer(struct io_kiocb *req) |
Jens Axboe | 7adf4ea | 2019-10-10 21:42:58 -0600 | [diff] [blame] | 928 | { |
Pavel Begunkov | 8798789 | 2020-01-18 01:22:30 +0300 | [diff] [blame] | 929 | if (unlikely(req->flags & REQ_F_IO_DRAIN)) |
Bob Liu | 9d858b2 | 2019-11-13 18:06:25 +0800 | [diff] [blame] | 930 | return __req_need_defer(req); |
Jens Axboe | 7adf4ea | 2019-10-10 21:42:58 -0600 | [diff] [blame] | 931 | |
Bob Liu | 9d858b2 | 2019-11-13 18:06:25 +0800 | [diff] [blame] | 932 | return false; |
Jens Axboe | 7adf4ea | 2019-10-10 21:42:58 -0600 | [diff] [blame] | 933 | } |
| 934 | |
| 935 | static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 936 | { |
| 937 | struct io_kiocb *req; |
| 938 | |
Jens Axboe | 7adf4ea | 2019-10-10 21:42:58 -0600 | [diff] [blame] | 939 | req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list); |
Bob Liu | 9d858b2 | 2019-11-13 18:06:25 +0800 | [diff] [blame] | 940 | if (req && !req_need_defer(req)) { |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 941 | list_del_init(&req->list); |
| 942 | return req; |
| 943 | } |
| 944 | |
| 945 | return NULL; |
| 946 | } |
| 947 | |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 948 | static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx) |
| 949 | { |
Jens Axboe | 7adf4ea | 2019-10-10 21:42:58 -0600 | [diff] [blame] | 950 | struct io_kiocb *req; |
| 951 | |
| 952 | req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list); |
Jens Axboe | 93bd25b | 2019-11-11 23:34:31 -0700 | [diff] [blame] | 953 | if (req) { |
| 954 | if (req->flags & REQ_F_TIMEOUT_NOSEQ) |
| 955 | return NULL; |
Linus Torvalds | fb4b3d3 | 2019-11-25 10:40:27 -0800 | [diff] [blame] | 956 | if (!__req_need_defer(req)) { |
Jens Axboe | 93bd25b | 2019-11-11 23:34:31 -0700 | [diff] [blame] | 957 | list_del_init(&req->list); |
| 958 | return req; |
| 959 | } |
Jens Axboe | 7adf4ea | 2019-10-10 21:42:58 -0600 | [diff] [blame] | 960 | } |
| 961 | |
| 962 | return NULL; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 963 | } |
| 964 | |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 965 | static void __io_commit_cqring(struct io_ring_ctx *ctx) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 966 | { |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 967 | struct io_rings *rings = ctx->rings; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 968 | |
Pavel Begunkov | 0791015 | 2020-01-17 03:52:46 +0300 | [diff] [blame] | 969 | /* order cqe stores with ring update */ |
| 970 | smp_store_release(&rings->cq.tail, ctx->cached_cq_tail); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 971 | |
Pavel Begunkov | 0791015 | 2020-01-17 03:52:46 +0300 | [diff] [blame] | 972 | if (wq_has_sleeper(&ctx->cq_wait)) { |
| 973 | wake_up_interruptible(&ctx->cq_wait); |
| 974 | kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 975 | } |
| 976 | } |
| 977 | |
Jens Axboe | cccf0ee | 2020-01-27 16:34:48 -0700 | [diff] [blame] | 978 | static inline void io_req_work_grab_env(struct io_kiocb *req, |
| 979 | const struct io_op_def *def) |
Jens Axboe | 18d9be1 | 2019-09-10 09:13:05 -0600 | [diff] [blame] | 980 | { |
Jens Axboe | cccf0ee | 2020-01-27 16:34:48 -0700 | [diff] [blame] | 981 | if (!req->work.mm && def->needs_mm) { |
| 982 | mmgrab(current->mm); |
| 983 | req->work.mm = current->mm; |
| 984 | } |
| 985 | if (!req->work.creds) |
| 986 | req->work.creds = get_current_cred(); |
Jens Axboe | ff002b3 | 2020-02-07 16:05:21 -0700 | [diff] [blame] | 987 | if (!req->work.fs && def->needs_fs) { |
| 988 | spin_lock(¤t->fs->lock); |
| 989 | if (!current->fs->in_exec) { |
| 990 | req->work.fs = current->fs; |
| 991 | req->work.fs->users++; |
| 992 | } else { |
| 993 | req->work.flags |= IO_WQ_WORK_CANCEL; |
| 994 | } |
| 995 | spin_unlock(¤t->fs->lock); |
| 996 | } |
Jens Axboe | 6ab2314 | 2020-02-08 20:23:59 -0700 | [diff] [blame] | 997 | if (!req->work.task_pid) |
| 998 | req->work.task_pid = task_pid_vnr(current); |
Jens Axboe | cccf0ee | 2020-01-27 16:34:48 -0700 | [diff] [blame] | 999 | } |
| 1000 | |
| 1001 | static inline void io_req_work_drop_env(struct io_kiocb *req) |
| 1002 | { |
| 1003 | if (req->work.mm) { |
| 1004 | mmdrop(req->work.mm); |
| 1005 | req->work.mm = NULL; |
| 1006 | } |
| 1007 | if (req->work.creds) { |
| 1008 | put_cred(req->work.creds); |
| 1009 | req->work.creds = NULL; |
| 1010 | } |
Jens Axboe | ff002b3 | 2020-02-07 16:05:21 -0700 | [diff] [blame] | 1011 | if (req->work.fs) { |
| 1012 | struct fs_struct *fs = req->work.fs; |
| 1013 | |
| 1014 | spin_lock(&req->work.fs->lock); |
| 1015 | if (--fs->users) |
| 1016 | fs = NULL; |
| 1017 | spin_unlock(&req->work.fs->lock); |
| 1018 | if (fs) |
| 1019 | free_fs_struct(fs); |
| 1020 | } |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 1021 | } |
| 1022 | |
Jens Axboe | 94ae5e7 | 2019-11-14 19:39:52 -0700 | [diff] [blame] | 1023 | static inline bool io_prep_async_work(struct io_kiocb *req, |
| 1024 | struct io_kiocb **link) |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 1025 | { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 1026 | const struct io_op_def *def = &io_op_defs[req->opcode]; |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 1027 | bool do_hashed = false; |
Jens Axboe | 54a91f3 | 2019-09-10 09:15:04 -0600 | [diff] [blame] | 1028 | |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 1029 | if (req->flags & REQ_F_ISREG) { |
| 1030 | if (def->hash_reg_file) |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 1031 | do_hashed = true; |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 1032 | } else { |
| 1033 | if (def->unbound_nonreg_file) |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 1034 | req->work.flags |= IO_WQ_WORK_UNBOUND; |
Jens Axboe | 54a91f3 | 2019-09-10 09:15:04 -0600 | [diff] [blame] | 1035 | } |
Jens Axboe | cccf0ee | 2020-01-27 16:34:48 -0700 | [diff] [blame] | 1036 | |
| 1037 | io_req_work_grab_env(req, def); |
Jens Axboe | 54a91f3 | 2019-09-10 09:15:04 -0600 | [diff] [blame] | 1038 | |
Jens Axboe | 94ae5e7 | 2019-11-14 19:39:52 -0700 | [diff] [blame] | 1039 | *link = io_prep_linked_timeout(req); |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 1040 | return do_hashed; |
| 1041 | } |
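/*
 * In short: work on regular files that io_op_defs marks hash_reg_file is
 * hashed so io-wq can serialise it per inode, while non-regular files
 * marked unbound_nonreg_file go to the unbounded worker pool via
 * IO_WQ_WORK_UNBOUND.  io_queue_async_work() below picks the matching
 * enqueue variant based on the do_hashed return value.
 */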
| 1042 | |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 1043 | static inline void io_queue_async_work(struct io_kiocb *req) |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 1044 | { |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 1045 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 94ae5e7 | 2019-11-14 19:39:52 -0700 | [diff] [blame] | 1046 | struct io_kiocb *link; |
| 1047 | bool do_hashed; |
| 1048 | |
| 1049 | do_hashed = io_prep_async_work(req, &link); |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 1050 | |
| 1051 | trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work, |
| 1052 | req->flags); |
| 1053 | if (!do_hashed) { |
| 1054 | io_wq_enqueue(ctx->io_wq, &req->work); |
| 1055 | } else { |
| 1056 | io_wq_enqueue_hashed(ctx->io_wq, &req->work, |
| 1057 | file_inode(req->file)); |
| 1058 | } |
Jens Axboe | 94ae5e7 | 2019-11-14 19:39:52 -0700 | [diff] [blame] | 1059 | |
| 1060 | if (link) |
| 1061 | io_queue_linked_timeout(link); |
Jens Axboe | 18d9be1 | 2019-09-10 09:13:05 -0600 | [diff] [blame] | 1062 | } |
| 1063 | |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 1064 | static void io_kill_timeout(struct io_kiocb *req) |
| 1065 | { |
| 1066 | int ret; |
| 1067 | |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 1068 | ret = hrtimer_try_to_cancel(&req->io->timeout.timer); |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 1069 | if (ret != -1) { |
| 1070 | atomic_inc(&req->ctx->cq_timeouts); |
Jens Axboe | 842f961 | 2019-10-29 12:34:10 -0600 | [diff] [blame] | 1071 | list_del_init(&req->list); |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 1072 | io_cqring_fill_event(req, 0); |
Jackie Liu | ec9c02a | 2019-11-08 23:50:36 +0800 | [diff] [blame] | 1073 | io_put_req(req); |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 1074 | } |
| 1075 | } |
| 1076 | |
| 1077 | static void io_kill_timeouts(struct io_ring_ctx *ctx) |
| 1078 | { |
| 1079 | struct io_kiocb *req, *tmp; |
| 1080 | |
| 1081 | spin_lock_irq(&ctx->completion_lock); |
| 1082 | list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list) |
| 1083 | io_kill_timeout(req); |
| 1084 | spin_unlock_irq(&ctx->completion_lock); |
| 1085 | } |
| 1086 | |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 1087 | static void io_commit_cqring(struct io_ring_ctx *ctx) |
| 1088 | { |
| 1089 | struct io_kiocb *req; |
| 1090 | |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 1091 | while ((req = io_get_timeout_req(ctx)) != NULL) |
| 1092 | io_kill_timeout(req); |
| 1093 | |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 1094 | __io_commit_cqring(ctx); |
| 1095 | |
Pavel Begunkov | 8798789 | 2020-01-18 01:22:30 +0300 | [diff] [blame] | 1096 | while ((req = io_get_deferred_req(ctx)) != NULL) |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 1097 | io_queue_async_work(req); |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 1098 | } |
| 1099 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1100 | static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx) |
| 1101 | { |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 1102 | struct io_rings *rings = ctx->rings; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1103 | unsigned tail; |
| 1104 | |
| 1105 | tail = ctx->cached_cq_tail; |
Stefan Bühler | 115e12e | 2019-04-24 23:54:18 +0200 | [diff] [blame] | 1106 | /* |
| 1107 | * writes to the cq entry need to come after reading head; the |
| 1108 | * control dependency is enough as we're using WRITE_ONCE to |
| 1109 | * fill the cq entry |
| 1110 | */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 1111 | if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1112 | return NULL; |
| 1113 | |
| 1114 | ctx->cached_cq_tail++; |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 1115 | return &rings->cqes[tail & ctx->cq_mask]; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1116 | } |
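/*
 * Example of the ring arithmetic above, assuming a CQ ring with
 * cq_ring_entries == 8 (so cq_mask == 7): with head == 13 and
 * cached_cq_tail == 21 the ring is full (21 - 13 == 8) and NULL is
 * returned; with cached_cq_tail == 20 the new CQE goes into
 * cqes[20 & 7] == cqes[4].  The unsigned subtraction stays correct
 * even once the 32-bit counters wrap.
 */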
| 1117 | |
Jens Axboe | f2842ab | 2020-01-08 11:04:00 -0700 | [diff] [blame] | 1118 | static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx) |
| 1119 | { |
Jens Axboe | f0b493e | 2020-02-01 21:30:11 -0700 | [diff] [blame] | 1120 | if (!ctx->cq_ev_fd) |
| 1121 | return false; |
Jens Axboe | f2842ab | 2020-01-08 11:04:00 -0700 | [diff] [blame] | 1122 | if (!ctx->eventfd_async) |
| 1123 | return true; |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 1124 | return io_wq_current_is_worker(); |
Jens Axboe | f2842ab | 2020-01-08 11:04:00 -0700 | [diff] [blame] | 1125 | } |
| 1126 | |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 1127 | static void io_cqring_ev_posted(struct io_ring_ctx *ctx) |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1128 | { |
| 1129 | if (waitqueue_active(&ctx->wait)) |
| 1130 | wake_up(&ctx->wait); |
| 1131 | if (waitqueue_active(&ctx->sqo_wait)) |
| 1132 | wake_up(&ctx->sqo_wait); |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 1133 | if (io_should_trigger_evfd(ctx)) |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 1134 | eventfd_signal(ctx->cq_ev_fd, 1); |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1135 | } |
| 1136 | |
Jens Axboe | c4a2ed7 | 2019-11-21 21:01:26 -0700 | [diff] [blame] | 1137 | /* Returns true if there are no backlogged entries after the flush */ |
| 1138 | static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1139 | { |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1140 | struct io_rings *rings = ctx->rings; |
| 1141 | struct io_uring_cqe *cqe; |
| 1142 | struct io_kiocb *req; |
| 1143 | unsigned long flags; |
| 1144 | LIST_HEAD(list); |
| 1145 | |
| 1146 | if (!force) { |
| 1147 | if (list_empty_careful(&ctx->cq_overflow_list)) |
Jens Axboe | c4a2ed7 | 2019-11-21 21:01:26 -0700 | [diff] [blame] | 1148 | return true; |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1149 | if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) == |
| 1150 | rings->cq_ring_entries)) |
Jens Axboe | c4a2ed7 | 2019-11-21 21:01:26 -0700 | [diff] [blame] | 1151 | return false; |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1152 | } |
| 1153 | |
| 1154 | spin_lock_irqsave(&ctx->completion_lock, flags); |
| 1155 | |
| 1156 | /* if force is set, the ring is going away. always drop after that */ |
| 1157 | if (force) |
Jens Axboe | 69b3e54 | 2020-01-08 11:01:46 -0700 | [diff] [blame] | 1158 | ctx->cq_overflow_flushed = 1; |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1159 | |
Jens Axboe | c4a2ed7 | 2019-11-21 21:01:26 -0700 | [diff] [blame] | 1160 | cqe = NULL; |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1161 | while (!list_empty(&ctx->cq_overflow_list)) { |
| 1162 | cqe = io_get_cqring(ctx); |
| 1163 | if (!cqe && !force) |
| 1164 | break; |
| 1165 | |
| 1166 | req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb, |
| 1167 | list); |
| 1168 | list_move(&req->list, &list); |
Jens Axboe | 2ca1025 | 2020-02-13 17:17:35 -0700 | [diff] [blame] | 1169 | req->flags &= ~REQ_F_OVERFLOW; |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1170 | if (cqe) { |
| 1171 | WRITE_ONCE(cqe->user_data, req->user_data); |
| 1172 | WRITE_ONCE(cqe->res, req->result); |
| 1173 | WRITE_ONCE(cqe->flags, 0); |
| 1174 | } else { |
| 1175 | WRITE_ONCE(ctx->rings->cq_overflow, |
| 1176 | atomic_inc_return(&ctx->cached_cq_overflow)); |
| 1177 | } |
| 1178 | } |
| 1179 | |
| 1180 | io_commit_cqring(ctx); |
Jens Axboe | ad3eb2c | 2019-12-18 17:12:20 -0700 | [diff] [blame] | 1181 | if (cqe) { |
| 1182 | clear_bit(0, &ctx->sq_check_overflow); |
| 1183 | clear_bit(0, &ctx->cq_check_overflow); |
| 1184 | } |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1185 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
| 1186 | io_cqring_ev_posted(ctx); |
| 1187 | |
| 1188 | while (!list_empty(&list)) { |
| 1189 | req = list_first_entry(&list, struct io_kiocb, list); |
| 1190 | list_del(&req->list); |
Jackie Liu | ec9c02a | 2019-11-08 23:50:36 +0800 | [diff] [blame] | 1191 | io_put_req(req); |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1192 | } |
Jens Axboe | c4a2ed7 | 2019-11-21 21:01:26 -0700 | [diff] [blame] | 1193 | |
| 1194 | return cqe != NULL; |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1195 | } |
| 1196 | |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 1197 | static void io_cqring_fill_event(struct io_kiocb *req, long res) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1198 | { |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 1199 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1200 | struct io_uring_cqe *cqe; |
| 1201 | |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 1202 | trace_io_uring_complete(ctx, req->user_data, res); |
Jens Axboe | 51c3ff6 | 2019-11-03 06:52:50 -0700 | [diff] [blame] | 1203 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1204 | /* |
| 1205 | * If we can't get a cq entry, userspace overflowed the |
| 1206 | * submission (by quite a lot). Increment the overflow count in |
| 1207 | * the ring. |
| 1208 | */ |
| 1209 | cqe = io_get_cqring(ctx); |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1210 | if (likely(cqe)) { |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 1211 | WRITE_ONCE(cqe->user_data, req->user_data); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1212 | WRITE_ONCE(cqe->res, res); |
| 1213 | WRITE_ONCE(cqe->flags, 0); |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1214 | } else if (ctx->cq_overflow_flushed) { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1215 | WRITE_ONCE(ctx->rings->cq_overflow, |
| 1216 | atomic_inc_return(&ctx->cached_cq_overflow)); |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1217 | } else { |
Jens Axboe | ad3eb2c | 2019-12-18 17:12:20 -0700 | [diff] [blame] | 1218 | if (list_empty(&ctx->cq_overflow_list)) { |
| 1219 | set_bit(0, &ctx->sq_check_overflow); |
| 1220 | set_bit(0, &ctx->cq_check_overflow); |
| 1221 | } |
Jens Axboe | 2ca1025 | 2020-02-13 17:17:35 -0700 | [diff] [blame] | 1222 | req->flags |= REQ_F_OVERFLOW; |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1223 | refcount_inc(&req->refs); |
| 1224 | req->result = res; |
| 1225 | list_add_tail(&req->list, &ctx->cq_overflow_list); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1226 | } |
| 1227 | } |
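/*
 * Summary of the three paths above: a free CQE slot is filled directly;
 * if the ring is full and the backlog has already been flushed for
 * teardown, the completion is only accounted in cq_overflow; otherwise
 * the request is parked on cq_overflow_list (holding an extra reference)
 * so a later io_cqring_overflow_flush() can still deliver the CQE.
 */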
| 1228 | |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 1229 | static void io_cqring_add_event(struct io_kiocb *req, long res) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1230 | { |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 1231 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1232 | unsigned long flags; |
| 1233 | |
| 1234 | spin_lock_irqsave(&ctx->completion_lock, flags); |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 1235 | io_cqring_fill_event(req, res); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1236 | io_commit_cqring(ctx); |
| 1237 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
| 1238 | |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 1239 | io_cqring_ev_posted(ctx); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1240 | } |
| 1241 | |
Jens Axboe | 0ddf92e | 2019-11-08 08:52:53 -0700 | [diff] [blame] | 1242 | static inline bool io_is_fallback_req(struct io_kiocb *req) |
| 1243 | { |
| 1244 | return req == (struct io_kiocb *) |
| 1245 | ((unsigned long) req->ctx->fallback_req & ~1UL); |
| 1246 | } |
| 1247 | |
| 1248 | static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx) |
| 1249 | { |
| 1250 | struct io_kiocb *req; |
| 1251 | |
| 1252 | req = ctx->fallback_req; |
| 1253 | if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req)) |
| 1254 | return req; |
| 1255 | |
| 1256 | return NULL; |
| 1257 | } |
| 1258 | |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 1259 | static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, |
| 1260 | struct io_submit_state *state) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1261 | { |
Jens Axboe | fd6fab2 | 2019-03-14 16:30:06 -0600 | [diff] [blame] | 1262 | gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1263 | struct io_kiocb *req; |
| 1264 | |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 1265 | if (!state) { |
Jens Axboe | fd6fab2 | 2019-03-14 16:30:06 -0600 | [diff] [blame] | 1266 | req = kmem_cache_alloc(req_cachep, gfp); |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 1267 | if (unlikely(!req)) |
Jens Axboe | 0ddf92e | 2019-11-08 08:52:53 -0700 | [diff] [blame] | 1268 | goto fallback; |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 1269 | } else if (!state->free_reqs) { |
| 1270 | size_t sz; |
| 1271 | int ret; |
| 1272 | |
| 1273 | sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs)); |
Jens Axboe | fd6fab2 | 2019-03-14 16:30:06 -0600 | [diff] [blame] | 1274 | ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs); |
| 1275 | |
| 1276 | /* |
| 1277 | * Bulk alloc is all-or-nothing. If we fail to get a batch, |
| 1278 | * retry single alloc to be on the safe side. |
| 1279 | */ |
| 1280 | if (unlikely(ret <= 0)) { |
| 1281 | state->reqs[0] = kmem_cache_alloc(req_cachep, gfp); |
| 1282 | if (!state->reqs[0]) |
Jens Axboe | 0ddf92e | 2019-11-08 08:52:53 -0700 | [diff] [blame] | 1283 | goto fallback; |
Jens Axboe | fd6fab2 | 2019-03-14 16:30:06 -0600 | [diff] [blame] | 1284 | ret = 1; |
| 1285 | } |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 1286 | state->free_reqs = ret - 1; |
Pavel Begunkov | 6c8a313 | 2020-02-01 03:58:00 +0300 | [diff] [blame] | 1287 | req = state->reqs[ret - 1]; |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 1288 | } else { |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 1289 | state->free_reqs--; |
Pavel Begunkov | 6c8a313 | 2020-02-01 03:58:00 +0300 | [diff] [blame] | 1290 | req = state->reqs[state->free_reqs]; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1291 | } |
| 1292 | |
Jens Axboe | 0ddf92e | 2019-11-08 08:52:53 -0700 | [diff] [blame] | 1293 | got_it: |
Jens Axboe | 1a6b74f | 2019-12-02 10:33:15 -0700 | [diff] [blame] | 1294 | req->io = NULL; |
Jens Axboe | 60c112b | 2019-06-21 10:20:18 -0600 | [diff] [blame] | 1295 | req->file = NULL; |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 1296 | req->ctx = ctx; |
| 1297 | req->flags = 0; |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 1298 | /* one is dropped after submission, the other at completion */ |
| 1299 | refcount_set(&req->refs, 2); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1300 | req->result = 0; |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 1301 | INIT_IO_WORK(&req->work, io_wq_submit_work); |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 1302 | return req; |
Jens Axboe | 0ddf92e | 2019-11-08 08:52:53 -0700 | [diff] [blame] | 1303 | fallback: |
| 1304 | req = io_get_fallback_req(ctx); |
| 1305 | if (req) |
| 1306 | goto got_it; |
Pavel Begunkov | 6805b32 | 2019-10-08 02:18:42 +0300 | [diff] [blame] | 1307 | percpu_ref_put(&ctx->refs); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1308 | return NULL; |
| 1309 | } |
| 1310 | |
Pavel Begunkov | 8da11c1 | 2020-02-24 11:32:44 +0300 | [diff] [blame] | 1311 | static inline void io_put_file(struct io_kiocb *req, struct file *file, |
| 1312 | bool fixed) |
| 1313 | { |
| 1314 | if (fixed) |
| 1315 | percpu_ref_put(&req->ctx->file_data->refs); |
| 1316 | else |
| 1317 | fput(file); |
| 1318 | } |
| 1319 | |
Pavel Begunkov | 2b85edf | 2019-12-28 14:13:03 +0300 | [diff] [blame] | 1320 | static void __io_req_do_free(struct io_kiocb *req) |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1321 | { |
Pavel Begunkov | 2b85edf | 2019-12-28 14:13:03 +0300 | [diff] [blame] | 1322 | if (likely(!io_is_fallback_req(req))) |
| 1323 | kmem_cache_free(req_cachep, req); |
| 1324 | else |
| 1325 | clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req); |
| 1326 | } |
| 1327 | |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1328 | static void __io_req_aux_free(struct io_kiocb *req) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1329 | { |
Pavel Begunkov | 929a3af | 2020-02-19 00:19:09 +0300 | [diff] [blame] | 1330 | if (req->flags & REQ_F_NEED_CLEANUP) |
| 1331 | io_cleanup_req(req); |
| 1332 | |
YueHaibing | 96fd84d | 2020-01-07 22:22:44 +0800 | [diff] [blame] | 1333 | kfree(req->io); |
Pavel Begunkov | 8da11c1 | 2020-02-24 11:32:44 +0300 | [diff] [blame] | 1334 | if (req->file) |
| 1335 | io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE)); |
Jens Axboe | cccf0ee | 2020-01-27 16:34:48 -0700 | [diff] [blame] | 1336 | |
| 1337 | io_req_work_drop_env(req); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1338 | } |
| 1339 | |
| 1340 | static void __io_free_req(struct io_kiocb *req) |
| 1341 | { |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1342 | __io_req_aux_free(req); |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 1343 | |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 1344 | if (req->flags & REQ_F_INFLIGHT) { |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1345 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 1346 | unsigned long flags; |
| 1347 | |
| 1348 | spin_lock_irqsave(&ctx->inflight_lock, flags); |
| 1349 | list_del(&req->inflight_entry); |
| 1350 | if (waitqueue_active(&ctx->inflight_wait)) |
| 1351 | wake_up(&ctx->inflight_wait); |
| 1352 | spin_unlock_irqrestore(&ctx->inflight_lock, flags); |
| 1353 | } |
Pavel Begunkov | 2b85edf | 2019-12-28 14:13:03 +0300 | [diff] [blame] | 1354 | |
| 1355 | percpu_ref_put(&req->ctx->refs); |
| 1356 | __io_req_do_free(req); |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 1357 | } |
| 1358 | |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1359 | struct req_batch { |
| 1360 | void *reqs[IO_IOPOLL_BATCH]; |
| 1361 | int to_free; |
| 1362 | int need_iter; |
| 1363 | }; |
| 1364 | |
| 1365 | static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb) |
| 1366 | { |
Jens Axboe | 10fef4b | 2020-01-09 07:52:28 -0700 | [diff] [blame] | 1367 | int fixed_refs = rb->to_free; |
| 1368 | |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1369 | if (!rb->to_free) |
| 1370 | return; |
| 1371 | if (rb->need_iter) { |
| 1372 | int i, inflight = 0; |
| 1373 | unsigned long flags; |
| 1374 | |
Jens Axboe | 10fef4b | 2020-01-09 07:52:28 -0700 | [diff] [blame] | 1375 | fixed_refs = 0; |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1376 | for (i = 0; i < rb->to_free; i++) { |
| 1377 | struct io_kiocb *req = rb->reqs[i]; |
| 1378 | |
Jens Axboe | 10fef4b | 2020-01-09 07:52:28 -0700 | [diff] [blame] | 1379 | if (req->flags & REQ_F_FIXED_FILE) { |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1380 | req->file = NULL; |
Jens Axboe | 10fef4b | 2020-01-09 07:52:28 -0700 | [diff] [blame] | 1381 | fixed_refs++; |
| 1382 | } |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1383 | if (req->flags & REQ_F_INFLIGHT) |
| 1384 | inflight++; |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1385 | __io_req_aux_free(req); |
| 1386 | } |
| 1387 | if (!inflight) |
| 1388 | goto do_free; |
| 1389 | |
| 1390 | spin_lock_irqsave(&ctx->inflight_lock, flags); |
| 1391 | for (i = 0; i < rb->to_free; i++) { |
| 1392 | struct io_kiocb *req = rb->reqs[i]; |
| 1393 | |
Jens Axboe | 10fef4b | 2020-01-09 07:52:28 -0700 | [diff] [blame] | 1394 | if (req->flags & REQ_F_INFLIGHT) { |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1395 | list_del(&req->inflight_entry); |
| 1396 | if (!--inflight) |
| 1397 | break; |
| 1398 | } |
| 1399 | } |
| 1400 | spin_unlock_irqrestore(&ctx->inflight_lock, flags); |
| 1401 | |
| 1402 | if (waitqueue_active(&ctx->inflight_wait)) |
| 1403 | wake_up(&ctx->inflight_wait); |
| 1404 | } |
| 1405 | do_free: |
| 1406 | kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs); |
Jens Axboe | 10fef4b | 2020-01-09 07:52:28 -0700 | [diff] [blame] | 1407 | if (fixed_refs) |
| 1408 | percpu_ref_put_many(&ctx->file_data->refs, fixed_refs); |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1409 | percpu_ref_put_many(&ctx->refs, rb->to_free); |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1410 | rb->to_free = rb->need_iter = 0; |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 1411 | } |
| 1412 | |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 1413 | static bool io_link_cancel_timeout(struct io_kiocb *req) |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1414 | { |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 1415 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1416 | int ret; |
| 1417 | |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 1418 | ret = hrtimer_try_to_cancel(&req->io->timeout.timer); |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1419 | if (ret != -1) { |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 1420 | io_cqring_fill_event(req, -ECANCELED); |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1421 | io_commit_cqring(ctx); |
| 1422 | req->flags &= ~REQ_F_LINK; |
Jackie Liu | ec9c02a | 2019-11-08 23:50:36 +0800 | [diff] [blame] | 1423 | io_put_req(req); |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1424 | return true; |
| 1425 | } |
| 1426 | |
| 1427 | return false; |
| 1428 | } |
| 1429 | |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1430 | static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr) |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1431 | { |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1432 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1433 | bool wake_ev = false; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1434 | |
Jens Axboe | 4d7dd46 | 2019-11-20 13:03:52 -0700 | [diff] [blame] | 1435 | /* Already got next link */ |
| 1436 | if (req->flags & REQ_F_LINK_NEXT) |
| 1437 | return; |
| 1438 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1439 | /* |
| 1440 | * The list should never be empty when we are called here. But it could |
| 1441 | * potentially happen if the chain is messed up, so check to be on the |
| 1442 | * safe side. |
| 1443 | */ |
Pavel Begunkov | 4493233 | 2019-12-05 16:16:35 +0300 | [diff] [blame] | 1444 | while (!list_empty(&req->link_list)) { |
| 1445 | struct io_kiocb *nxt = list_first_entry(&req->link_list, |
| 1446 | struct io_kiocb, link_list); |
Jens Axboe | 94ae5e7 | 2019-11-14 19:39:52 -0700 | [diff] [blame] | 1447 | |
Pavel Begunkov | 4493233 | 2019-12-05 16:16:35 +0300 | [diff] [blame] | 1448 | if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) && |
| 1449 | (nxt->flags & REQ_F_TIMEOUT))) { |
| 1450 | list_del_init(&nxt->link_list); |
Jens Axboe | 94ae5e7 | 2019-11-14 19:39:52 -0700 | [diff] [blame] | 1451 | wake_ev |= io_link_cancel_timeout(nxt); |
Jens Axboe | 94ae5e7 | 2019-11-14 19:39:52 -0700 | [diff] [blame] | 1452 | req->flags &= ~REQ_F_LINK_TIMEOUT; |
| 1453 | continue; |
| 1454 | } |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1455 | |
Pavel Begunkov | 4493233 | 2019-12-05 16:16:35 +0300 | [diff] [blame] | 1456 | list_del_init(&req->link_list); |
| 1457 | if (!list_empty(&nxt->link_list)) |
| 1458 | nxt->flags |= REQ_F_LINK; |
Pavel Begunkov | b18fdf7 | 2019-11-21 23:21:02 +0300 | [diff] [blame] | 1459 | *nxtptr = nxt; |
Jens Axboe | 94ae5e7 | 2019-11-14 19:39:52 -0700 | [diff] [blame] | 1460 | break; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1461 | } |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1462 | |
Jens Axboe | 4d7dd46 | 2019-11-20 13:03:52 -0700 | [diff] [blame] | 1463 | req->flags |= REQ_F_LINK_NEXT; |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1464 | if (wake_ev) |
| 1465 | io_cqring_ev_posted(ctx); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1466 | } |
| 1467 | |
| 1468 | /* |
| 1469 | * Called if REQ_F_LINK is set, and we fail the head request |
| 1470 | */ |
| 1471 | static void io_fail_links(struct io_kiocb *req) |
| 1472 | { |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1473 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1474 | unsigned long flags; |
| 1475 | |
| 1476 | spin_lock_irqsave(&ctx->completion_lock, flags); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1477 | |
| 1478 | while (!list_empty(&req->link_list)) { |
Pavel Begunkov | 4493233 | 2019-12-05 16:16:35 +0300 | [diff] [blame] | 1479 | struct io_kiocb *link = list_first_entry(&req->link_list, |
| 1480 | struct io_kiocb, link_list); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1481 | |
Pavel Begunkov | 4493233 | 2019-12-05 16:16:35 +0300 | [diff] [blame] | 1482 | list_del_init(&link->link_list); |
Dmitrii Dolgov | c826bd7 | 2019-10-15 19:02:01 +0200 | [diff] [blame] | 1483 | trace_io_uring_fail_link(req, link); |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1484 | |
| 1485 | if ((req->flags & REQ_F_LINK_TIMEOUT) && |
Jens Axboe | d625c6e | 2019-12-17 19:53:05 -0700 | [diff] [blame] | 1486 | link->opcode == IORING_OP_LINK_TIMEOUT) { |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 1487 | io_link_cancel_timeout(link); |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1488 | } else { |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 1489 | io_cqring_fill_event(link, -ECANCELED); |
Jens Axboe | 978db57 | 2019-11-14 22:39:04 -0700 | [diff] [blame] | 1490 | __io_double_put_req(link); |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1491 | } |
Jens Axboe | 5d96072 | 2019-11-19 15:31:28 -0700 | [diff] [blame] | 1492 | req->flags &= ~REQ_F_LINK_TIMEOUT; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1493 | } |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1494 | |
| 1495 | io_commit_cqring(ctx); |
| 1496 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
| 1497 | io_cqring_ev_posted(ctx); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1498 | } |
| 1499 | |
Jens Axboe | 4d7dd46 | 2019-11-20 13:03:52 -0700 | [diff] [blame] | 1500 | static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt) |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1501 | { |
Jens Axboe | 4d7dd46 | 2019-11-20 13:03:52 -0700 | [diff] [blame] | 1502 | if (likely(!(req->flags & REQ_F_LINK))) |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1503 | return; |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1504 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1505 | /* |
| 1506 | * If LINK is set, we have dependent requests in this chain. If we |
| 1507 | * didn't fail this request, queue the first one up, moving any other |
| 1508 | * dependencies to the next request. In case of failure, fail the rest |
| 1509 | * of the chain. |
| 1510 | */ |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1511 | if (req->flags & REQ_F_FAIL_LINK) { |
| 1512 | io_fail_links(req); |
Jens Axboe | 7c9e7f0 | 2019-11-12 08:15:53 -0700 | [diff] [blame] | 1513 | } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) == |
| 1514 | REQ_F_LINK_TIMEOUT) { |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1515 | struct io_ring_ctx *ctx = req->ctx; |
| 1516 | unsigned long flags; |
| 1517 | |
| 1518 | /* |
| 1519 | * If this is a timeout link, we could be racing with the |
| 1520 | * timeout timer. Grab the completion lock for this case to |
Jens Axboe | 7c9e7f0 | 2019-11-12 08:15:53 -0700 | [diff] [blame] | 1521 | * protect against that. |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 1522 | */ |
| 1523 | spin_lock_irqsave(&ctx->completion_lock, flags); |
| 1524 | io_req_link_next(req, nxt); |
| 1525 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
| 1526 | } else { |
| 1527 | io_req_link_next(req, nxt); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1528 | } |
Jens Axboe | 4d7dd46 | 2019-11-20 13:03:52 -0700 | [diff] [blame] | 1529 | } |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1530 | |
Jackie Liu | c69f8db | 2019-11-09 11:00:08 +0800 | [diff] [blame] | 1531 | static void io_free_req(struct io_kiocb *req) |
| 1532 | { |
Pavel Begunkov | 944e58b | 2019-11-21 23:21:01 +0300 | [diff] [blame] | 1533 | struct io_kiocb *nxt = NULL; |
| 1534 | |
| 1535 | io_req_find_next(req, &nxt); |
Pavel Begunkov | 70cf9f3 | 2019-11-21 23:21:00 +0300 | [diff] [blame] | 1536 | __io_free_req(req); |
Pavel Begunkov | 944e58b | 2019-11-21 23:21:01 +0300 | [diff] [blame] | 1537 | |
| 1538 | if (nxt) |
| 1539 | io_queue_async_work(nxt); |
Jackie Liu | c69f8db | 2019-11-09 11:00:08 +0800 | [diff] [blame] | 1540 | } |
| 1541 | |
Pavel Begunkov | 7a743e2 | 2020-03-03 21:33:13 +0300 | [diff] [blame] | 1542 | static void io_link_work_cb(struct io_wq_work **workptr) |
| 1543 | { |
| 1544 | struct io_wq_work *work = *workptr; |
| 1545 | struct io_kiocb *link = work->data; |
| 1546 | |
| 1547 | io_queue_linked_timeout(link); |
| 1548 | io_wq_submit_work(workptr); |
| 1549 | } |
| 1550 | |
| 1551 | static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt) |
| 1552 | { |
| 1553 | struct io_kiocb *link; |
| 1554 | |
| 1555 | *workptr = &nxt->work; |
| 1556 | link = io_prep_linked_timeout(nxt); |
| 1557 | if (link) { |
| 1558 | nxt->work.func = io_link_work_cb; |
| 1559 | nxt->work.data = link; |
| 1560 | } |
| 1561 | } |
| 1562 | |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1563 | /* |
| 1564 | * Drop reference to request, return next in chain (if there is one) if this |
| 1565 | * was the last reference to this request. |
| 1566 | */ |
Pavel Begunkov | f9bd67f | 2019-11-21 23:21:03 +0300 | [diff] [blame] | 1567 | __attribute__((nonnull)) |
Jackie Liu | ec9c02a | 2019-11-08 23:50:36 +0800 | [diff] [blame] | 1568 | static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr) |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 1569 | { |
Jens Axboe | 2a44f46 | 2020-02-25 13:25:41 -0700 | [diff] [blame] | 1570 | if (refcount_dec_and_test(&req->refs)) { |
| 1571 | io_req_find_next(req, nxtptr); |
Jens Axboe | 4d7dd46 | 2019-11-20 13:03:52 -0700 | [diff] [blame] | 1572 | __io_free_req(req); |
Jens Axboe | 2a44f46 | 2020-02-25 13:25:41 -0700 | [diff] [blame] | 1573 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1574 | } |
| 1575 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1576 | static void io_put_req(struct io_kiocb *req) |
| 1577 | { |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1578 | if (refcount_dec_and_test(&req->refs)) |
| 1579 | io_free_req(req); |
| 1580 | } |
| 1581 | |
Pavel Begunkov | e9fd939 | 2020-03-04 16:14:12 +0300 | [diff] [blame] | 1582 | static void io_steal_work(struct io_kiocb *req, |
| 1583 | struct io_wq_work **workptr) |
Pavel Begunkov | 7a743e2 | 2020-03-03 21:33:13 +0300 | [diff] [blame] | 1584 | { |
| 1585 | /* |
| 1586 | * It's in an io-wq worker, so there always should be at least |
| 1587 | * one reference, which will be dropped in io_put_work() just |
| 1588 | * after the current handler returns. |
| 1589 | * |
| 1590 | * It also means that if the counter has dropped to 1, there are |
| 1591 | * no asynchronous users left, so it's safe to steal the next work. |
| 1592 | */ |
Pavel Begunkov | 7a743e2 | 2020-03-03 21:33:13 +0300 | [diff] [blame] | 1593 | if (refcount_read(&req->refs) == 1) { |
| 1594 | struct io_kiocb *nxt = NULL; |
| 1595 | |
| 1596 | io_req_find_next(req, &nxt); |
| 1597 | if (nxt) |
| 1598 | io_wq_assign_next(workptr, nxt); |
| 1599 | } |
| 1600 | } |
| 1601 | |
Jens Axboe | 978db57 | 2019-11-14 22:39:04 -0700 | [diff] [blame] | 1602 | /* |
| 1603 | * Must only be used if we don't need to care about links, usually from |
| 1604 | * within the completion handling itself. |
| 1605 | */ |
| 1606 | static void __io_double_put_req(struct io_kiocb *req) |
Jens Axboe | a3a0e43 | 2019-08-20 11:03:11 -0600 | [diff] [blame] | 1607 | { |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 1608 | /* drop both submit and complete references */ |
| 1609 | if (refcount_sub_and_test(2, &req->refs)) |
| 1610 | __io_free_req(req); |
| 1611 | } |
| 1612 | |
Jens Axboe | 978db57 | 2019-11-14 22:39:04 -0700 | [diff] [blame] | 1613 | static void io_double_put_req(struct io_kiocb *req) |
| 1614 | { |
| 1615 | /* drop both submit and complete references */ |
| 1616 | if (refcount_sub_and_test(2, &req->refs)) |
| 1617 | io_free_req(req); |
| 1618 | } |
| 1619 | |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1620 | static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush) |
Jens Axboe | a3a0e43 | 2019-08-20 11:03:11 -0600 | [diff] [blame] | 1621 | { |
Jens Axboe | 84f97dc | 2019-11-06 11:27:53 -0700 | [diff] [blame] | 1622 | struct io_rings *rings = ctx->rings; |
| 1623 | |
Jens Axboe | ad3eb2c | 2019-12-18 17:12:20 -0700 | [diff] [blame] | 1624 | if (test_bit(0, &ctx->cq_check_overflow)) { |
| 1625 | /* |
| 1626 | * noflush == true means we are called from the waitqueue handler; |
| 1627 | * just ensure we wake up the task, and the next invocation will |
| 1628 | * flush the entries. We cannot safely do it from here. |
| 1629 | */ |
| 1630 | if (noflush && !list_empty(&ctx->cq_overflow_list)) |
| 1631 | return -1U; |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1632 | |
Jens Axboe | ad3eb2c | 2019-12-18 17:12:20 -0700 | [diff] [blame] | 1633 | io_cqring_overflow_flush(ctx, false); |
| 1634 | } |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1635 | |
Jens Axboe | a3a0e43 | 2019-08-20 11:03:11 -0600 | [diff] [blame] | 1636 | /* See comment at the top of this file */ |
| 1637 | smp_rmb(); |
Jens Axboe | ad3eb2c | 2019-12-18 17:12:20 -0700 | [diff] [blame] | 1638 | return ctx->cached_cq_tail - READ_ONCE(rings->cq.head); |
Jens Axboe | a3a0e43 | 2019-08-20 11:03:11 -0600 | [diff] [blame] | 1639 | } |
| 1640 | |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 1641 | static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx) |
| 1642 | { |
| 1643 | struct io_rings *rings = ctx->rings; |
| 1644 | |
| 1645 | /* make sure SQ entry isn't read before tail */ |
| 1646 | return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head; |
| 1647 | } |
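/*
 * Example: if the application has published sq.tail == 100 and the
 * kernel has consumed SQEs up to cached_sq_head == 97, three new
 * entries are available.  The unsigned difference also stays correct
 * once the 32-bit counters wrap around.
 */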
| 1648 | |
Jens Axboe | 8237e04 | 2019-12-28 10:48:22 -0700 | [diff] [blame] | 1649 | static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req) |
Jens Axboe | e94f141 | 2019-12-19 12:06:02 -0700 | [diff] [blame] | 1650 | { |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1651 | if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req)) |
| 1652 | return false; |
Jens Axboe | e94f141 | 2019-12-19 12:06:02 -0700 | [diff] [blame] | 1653 | |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1654 | if (!(req->flags & REQ_F_FIXED_FILE) || req->io) |
| 1655 | rb->need_iter++; |
| 1656 | |
| 1657 | rb->reqs[rb->to_free++] = req; |
| 1658 | if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs))) |
| 1659 | io_free_req_many(req->ctx, rb); |
| 1660 | return true; |
Jens Axboe | e94f141 | 2019-12-19 12:06:02 -0700 | [diff] [blame] | 1661 | } |
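/*
 * A request is only eligible for batched freeing if it is not part of a
 * link chain and was not taken from the fallback slot.  Requests whose
 * file does not come from the fixed file table, or that still carry an
 * async context in req->io, bump need_iter so io_free_req_many() runs
 * the per-request teardown loop before the bulk free.
 */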
| 1662 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1663 | /* |
| 1664 | * Find and free completed poll iocbs |
| 1665 | */ |
| 1666 | static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, |
| 1667 | struct list_head *done) |
| 1668 | { |
Jens Axboe | 8237e04 | 2019-12-28 10:48:22 -0700 | [diff] [blame] | 1669 | struct req_batch rb; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1670 | struct io_kiocb *req; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1671 | |
Jens Axboe | c6ca97b30 | 2019-12-28 12:11:08 -0700 | [diff] [blame] | 1672 | rb.to_free = rb.need_iter = 0; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1673 | while (!list_empty(done)) { |
| 1674 | req = list_first_entry(done, struct io_kiocb, list); |
| 1675 | list_del(&req->list); |
| 1676 | |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 1677 | io_cqring_fill_event(req, req->result); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1678 | (*nr_events)++; |
| 1679 | |
Jens Axboe | 8237e04 | 2019-12-28 10:48:22 -0700 | [diff] [blame] | 1680 | if (refcount_dec_and_test(&req->refs) && |
| 1681 | !io_req_multi_free(&rb, req)) |
| 1682 | io_free_req(req); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1683 | } |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1684 | |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1685 | io_commit_cqring(ctx); |
Jens Axboe | 8237e04 | 2019-12-28 10:48:22 -0700 | [diff] [blame] | 1686 | io_free_req_many(ctx, &rb); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1687 | } |
| 1688 | |
| 1689 | static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, |
| 1690 | long min) |
| 1691 | { |
| 1692 | struct io_kiocb *req, *tmp; |
| 1693 | LIST_HEAD(done); |
| 1694 | bool spin; |
| 1695 | int ret; |
| 1696 | |
| 1697 | /* |
| 1698 | * Only spin for completions if we don't have multiple devices hanging |
| 1699 | * off our complete list, and we're under the requested amount. |
| 1700 | */ |
| 1701 | spin = !ctx->poll_multi_file && *nr_events < min; |
| 1702 | |
| 1703 | ret = 0; |
| 1704 | list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) { |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 1705 | struct kiocb *kiocb = &req->rw.kiocb; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1706 | |
| 1707 | /* |
| 1708 | * Move completed entries to our local list. If we find a |
| 1709 | * request that requires polling, break out and complete |
| 1710 | * the done list first, if we have entries there. |
| 1711 | */ |
| 1712 | if (req->flags & REQ_F_IOPOLL_COMPLETED) { |
| 1713 | list_move_tail(&req->list, &done); |
| 1714 | continue; |
| 1715 | } |
| 1716 | if (!list_empty(&done)) |
| 1717 | break; |
| 1718 | |
| 1719 | ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin); |
| 1720 | if (ret < 0) |
| 1721 | break; |
| 1722 | |
| 1723 | if (ret && spin) |
| 1724 | spin = false; |
| 1725 | ret = 0; |
| 1726 | } |
| 1727 | |
| 1728 | if (!list_empty(&done)) |
| 1729 | io_iopoll_complete(ctx, nr_events, &done); |
| 1730 | |
| 1731 | return ret; |
| 1732 | } |
| 1733 | |
| 1734 | /* |
Brian Gianforcaro | d195a66 | 2019-12-13 03:09:50 -0800 | [diff] [blame] | 1735 | * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1736 | * non-spinning poll check - we'll still enter the driver poll loop, but only |
| 1737 | * as a non-spinning completion check. |
| 1738 | */ |
| 1739 | static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events, |
| 1740 | long min) |
| 1741 | { |
Jens Axboe | 08f5439 | 2019-08-21 22:19:11 -0600 | [diff] [blame] | 1742 | while (!list_empty(&ctx->poll_list) && !need_resched()) { |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1743 | int ret; |
| 1744 | |
| 1745 | ret = io_do_iopoll(ctx, nr_events, min); |
| 1746 | if (ret < 0) |
| 1747 | return ret; |
| 1748 | if (!min || *nr_events >= min) |
| 1749 | return 0; |
| 1750 | } |
| 1751 | |
| 1752 | return 1; |
| 1753 | } |
| 1754 | |
| 1755 | /* |
| 1756 | * We can't just wait for polled events to come to us, we have to actively |
| 1757 | * find and complete them. |
| 1758 | */ |
| 1759 | static void io_iopoll_reap_events(struct io_ring_ctx *ctx) |
| 1760 | { |
| 1761 | if (!(ctx->flags & IORING_SETUP_IOPOLL)) |
| 1762 | return; |
| 1763 | |
| 1764 | mutex_lock(&ctx->uring_lock); |
| 1765 | while (!list_empty(&ctx->poll_list)) { |
| 1766 | unsigned int nr_events = 0; |
| 1767 | |
| 1768 | io_iopoll_getevents(ctx, &nr_events, 1); |
Jens Axboe | 08f5439 | 2019-08-21 22:19:11 -0600 | [diff] [blame] | 1769 | |
| 1770 | /* |
| 1771 | * Ensure we allow local-to-the-cpu processing to take place, |
| 1772 | * in this case we need to ensure that we reap all events. |
| 1773 | */ |
| 1774 | cond_resched(); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1775 | } |
| 1776 | mutex_unlock(&ctx->uring_lock); |
| 1777 | } |
| 1778 | |
Xiaoguang Wang | c7849be | 2020-02-22 14:46:05 +0800 | [diff] [blame] | 1779 | static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, |
| 1780 | long min) |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1781 | { |
Jens Axboe | 2b2ed97 | 2019-10-25 10:06:15 -0600 | [diff] [blame] | 1782 | int iters = 0, ret = 0; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1783 | |
Xiaoguang Wang | c7849be | 2020-02-22 14:46:05 +0800 | [diff] [blame] | 1784 | /* |
| 1785 | * We disallow the app entering submit/complete with polling, but we |
| 1786 | * still need to lock the ring to prevent racing with polled issue |
| 1787 | * that got punted to a workqueue. |
| 1788 | */ |
| 1789 | mutex_lock(&ctx->uring_lock); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1790 | do { |
| 1791 | int tmin = 0; |
| 1792 | |
Jens Axboe | 500f9fb | 2019-08-19 12:15:59 -0600 | [diff] [blame] | 1793 | /* |
Jens Axboe | a3a0e43 | 2019-08-20 11:03:11 -0600 | [diff] [blame] | 1794 | * Don't enter poll loop if we already have events pending. |
| 1795 | * If we do, we can potentially be spinning for commands that |
| 1796 | * already triggered a CQE (eg in error). |
| 1797 | */ |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 1798 | if (io_cqring_events(ctx, false)) |
Jens Axboe | a3a0e43 | 2019-08-20 11:03:11 -0600 | [diff] [blame] | 1799 | break; |
| 1800 | |
| 1801 | /* |
Jens Axboe | 500f9fb | 2019-08-19 12:15:59 -0600 | [diff] [blame] | 1802 | * If a submit got punted to a workqueue, we can have the |
| 1803 | * application entering polling for a command before it gets |
| 1804 | * issued. That app will hold the uring_lock for the duration |
| 1805 | * of the poll right here, so we need to take a breather every |
| 1806 | * now and then to ensure that the issue has a chance to add |
| 1807 | * the poll to the issued list. Otherwise we can spin here |
| 1808 | * forever, while the workqueue is stuck trying to acquire the |
| 1809 | * very same mutex. |
| 1810 | */ |
| 1811 | if (!(++iters & 7)) { |
| 1812 | mutex_unlock(&ctx->uring_lock); |
| 1813 | mutex_lock(&ctx->uring_lock); |
| 1814 | } |
| 1815 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1816 | if (*nr_events < min) |
| 1817 | tmin = min - *nr_events; |
| 1818 | |
| 1819 | ret = io_iopoll_getevents(ctx, nr_events, tmin); |
| 1820 | if (ret <= 0) |
| 1821 | break; |
| 1822 | ret = 0; |
| 1823 | } while (min && !*nr_events && !need_resched()); |
| 1824 | |
Jens Axboe | 500f9fb | 2019-08-19 12:15:59 -0600 | [diff] [blame] | 1825 | mutex_unlock(&ctx->uring_lock); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1826 | return ret; |
| 1827 | } |
| 1828 | |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 1829 | static void kiocb_end_write(struct io_kiocb *req) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1830 | { |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 1831 | /* |
| 1832 | * Tell lockdep we inherited freeze protection from submission |
| 1833 | * thread. |
| 1834 | */ |
| 1835 | if (req->flags & REQ_F_ISREG) { |
| 1836 | struct inode *inode = file_inode(req->file); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1837 | |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 1838 | __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1839 | } |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 1840 | file_end_write(req->file); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1841 | } |
| 1842 | |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 1843 | static inline void req_set_fail_links(struct io_kiocb *req) |
| 1844 | { |
| 1845 | if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK) |
| 1846 | req->flags |= REQ_F_FAIL_LINK; |
| 1847 | } |
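/*
 * Only plain links propagate failure: when REQ_F_HARDLINK is set the
 * chain keeps going even if this request fails, so FAIL_LINK is only
 * set for LINK without HARDLINK.
 */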
| 1848 | |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1849 | static void io_complete_rw_common(struct kiocb *kiocb, long res) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1850 | { |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 1851 | struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1852 | |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 1853 | if (kiocb->ki_flags & IOCB_WRITE) |
| 1854 | kiocb_end_write(req); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1855 | |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 1856 | if (res != req->result) |
| 1857 | req_set_fail_links(req); |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 1858 | io_cqring_add_event(req, res); |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1859 | } |
| 1860 | |
| 1861 | static void io_complete_rw(struct kiocb *kiocb, long res, long res2) |
| 1862 | { |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 1863 | struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1864 | |
| 1865 | io_complete_rw_common(kiocb, res); |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 1866 | io_put_req(req); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1867 | } |
| 1868 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1869 | static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) |
| 1870 | { |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 1871 | struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1872 | |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 1873 | if (kiocb->ki_flags & IOCB_WRITE) |
| 1874 | kiocb_end_write(req); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1875 | |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 1876 | if (res != req->result) |
| 1877 | req_set_fail_links(req); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1878 | req->result = res; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1879 | if (res != -EAGAIN) |
| 1880 | req->flags |= REQ_F_IOPOLL_COMPLETED; |
| 1881 | } |
| 1882 | |
| 1883 | /* |
| 1884 | * After the iocb has been issued, it's safe to be found on the poll list. |
| 1885 | * Adding the kiocb to the list AFTER submission ensures that we don't |
| 1886 | * find it from an io_iopoll_getevents() thread before the issuer is done |
| 1887 | * accessing the kiocb cookie. |
| 1888 | */ |
| 1889 | static void io_iopoll_req_issued(struct io_kiocb *req) |
| 1890 | { |
| 1891 | struct io_ring_ctx *ctx = req->ctx; |
| 1892 | |
| 1893 | /* |
| 1894 | * Track whether we have multiple files in our lists. This will impact |
| 1895 | * how we do polling eventually, not spinning if we're on potentially |
| 1896 | * different devices. |
| 1897 | */ |
| 1898 | if (list_empty(&ctx->poll_list)) { |
| 1899 | ctx->poll_multi_file = false; |
| 1900 | } else if (!ctx->poll_multi_file) { |
| 1901 | struct io_kiocb *list_req; |
| 1902 | |
| 1903 | list_req = list_first_entry(&ctx->poll_list, struct io_kiocb, |
| 1904 | list); |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 1905 | if (list_req->file != req->file) |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1906 | ctx->poll_multi_file = true; |
| 1907 | } |
| 1908 | |
| 1909 | /* |
| 1910 | * For fast devices, IO may have already completed. If it has, add |
| 1911 | * it to the front so we find it first. |
| 1912 | */ |
| 1913 | if (req->flags & REQ_F_IOPOLL_COMPLETED) |
| 1914 | list_add(&req->list, &ctx->poll_list); |
| 1915 | else |
| 1916 | list_add_tail(&req->list, &ctx->poll_list); |
Xiaoguang Wang | bdcd3ea | 2020-02-25 22:12:08 +0800 | [diff] [blame] | 1917 | |
| 1918 | if ((ctx->flags & IORING_SETUP_SQPOLL) && |
| 1919 | wq_has_sleeper(&ctx->sqo_wait)) |
| 1920 | wake_up(&ctx->sqo_wait); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1921 | } |
| 1922 | |
Jens Axboe | 3d6770f | 2019-04-13 11:50:54 -0600 | [diff] [blame] | 1923 | static void io_file_put(struct io_submit_state *state) |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 1924 | { |
Jens Axboe | 3d6770f | 2019-04-13 11:50:54 -0600 | [diff] [blame] | 1925 | if (state->file) { |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 1926 | int diff = state->has_refs - state->used_refs; |
| 1927 | |
| 1928 | if (diff) |
| 1929 | fput_many(state->file, diff); |
| 1930 | state->file = NULL; |
| 1931 | } |
| 1932 | } |
| 1933 | |
| 1934 | /* |
| 1935 | * Get as many references to a file as we have IOs left in this submission, |
| 1936 | * assuming most submissions are for one file, or at least that each file |
| 1937 | * has more than one submission. |
| 1938 | */ |
Pavel Begunkov | 8da11c1 | 2020-02-24 11:32:44 +0300 | [diff] [blame] | 1939 | static struct file *__io_file_get(struct io_submit_state *state, int fd) |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 1940 | { |
| 1941 | if (!state) |
| 1942 | return fget(fd); |
| 1943 | |
| 1944 | if (state->file) { |
| 1945 | if (state->fd == fd) { |
| 1946 | state->used_refs++; |
| 1947 | state->ios_left--; |
| 1948 | return state->file; |
| 1949 | } |
Jens Axboe | 3d6770f | 2019-04-13 11:50:54 -0600 | [diff] [blame] | 1950 | io_file_put(state); |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 1951 | } |
| 1952 | state->file = fget_many(fd, state->ios_left); |
| 1953 | if (!state->file) |
| 1954 | return NULL; |
| 1955 | |
| 1956 | state->fd = fd; |
| 1957 | state->has_refs = state->ios_left; |
| 1958 | state->used_refs = 1; |
| 1959 | state->ios_left--; |
| 1960 | return state->file; |
| 1961 | } |
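/*
 * Illustrative example of the batching above (fd number made up): with
 * state->ios_left == 8 and every SQE in the batch targeting fd 5, the
 * first lookup does a single fget_many(5, 8) and later submissions just
 * bump used_refs.  If only three references end up used, io_file_put()
 * returns the remaining five in one fput_many() call.
 */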
| 1962 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1963 | /* |
| 1964 | * If we tracked the file through the SCM inflight mechanism, we could support |
| 1965 | * any file. For now, just ensure that anything potentially problematic is done |
| 1966 | * inline. |
| 1967 | */ |
| 1968 | static bool io_file_supports_async(struct file *file) |
| 1969 | { |
| 1970 | umode_t mode = file_inode(file)->i_mode; |
| 1971 | |
Jens Axboe | 10d5934 | 2019-12-09 20:16:22 -0700 | [diff] [blame] | 1972 | if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode)) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1973 | return true; |
| 1974 | if (S_ISREG(mode) && file->f_op != &io_uring_fops) |
| 1975 | return true; |
| 1976 | |
| 1977 | return false; |
| 1978 | } |
| 1979 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 1980 | static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, |
| 1981 | bool force_nonblock) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1982 | { |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1983 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 1984 | struct kiocb *kiocb = &req->rw.kiocb; |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 1985 | unsigned ioprio; |
| 1986 | int ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1987 | |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 1988 | if (S_ISREG(file_inode(req->file)->i_mode)) |
| 1989 | req->flags |= REQ_F_ISREG; |
| 1990 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1991 | kiocb->ki_pos = READ_ONCE(sqe->off); |
Jens Axboe | ba04291 | 2019-12-25 16:33:42 -0700 | [diff] [blame] | 1992 | if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) { |
| 1993 | req->flags |= REQ_F_CUR_POS; |
| 1994 | kiocb->ki_pos = req->file->f_pos; |
| 1995 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1996 | kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp)); |
Pavel Begunkov | 3e577dc | 2020-02-01 03:58:42 +0300 | [diff] [blame] | 1997 | kiocb->ki_flags = iocb_flags(kiocb->ki_filp); |
| 1998 | ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags)); |
| 1999 | if (unlikely(ret)) |
| 2000 | return ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2001 | |
| 2002 | ioprio = READ_ONCE(sqe->ioprio); |
| 2003 | if (ioprio) { |
| 2004 | ret = ioprio_check_cap(ioprio); |
| 2005 | if (ret) |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 2006 | return ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2007 | |
| 2008 | kiocb->ki_ioprio = ioprio; |
| 2009 | } else |
| 2010 | kiocb->ki_ioprio = get_current_ioprio(); |
| 2011 | |
Stefan Bühler | 8449eed | 2019-04-27 20:34:19 +0200 | [diff] [blame] | 2012 | /* don't allow async punt if RWF_NOWAIT was requested */ |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 2013 | if ((kiocb->ki_flags & IOCB_NOWAIT) || |
| 2014 | (req->file->f_flags & O_NONBLOCK)) |
Stefan Bühler | 8449eed | 2019-04-27 20:34:19 +0200 | [diff] [blame] | 2015 | req->flags |= REQ_F_NOWAIT; |
| 2016 | |
| 2017 | if (force_nonblock) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2018 | kiocb->ki_flags |= IOCB_NOWAIT; |
Stefan Bühler | 8449eed | 2019-04-27 20:34:19 +0200 | [diff] [blame] | 2019 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 2020 | if (ctx->flags & IORING_SETUP_IOPOLL) { |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 2021 | if (!(kiocb->ki_flags & IOCB_DIRECT) || |
| 2022 | !kiocb->ki_filp->f_op->iopoll) |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 2023 | return -EOPNOTSUPP; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2024 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 2025 | kiocb->ki_flags |= IOCB_HIPRI; |
| 2026 | kiocb->ki_complete = io_complete_rw_iopoll; |
Jens Axboe | 6873e0b | 2019-10-30 13:53:09 -0600 | [diff] [blame] | 2027 | req->result = 0; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 2028 | } else { |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 2029 | if (kiocb->ki_flags & IOCB_HIPRI) |
| 2030 | return -EINVAL; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 2031 | kiocb->ki_complete = io_complete_rw; |
| 2032 | } |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2033 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2034 | req->rw.addr = READ_ONCE(sqe->addr); |
| 2035 | req->rw.len = READ_ONCE(sqe->len); |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2036 | /* we own ->private, reuse it for the buffer index */ |
| 2037 | req->rw.kiocb.private = (void *) (unsigned long) |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2038 | READ_ONCE(sqe->buf_index); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2039 | return 0; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2040 | } |
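
/*
 * Userspace sketch (not kernel code): the SQE fields that io_prep_rw() and
 * io_import_iovec() consume, filled by hand for a plain IORING_OP_READ.
 * liburing's io_uring_prep_read() produces the same layout; this only
 * illustrates the field mapping, and error handling (a NULL sqe) is omitted.
 */
#include <string.h>
#include <liburing.h>

static void prep_plain_read(struct io_uring *ring, int fd,
			    void *buf, unsigned nbytes, __u64 offset)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_READ;
	sqe->fd = fd;
	sqe->addr = (unsigned long) buf;	/* becomes req->rw.addr */
	sqe->len = nbytes;			/* becomes req->rw.len */
	sqe->off = offset;			/* kiocb->ki_pos; -1 means "use f_pos" */
	sqe->rw_flags = 0;			/* RWF_* flags, e.g. RWF_NOWAIT */
	sqe->ioprio = 0;			/* 0 inherits the submitter's ioprio */
	sqe->buf_index = 0;			/* only used by READ/WRITE_FIXED */
}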
| 2041 | |
| 2042 | static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) |
| 2043 | { |
| 2044 | switch (ret) { |
| 2045 | case -EIOCBQUEUED: |
| 2046 | break; |
| 2047 | case -ERESTARTSYS: |
| 2048 | case -ERESTARTNOINTR: |
| 2049 | case -ERESTARTNOHAND: |
| 2050 | case -ERESTART_RESTARTBLOCK: |
| 2051 | /* |
| 2052 | * We can't just restart the syscall, since previously |
| 2053 | * submitted sqes may already be in progress. Just fail this |
| 2054 | * IO with EINTR. |
| 2055 | */ |
| 2056 | ret = -EINTR; |
| 2057 | /* fall through */ |
| 2058 | default: |
| 2059 | kiocb->ki_complete(kiocb, ret, 0); |
| 2060 | } |
| 2061 | } |
| 2062 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2063 | static void kiocb_done(struct kiocb *kiocb, ssize_t ret) |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2064 | { |
Jens Axboe | ba04291 | 2019-12-25 16:33:42 -0700 | [diff] [blame] | 2065 | struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); |
| 2066 | |
| 2067 | if (req->flags & REQ_F_CUR_POS) |
| 2068 | req->file->f_pos = kiocb->ki_pos; |
Pavel Begunkov | bcaec08 | 2020-02-24 11:30:18 +0300 | [diff] [blame] | 2069 | if (ret >= 0 && kiocb->ki_complete == io_complete_rw) |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2070 | io_complete_rw(kiocb, ret, 0); |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 2071 | else |
| 2072 | io_rw_done(kiocb, ret); |
| 2073 | } |
| 2074 | |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2075 | static ssize_t io_import_fixed(struct io_kiocb *req, int rw, |
Pavel Begunkov | 7d00916 | 2019-11-25 23:14:40 +0300 | [diff] [blame] | 2076 | struct iov_iter *iter) |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2077 | { |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2078 | struct io_ring_ctx *ctx = req->ctx; |
| 2079 | size_t len = req->rw.len; |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2080 | struct io_mapped_ubuf *imu; |
| 2081 | unsigned index, buf_index; |
| 2082 | size_t offset; |
| 2083 | u64 buf_addr; |
| 2084 | |
| 2085 | /* attempt to use fixed buffers without having provided iovecs */ |
| 2086 | if (unlikely(!ctx->user_bufs)) |
| 2087 | return -EFAULT; |
| 2088 | |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2089 | buf_index = (unsigned long) req->rw.kiocb.private; |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2090 | if (unlikely(buf_index >= ctx->nr_user_bufs)) |
| 2091 | return -EFAULT; |
| 2092 | |
| 2093 | index = array_index_nospec(buf_index, ctx->nr_user_bufs); |
| 2094 | imu = &ctx->user_bufs[index]; |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2095 | buf_addr = req->rw.addr; |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2096 | |
| 2097 | /* overflow */ |
| 2098 | if (buf_addr + len < buf_addr) |
| 2099 | return -EFAULT; |
| 2100 | /* not inside the mapped region */ |
| 2101 | if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len) |
| 2102 | return -EFAULT; |
| 2103 | |
| 2104 | /* |
| 2105 |  	 * The request may not start at the beginning of the buffer; set the |
| 2106 |  	 * size appropriately and advance the iterator to where it starts. |
| 2107 | */ |
| 2108 | offset = buf_addr - imu->ubuf; |
| 2109 | iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len); |
Jens Axboe | bd11b3a | 2019-07-20 08:37:31 -0600 | [diff] [blame] | 2110 | |
| 2111 | if (offset) { |
| 2112 | /* |
| 2113 |  		 * Don't use iov_iter_advance() here, as it's really slow when |
| 2114 | * using the latter parts of a big fixed buffer - it iterates |
| 2115 | * over each segment manually. We can cheat a bit here, because |
| 2116 | * we know that: |
| 2117 | * |
| 2118 | * 1) it's a BVEC iter, we set it up |
| 2119 | * 2) all bvecs are PAGE_SIZE in size, except potentially the |
| 2120 | * first and last bvec |
| 2121 | * |
| 2122 | * So just find our index, and adjust the iterator afterwards. |
| 2123 | * If the offset is within the first bvec (or the whole first |
| 2124 |  		 * bvec), just use iov_iter_advance(). This makes it easier |
| 2125 | * since we can just skip the first segment, which may not |
| 2126 | * be PAGE_SIZE aligned. |
| 2127 | */ |
| 2128 | const struct bio_vec *bvec = imu->bvec; |
| 2129 | |
| 2130 | if (offset <= bvec->bv_len) { |
| 2131 | iov_iter_advance(iter, offset); |
| 2132 | } else { |
| 2133 | unsigned long seg_skip; |
| 2134 | |
| 2135 | /* skip first vec */ |
| 2136 | offset -= bvec->bv_len; |
| 2137 | seg_skip = 1 + (offset >> PAGE_SHIFT); |
| 2138 | |
| 2139 | iter->bvec = bvec + seg_skip; |
| 2140 | iter->nr_segs -= seg_skip; |
Aleix Roca Nonell | 99c79f6 | 2019-08-15 14:03:22 +0200 | [diff] [blame] | 2141 | iter->count -= bvec->bv_len + offset; |
Jens Axboe | bd11b3a | 2019-07-20 08:37:31 -0600 | [diff] [blame] | 2142 | iter->iov_offset = offset & ~PAGE_MASK; |
Jens Axboe | bd11b3a | 2019-07-20 08:37:31 -0600 | [diff] [blame] | 2143 | } |
| 2144 | } |
| 2145 | |
Jens Axboe | 5e55956 | 2019-11-13 16:12:46 -0700 | [diff] [blame] | 2146 | return len; |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2147 | } |
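
/*
 * Userspace sketch (not kernel code): driving the fixed-buffer path above.
 * The buffer is registered once with io_uring_register_buffers(), then
 * referenced by index via IORING_OP_READ_FIXED; the index travels in
 * sqe->buf_index and is resolved against ctx->user_bufs in io_import_fixed().
 * Assumes liburing; error handling is minimal.
 */
#include <stdlib.h>
#include <sys/uio.h>
#include <liburing.h>

static int read_into_fixed_buf(struct io_uring *ring, int fd, size_t len)
{
	struct iovec iov = { .iov_base = malloc(len), .iov_len = len };
	struct io_uring_sqe *sqe;

	/* pins the pages and populates the ring's user_bufs[0] */
	if (!iov.iov_base || io_uring_register_buffers(ring, &iov, 1) < 0)
		return -1;

	sqe = io_uring_get_sqe(ring);
	/* the last argument is the buf_index checked against nr_user_bufs */
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, len, 0, 0);
	return io_uring_submit(ring);
}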
| 2148 | |
Pavel Begunkov | cf6fd4b | 2019-11-25 23:14:39 +0300 | [diff] [blame] | 2149 | static ssize_t io_import_iovec(int rw, struct io_kiocb *req, |
| 2150 | struct iovec **iovec, struct iov_iter *iter) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2151 | { |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2152 | void __user *buf = u64_to_user_ptr(req->rw.addr); |
| 2153 | size_t sqe_len = req->rw.len; |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2154 | u8 opcode; |
| 2155 | |
Jens Axboe | d625c6e | 2019-12-17 19:53:05 -0700 | [diff] [blame] | 2156 | opcode = req->opcode; |
Pavel Begunkov | 7d00916 | 2019-11-25 23:14:40 +0300 | [diff] [blame] | 2157 | if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2158 | *iovec = NULL; |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2159 | return io_import_fixed(req, rw, iter); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2160 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2161 | |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2162 | /* buffer index only valid with fixed read/write */ |
| 2163 | if (req->rw.kiocb.private) |
| 2164 | return -EINVAL; |
| 2165 | |
Jens Axboe | 3a6820f | 2019-12-22 15:19:35 -0700 | [diff] [blame] | 2166 | if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) { |
| 2167 | ssize_t ret; |
| 2168 | ret = import_single_range(rw, buf, sqe_len, *iovec, iter); |
| 2169 | *iovec = NULL; |
Jens Axboe | 3a90159 | 2020-02-25 17:48:55 -0700 | [diff] [blame] | 2170 | return ret < 0 ? ret : sqe_len; |
Jens Axboe | 3a6820f | 2019-12-22 15:19:35 -0700 | [diff] [blame] | 2171 | } |
| 2172 | |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2173 | if (req->io) { |
| 2174 | struct io_async_rw *iorw = &req->io->rw; |
| 2175 | |
| 2176 | *iovec = iorw->iov; |
| 2177 | iov_iter_init(iter, rw, *iovec, iorw->nr_segs, iorw->size); |
| 2178 | if (iorw->iov == iorw->fast_iov) |
| 2179 | *iovec = NULL; |
| 2180 | return iorw->size; |
| 2181 | } |
| 2182 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2183 | #ifdef CONFIG_COMPAT |
Pavel Begunkov | cf6fd4b | 2019-11-25 23:14:39 +0300 | [diff] [blame] | 2184 | if (req->ctx->compat) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2185 | return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV, |
| 2186 | iovec, iter); |
| 2187 | #endif |
| 2188 | |
| 2189 | return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter); |
| 2190 | } |
| 2191 | |
Jens Axboe | 3296061 | 2019-09-23 11:05:34 -0600 | [diff] [blame] | 2192 | /* |
| 2193 | * For files that don't have ->read_iter() and ->write_iter(), handle them |
| 2194 | * by looping over ->read() or ->write() manually. |
| 2195 | */ |
| 2196 | static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, |
| 2197 | struct iov_iter *iter) |
| 2198 | { |
| 2199 | ssize_t ret = 0; |
| 2200 | |
| 2201 | /* |
| 2202 | * Don't support polled IO through this interface, and we can't |
| 2203 | * support non-blocking either. For the latter, this just causes |
| 2204 | * the kiocb to be handled from an async context. |
| 2205 | */ |
| 2206 | if (kiocb->ki_flags & IOCB_HIPRI) |
| 2207 | return -EOPNOTSUPP; |
| 2208 | if (kiocb->ki_flags & IOCB_NOWAIT) |
| 2209 | return -EAGAIN; |
| 2210 | |
| 2211 | while (iov_iter_count(iter)) { |
Pavel Begunkov | 311ae9e | 2019-11-24 11:58:24 +0300 | [diff] [blame] | 2212 | struct iovec iovec; |
Jens Axboe | 3296061 | 2019-09-23 11:05:34 -0600 | [diff] [blame] | 2213 | ssize_t nr; |
| 2214 | |
Pavel Begunkov | 311ae9e | 2019-11-24 11:58:24 +0300 | [diff] [blame] | 2215 | if (!iov_iter_is_bvec(iter)) { |
| 2216 | iovec = iov_iter_iovec(iter); |
| 2217 | } else { |
| 2218 | /* fixed buffers import bvec */ |
| 2219 | iovec.iov_base = kmap(iter->bvec->bv_page) |
| 2220 | + iter->iov_offset; |
| 2221 | iovec.iov_len = min(iter->count, |
| 2222 | iter->bvec->bv_len - iter->iov_offset); |
| 2223 | } |
| 2224 | |
Jens Axboe | 3296061 | 2019-09-23 11:05:34 -0600 | [diff] [blame] | 2225 | if (rw == READ) { |
| 2226 | nr = file->f_op->read(file, iovec.iov_base, |
| 2227 | iovec.iov_len, &kiocb->ki_pos); |
| 2228 | } else { |
| 2229 | nr = file->f_op->write(file, iovec.iov_base, |
| 2230 | iovec.iov_len, &kiocb->ki_pos); |
| 2231 | } |
| 2232 | |
Pavel Begunkov | 311ae9e | 2019-11-24 11:58:24 +0300 | [diff] [blame] | 2233 | if (iov_iter_is_bvec(iter)) |
| 2234 | kunmap(iter->bvec->bv_page); |
| 2235 | |
Jens Axboe | 3296061 | 2019-09-23 11:05:34 -0600 | [diff] [blame] | 2236 | if (nr < 0) { |
| 2237 | if (!ret) |
| 2238 | ret = nr; |
| 2239 | break; |
| 2240 | } |
| 2241 | ret += nr; |
| 2242 | if (nr != iovec.iov_len) |
| 2243 | break; |
| 2244 | iov_iter_advance(iter, nr); |
| 2245 | } |
| 2246 | |
| 2247 | return ret; |
| 2248 | } |
| 2249 | |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 2250 | static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size, |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2251 | struct iovec *iovec, struct iovec *fast_iov, |
| 2252 | struct iov_iter *iter) |
| 2253 | { |
| 2254 | req->io->rw.nr_segs = iter->nr_segs; |
| 2255 | req->io->rw.size = io_size; |
| 2256 | req->io->rw.iov = iovec; |
| 2257 | if (!req->io->rw.iov) { |
| 2258 | req->io->rw.iov = req->io->rw.fast_iov; |
| 2259 | memcpy(req->io->rw.iov, fast_iov, |
| 2260 | sizeof(struct iovec) * iter->nr_segs); |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 2261 | } else { |
| 2262 | req->flags |= REQ_F_NEED_CLEANUP; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2263 | } |
| 2264 | } |
| 2265 | |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 2266 | static int io_alloc_async_ctx(struct io_kiocb *req) |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2267 | { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 2268 | if (!io_op_defs[req->opcode].async_ctx) |
| 2269 | return 0; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2270 | req->io = kmalloc(sizeof(*req->io), GFP_KERNEL); |
Jens Axboe | 06b76d4 | 2019-12-19 14:44:26 -0700 | [diff] [blame] | 2271 | return req->io == NULL; |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 2272 | } |
| 2273 | |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 2274 | static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size, |
| 2275 | struct iovec *iovec, struct iovec *fast_iov, |
| 2276 | struct iov_iter *iter) |
| 2277 | { |
Jens Axboe | 980ad26 | 2020-01-24 23:08:54 -0700 | [diff] [blame] | 2278 | if (!io_op_defs[req->opcode].async_ctx) |
Jens Axboe | 74566df | 2020-01-13 19:23:24 -0700 | [diff] [blame] | 2279 | return 0; |
Jens Axboe | 5d204bc | 2020-01-31 12:06:52 -0700 | [diff] [blame] | 2280 | if (!req->io) { |
| 2281 | if (io_alloc_async_ctx(req)) |
| 2282 | return -ENOMEM; |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 2283 | |
Jens Axboe | 5d204bc | 2020-01-31 12:06:52 -0700 | [diff] [blame] | 2284 | io_req_map_rw(req, io_size, iovec, fast_iov, iter); |
| 2285 | } |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 2286 | return 0; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2287 | } |
| 2288 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2289 | static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, |
| 2290 | bool force_nonblock) |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2291 | { |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2292 | struct io_async_ctx *io; |
| 2293 | struct iov_iter iter; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2294 | ssize_t ret; |
| 2295 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2296 | ret = io_prep_rw(req, sqe, force_nonblock); |
| 2297 | if (ret) |
| 2298 | return ret; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2299 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2300 | if (unlikely(!(req->file->f_mode & FMODE_READ))) |
| 2301 | return -EBADF; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2302 | |
Pavel Begunkov | 5f798be | 2020-02-08 13:28:02 +0300 | [diff] [blame] | 2303 | /* either don't need iovec imported or already have it */ |
| 2304 | if (!req->io || req->flags & REQ_F_NEED_CLEANUP) |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2305 | return 0; |
| 2306 | |
| 2307 | io = req->io; |
| 2308 | io->rw.iov = io->rw.fast_iov; |
| 2309 | req->io = NULL; |
| 2310 | ret = io_import_iovec(READ, req, &io->rw.iov, &iter); |
| 2311 | req->io = io; |
| 2312 | if (ret < 0) |
| 2313 | return ret; |
| 2314 | |
| 2315 | io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter); |
| 2316 | return 0; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2317 | } |
| 2318 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2319 | static int io_read(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2320 | { |
| 2321 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2322 | struct kiocb *kiocb = &req->rw.kiocb; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2323 | struct iov_iter iter; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2324 | size_t iov_count; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2325 | ssize_t io_size, ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2326 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2327 | ret = io_import_iovec(READ, req, &iovec, &iter); |
Jens Axboe | 06b76d4 | 2019-12-19 14:44:26 -0700 | [diff] [blame] | 2328 | if (ret < 0) |
| 2329 | return ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2330 | |
Jens Axboe | fd6c2e4 | 2019-12-18 12:19:41 -0700 | [diff] [blame] | 2331 | /* Ensure we clear previously set non-block flag */ |
| 2332 | if (!force_nonblock) |
Jens Axboe | 29de5f6 | 2020-02-20 09:56:08 -0700 | [diff] [blame] | 2333 | kiocb->ki_flags &= ~IOCB_NOWAIT; |
Jens Axboe | fd6c2e4 | 2019-12-18 12:19:41 -0700 | [diff] [blame] | 2334 | |
Bijan Mottahedeh | 797f3f5 | 2020-01-15 18:37:45 -0800 | [diff] [blame] | 2335 | req->result = 0; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2336 | io_size = ret; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2337 | if (req->flags & REQ_F_LINK) |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2338 | req->result = io_size; |
| 2339 | |
| 2340 | /* |
| 2341 | * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so |
| 2342 | * we know to async punt it even if it was opened O_NONBLOCK |
| 2343 | */ |
Jens Axboe | 29de5f6 | 2020-02-20 09:56:08 -0700 | [diff] [blame] | 2344 | if (force_nonblock && !io_file_supports_async(req->file)) |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2345 | goto copy_iov; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2346 | |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2347 | iov_count = iov_iter_count(&iter); |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2348 | ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2349 | if (!ret) { |
| 2350 | ssize_t ret2; |
| 2351 | |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2352 | if (req->file->f_op->read_iter) |
| 2353 | ret2 = call_read_iter(req->file, kiocb, &iter); |
Jens Axboe | 3296061 | 2019-09-23 11:05:34 -0600 | [diff] [blame] | 2354 | else |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2355 | ret2 = loop_rw_iter(READ, req->file, kiocb, &iter); |
Jens Axboe | 3296061 | 2019-09-23 11:05:34 -0600 | [diff] [blame] | 2356 | |
Jens Axboe | 9d93a3f | 2019-05-15 13:53:07 -0600 | [diff] [blame] | 2357 | /* Catch -EAGAIN return for forced non-blocking submission */ |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2358 | if (!force_nonblock || ret2 != -EAGAIN) { |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2359 | kiocb_done(kiocb, ret2); |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2360 | } else { |
| 2361 | copy_iov: |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 2362 | ret = io_setup_async_rw(req, io_size, iovec, |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2363 | inline_vecs, &iter); |
| 2364 | if (ret) |
| 2365 | goto out_free; |
Jens Axboe | 29de5f6 | 2020-02-20 09:56:08 -0700 | [diff] [blame] | 2366 | 			/* any defer here is final, must retry from blocking context */ |
| 2367 | if (!(req->flags & REQ_F_NOWAIT)) |
| 2368 | req->flags |= REQ_F_MUST_PUNT; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2369 | return -EAGAIN; |
| 2370 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2371 | } |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2372 | out_free: |
Pavel Begunkov | 1e95081 | 2020-02-06 19:51:16 +0300 | [diff] [blame] | 2373 | kfree(iovec); |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 2374 | req->flags &= ~REQ_F_NEED_CLEANUP; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2375 | return ret; |
| 2376 | } |
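
/*
 * Userspace sketch (not kernel code): a complete read round trip. Whether
 * io_read() completes inline or punts with -EAGAIN to the async worker is
 * invisible here; the result always arrives as a CQE, with cqe->res holding
 * the byte count or a negative errno. Assumes liburing; error handling is
 * mostly omitted.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("/etc/hostname", O_RDONLY);

	io_uring_queue_init(4, &ring, 0);
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);
	printf("read returned %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}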
| 2377 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2378 | static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, |
| 2379 | bool force_nonblock) |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2380 | { |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2381 | struct io_async_ctx *io; |
| 2382 | struct iov_iter iter; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2383 | ssize_t ret; |
| 2384 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2385 | ret = io_prep_rw(req, sqe, force_nonblock); |
| 2386 | if (ret) |
| 2387 | return ret; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2388 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2389 | if (unlikely(!(req->file->f_mode & FMODE_WRITE))) |
| 2390 | return -EBADF; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2391 | |
Pavel Begunkov | 5f798be | 2020-02-08 13:28:02 +0300 | [diff] [blame] | 2392 | /* either don't need iovec imported or already have it */ |
| 2393 | if (!req->io || req->flags & REQ_F_NEED_CLEANUP) |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2394 | return 0; |
| 2395 | |
| 2396 | io = req->io; |
| 2397 | io->rw.iov = io->rw.fast_iov; |
| 2398 | req->io = NULL; |
| 2399 | ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter); |
| 2400 | req->io = io; |
| 2401 | if (ret < 0) |
| 2402 | return ret; |
| 2403 | |
| 2404 | io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter); |
| 2405 | return 0; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2406 | } |
| 2407 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2408 | static int io_write(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2409 | { |
| 2410 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2411 | struct kiocb *kiocb = &req->rw.kiocb; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2412 | struct iov_iter iter; |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2413 | size_t iov_count; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2414 | ssize_t ret, io_size; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2415 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2416 | ret = io_import_iovec(WRITE, req, &iovec, &iter); |
Jens Axboe | 06b76d4 | 2019-12-19 14:44:26 -0700 | [diff] [blame] | 2417 | if (ret < 0) |
| 2418 | return ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2419 | |
Jens Axboe | fd6c2e4 | 2019-12-18 12:19:41 -0700 | [diff] [blame] | 2420 | /* Ensure we clear previously set non-block flag */ |
| 2421 | if (!force_nonblock) |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2422 | req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT; |
Jens Axboe | fd6c2e4 | 2019-12-18 12:19:41 -0700 | [diff] [blame] | 2423 | |
Bijan Mottahedeh | 797f3f5 | 2020-01-15 18:37:45 -0800 | [diff] [blame] | 2424 | req->result = 0; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2425 | io_size = ret; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2426 | if (req->flags & REQ_F_LINK) |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2427 | req->result = io_size; |
| 2428 | |
| 2429 | /* |
| 2430 | * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so |
| 2431 | * we know to async punt it even if it was opened O_NONBLOCK |
| 2432 | */ |
Jens Axboe | 29de5f6 | 2020-02-20 09:56:08 -0700 | [diff] [blame] | 2433 | if (force_nonblock && !io_file_supports_async(req->file)) |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2434 | goto copy_iov; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2435 | |
Jens Axboe | 10d5934 | 2019-12-09 20:16:22 -0700 | [diff] [blame] | 2436 | 	/* file path doesn't support NOWAIT for non-direct IO */ |
| 2437 | if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) && |
| 2438 | (req->flags & REQ_F_ISREG)) |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2439 | goto copy_iov; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2440 | |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2441 | iov_count = iov_iter_count(&iter); |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2442 | ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2443 | if (!ret) { |
Roman Penyaev | 9bf7933 | 2019-03-25 20:09:24 +0100 | [diff] [blame] | 2444 | ssize_t ret2; |
| 2445 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2446 | /* |
| 2447 | * Open-code file_start_write here to grab freeze protection, |
| 2448 | * which will be released by another thread in |
| 2449 | * io_complete_rw(). Fool lockdep by telling it the lock got |
| 2450 | * released so that it doesn't complain about the held lock when |
| 2451 | * we return to userspace. |
| 2452 | */ |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 2453 | if (req->flags & REQ_F_ISREG) { |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2454 | __sb_start_write(file_inode(req->file)->i_sb, |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2455 | SB_FREEZE_WRITE, true); |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2456 | __sb_writers_release(file_inode(req->file)->i_sb, |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2457 | SB_FREEZE_WRITE); |
| 2458 | } |
| 2459 | kiocb->ki_flags |= IOCB_WRITE; |
Roman Penyaev | 9bf7933 | 2019-03-25 20:09:24 +0100 | [diff] [blame] | 2460 | |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2461 | if (req->file->f_op->write_iter) |
| 2462 | ret2 = call_write_iter(req->file, kiocb, &iter); |
Jens Axboe | 3296061 | 2019-09-23 11:05:34 -0600 | [diff] [blame] | 2463 | else |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2464 | ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter); |
Jens Axboe | faac996 | 2020-02-07 15:45:22 -0700 | [diff] [blame] | 2465 | /* |
| 2466 | 		 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just |
| 2467 | * retry them without IOCB_NOWAIT. |
| 2468 | */ |
| 2469 | if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT)) |
| 2470 | ret2 = -EAGAIN; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2471 | if (!force_nonblock || ret2 != -EAGAIN) { |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2472 | kiocb_done(kiocb, ret2); |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2473 | } else { |
| 2474 | copy_iov: |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 2475 | ret = io_setup_async_rw(req, io_size, iovec, |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2476 | inline_vecs, &iter); |
| 2477 | if (ret) |
| 2478 | goto out_free; |
Jens Axboe | 29de5f6 | 2020-02-20 09:56:08 -0700 | [diff] [blame] | 2479 | 			/* any defer here is final, must retry from blocking context */ |
| 2480 | req->flags |= REQ_F_MUST_PUNT; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 2481 | return -EAGAIN; |
| 2482 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2483 | } |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 2484 | out_free: |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 2485 | req->flags &= ~REQ_F_NEED_CLEANUP; |
Pavel Begunkov | 1e95081 | 2020-02-06 19:51:16 +0300 | [diff] [blame] | 2486 | kfree(iovec); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2487 | return ret; |
| 2488 | } |
| 2489 | |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 2490 | static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
| 2491 | { |
| 2492 | 	struct io_splice *sp = &req->splice; |
| 2493 | unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL; |
| 2494 | int ret; |
| 2495 | |
| 2496 | if (req->flags & REQ_F_NEED_CLEANUP) |
| 2497 | return 0; |
| 2498 | |
| 2499 | sp->file_in = NULL; |
| 2500 | sp->off_in = READ_ONCE(sqe->splice_off_in); |
| 2501 | sp->off_out = READ_ONCE(sqe->off); |
| 2502 | sp->len = READ_ONCE(sqe->len); |
| 2503 | sp->flags = READ_ONCE(sqe->splice_flags); |
| 2504 | |
| 2505 | if (unlikely(sp->flags & ~valid_flags)) |
| 2506 | return -EINVAL; |
| 2507 | |
| 2508 | ret = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), &sp->file_in, |
| 2509 | (sp->flags & SPLICE_F_FD_IN_FIXED)); |
| 2510 | if (ret) |
| 2511 | return ret; |
| 2512 | req->flags |= REQ_F_NEED_CLEANUP; |
| 2513 | |
| 2514 | if (!S_ISREG(file_inode(sp->file_in)->i_mode)) |
| 2515 | req->work.flags |= IO_WQ_WORK_UNBOUND; |
| 2516 | |
| 2517 | return 0; |
| 2518 | } |
| 2519 | |
| 2520 | static bool io_splice_punt(struct file *file) |
| 2521 | { |
| 2522 | if (get_pipe_info(file)) |
| 2523 | return false; |
| 2524 | if (!io_file_supports_async(file)) |
| 2525 | return true; |
| 2526 | 	return !(file->f_flags & O_NONBLOCK); |
| 2527 | } |
| 2528 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2529 | static int io_splice(struct io_kiocb *req, bool force_nonblock) |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 2530 | { |
| 2531 | struct io_splice *sp = &req->splice; |
| 2532 | struct file *in = sp->file_in; |
| 2533 | struct file *out = sp->file_out; |
| 2534 | unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED; |
| 2535 | loff_t *poff_in, *poff_out; |
| 2536 | long ret; |
| 2537 | |
| 2538 | if (force_nonblock) { |
| 2539 | if (io_splice_punt(in) || io_splice_punt(out)) |
| 2540 | return -EAGAIN; |
| 2541 | flags |= SPLICE_F_NONBLOCK; |
| 2542 | } |
| 2543 | |
| 2544 | poff_in = (sp->off_in == -1) ? NULL : &sp->off_in; |
| 2545 | poff_out = (sp->off_out == -1) ? NULL : &sp->off_out; |
| 2546 | ret = do_splice(in, poff_in, out, poff_out, sp->len, flags); |
| 2547 | if (force_nonblock && ret == -EAGAIN) |
| 2548 | return -EAGAIN; |
| 2549 | |
| 2550 | io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); |
| 2551 | req->flags &= ~REQ_F_NEED_CLEANUP; |
| 2552 | |
| 2553 | io_cqring_add_event(req, ret); |
| 2554 | if (ret != sp->len) |
| 2555 | req_set_fail_links(req); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2556 | io_put_req(req); |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 2557 | return 0; |
| 2558 | } |
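
/*
 * Userspace sketch (not kernel code): moving file data into a pipe through
 * io_splice() without a copy to userspace. Assumes a liburing recent enough
 * to provide io_uring_prep_splice(); error handling is minimal.
 */
#include <unistd.h>
#include <liburing.h>

static int splice_file_to_pipe(struct io_uring *ring, int file_fd,
			       unsigned int len, int pipefd[2])
{
	struct io_uring_sqe *sqe;

	if (pipe(pipefd) < 0)
		return -1;

	sqe = io_uring_get_sqe(ring);
	/* off_in = 0 reads from the file start; off_out = -1 because pipes have no offset */
	io_uring_prep_splice(sqe, file_fd, 0, pipefd[1], -1, len, 0);
	return io_uring_submit(ring);
}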
| 2559 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2560 | /* |
| 2561 | * IORING_OP_NOP just posts a completion event, nothing else. |
| 2562 | */ |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 2563 | static int io_nop(struct io_kiocb *req) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2564 | { |
| 2565 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2566 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 2567 | if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) |
| 2568 | return -EINVAL; |
| 2569 | |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 2570 | io_cqring_add_event(req, 0); |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 2571 | io_put_req(req); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2572 | return 0; |
| 2573 | } |
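
/*
 * Userspace sketch (not kernel code): IORING_OP_NOP is the smallest possible
 * request and is useful for measuring raw ring overhead. Assumes liburing.
 */
#include <liburing.h>

static int submit_nop(struct io_uring *ring)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_nop(sqe);
	sqe->user_data = 42;	/* echoed back in cqe->user_data */
	return io_uring_submit(ring);
}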
| 2574 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2575 | static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 2576 | { |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 2577 | struct io_ring_ctx *ctx = req->ctx; |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 2578 | |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 2579 | if (!req->file) |
| 2580 | return -EBADF; |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 2581 | |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 2582 | if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 2583 | return -EINVAL; |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 2584 | if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index)) |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 2585 | return -EINVAL; |
| 2586 | |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 2587 | req->sync.flags = READ_ONCE(sqe->fsync_flags); |
| 2588 | if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC)) |
| 2589 | return -EINVAL; |
| 2590 | |
| 2591 | req->sync.off = READ_ONCE(sqe->off); |
| 2592 | req->sync.len = READ_ONCE(sqe->len); |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 2593 | return 0; |
| 2594 | } |
| 2595 | |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 2596 | static bool io_req_cancelled(struct io_kiocb *req) |
| 2597 | { |
| 2598 | if (req->work.flags & IO_WQ_WORK_CANCEL) { |
| 2599 | req_set_fail_links(req); |
| 2600 | io_cqring_add_event(req, -ECANCELED); |
Pavel Begunkov | e9fd939 | 2020-03-04 16:14:12 +0300 | [diff] [blame] | 2601 | io_put_req(req); |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 2602 | return true; |
| 2603 | } |
| 2604 | |
| 2605 | return false; |
| 2606 | } |
| 2607 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2608 | static void __io_fsync(struct io_kiocb *req) |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 2609 | { |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 2610 | loff_t end = req->sync.off + req->sync.len; |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 2611 | int ret; |
| 2612 | |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 2613 | ret = vfs_fsync_range(req->file, req->sync.off, |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 2614 | end > 0 ? end : LLONG_MAX, |
| 2615 | req->sync.flags & IORING_FSYNC_DATASYNC); |
| 2616 | if (ret < 0) |
| 2617 | req_set_fail_links(req); |
| 2618 | io_cqring_add_event(req, ret); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2619 | io_put_req(req); |
Pavel Begunkov | 5ea6216 | 2020-02-24 11:30:16 +0300 | [diff] [blame] | 2620 | } |
| 2621 | |
| 2622 | static void io_fsync_finish(struct io_wq_work **workptr) |
| 2623 | { |
| 2624 | struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); |
Pavel Begunkov | 5ea6216 | 2020-02-24 11:30:16 +0300 | [diff] [blame] | 2625 | |
| 2626 | if (io_req_cancelled(req)) |
| 2627 | return; |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2628 | __io_fsync(req); |
Pavel Begunkov | e9fd939 | 2020-03-04 16:14:12 +0300 | [diff] [blame] | 2629 | io_steal_work(req, workptr); |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 2630 | } |
| 2631 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2632 | static int io_fsync(struct io_kiocb *req, bool force_nonblock) |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 2633 | { |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 2634 | /* fsync always requires a blocking context */ |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 2635 | if (force_nonblock) { |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 2636 | req->work.func = io_fsync_finish; |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 2637 | return -EAGAIN; |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 2638 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2639 | __io_fsync(req); |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 2640 | return 0; |
| 2641 | } |
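
/*
 * Userspace sketch (not kernel code): a write followed by a data sync,
 * expressed as two linked SQEs so the fsync only starts once the write has
 * completed. As io_fsync() above shows, the fsync itself is always executed
 * from a blocking context. Assumes liburing; error handling is minimal.
 */
#include <liburing.h>

static int write_then_fdatasync(struct io_uring *ring, int fd,
				const void *buf, unsigned nbytes)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_write(sqe, fd, buf, nbytes, 0);
	sqe->flags |= IOSQE_IO_LINK;	/* order the fsync after the write */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);

	return io_uring_submit(ring);	/* two CQEs will be posted */
}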
| 2642 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2643 | static void __io_fallocate(struct io_kiocb *req) |
Jens Axboe | d63d1b5 | 2019-12-10 10:38:56 -0700 | [diff] [blame] | 2644 | { |
Jens Axboe | d63d1b5 | 2019-12-10 10:38:56 -0700 | [diff] [blame] | 2645 | int ret; |
| 2646 | |
| 2647 | ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off, |
| 2648 | req->sync.len); |
| 2649 | if (ret < 0) |
| 2650 | req_set_fail_links(req); |
| 2651 | io_cqring_add_event(req, ret); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2652 | io_put_req(req); |
Pavel Begunkov | 5ea6216 | 2020-02-24 11:30:16 +0300 | [diff] [blame] | 2653 | } |
| 2654 | |
| 2655 | static void io_fallocate_finish(struct io_wq_work **workptr) |
| 2656 | { |
| 2657 | struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); |
Pavel Begunkov | 5ea6216 | 2020-02-24 11:30:16 +0300 | [diff] [blame] | 2658 | |
Pavel Begunkov | 594506f | 2020-03-03 21:33:11 +0300 | [diff] [blame] | 2659 | if (io_req_cancelled(req)) |
| 2660 | return; |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2661 | __io_fallocate(req); |
Pavel Begunkov | e9fd939 | 2020-03-04 16:14:12 +0300 | [diff] [blame] | 2662 | io_steal_work(req, workptr); |
Jens Axboe | d63d1b5 | 2019-12-10 10:38:56 -0700 | [diff] [blame] | 2663 | } |
| 2664 | |
| 2665 | static int io_fallocate_prep(struct io_kiocb *req, |
| 2666 | const struct io_uring_sqe *sqe) |
| 2667 | { |
| 2668 | if (sqe->ioprio || sqe->buf_index || sqe->rw_flags) |
| 2669 | return -EINVAL; |
| 2670 | |
| 2671 | req->sync.off = READ_ONCE(sqe->off); |
| 2672 | req->sync.len = READ_ONCE(sqe->addr); |
| 2673 | req->sync.mode = READ_ONCE(sqe->len); |
| 2674 | return 0; |
| 2675 | } |
| 2676 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2677 | static int io_fallocate(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | d63d1b5 | 2019-12-10 10:38:56 -0700 | [diff] [blame] | 2678 | { |
Jens Axboe | d63d1b5 | 2019-12-10 10:38:56 -0700 | [diff] [blame] | 2679 | 	/* fallocate always requires a blocking context */ |
| 2680 | if (force_nonblock) { |
Jens Axboe | d63d1b5 | 2019-12-10 10:38:56 -0700 | [diff] [blame] | 2681 | req->work.func = io_fallocate_finish; |
| 2682 | return -EAGAIN; |
| 2683 | } |
| 2684 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2685 | __io_fallocate(req); |
Jens Axboe | d63d1b5 | 2019-12-10 10:38:56 -0700 | [diff] [blame] | 2686 | return 0; |
| 2687 | } |
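
/*
 * Userspace sketch (not kernel code): preallocating file space through the
 * ring. Note the unusual SQE mapping in io_fallocate_prep() above (length in
 * sqe->addr, mode in sqe->len); liburing's io_uring_prep_fallocate() hides
 * that detail. Assumes a liburing recent enough to provide the helper.
 */
#include <sys/types.h>
#include <liburing.h>

static int preallocate(struct io_uring *ring, int fd, off_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* mode 0 allocates and zeroes the range, growing the file if needed */
	io_uring_prep_fallocate(sqe, fd, 0, 0, len);
	return io_uring_submit(ring);
}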
| 2688 | |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2689 | static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
| 2690 | { |
Jens Axboe | f874888 | 2020-01-08 17:47:02 -0700 | [diff] [blame] | 2691 | const char __user *fname; |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2692 | int ret; |
| 2693 | |
| 2694 | if (sqe->ioprio || sqe->buf_index) |
| 2695 | return -EINVAL; |
Jens Axboe | cf3040c | 2020-02-06 21:31:40 -0700 | [diff] [blame] | 2696 | if (sqe->flags & IOSQE_FIXED_FILE) |
| 2697 | return -EBADF; |
Pavel Begunkov | 0bdbdd0 | 2020-02-08 13:28:03 +0300 | [diff] [blame] | 2698 | if (req->flags & REQ_F_NEED_CLEANUP) |
| 2699 | return 0; |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2700 | |
| 2701 | req->open.dfd = READ_ONCE(sqe->fd); |
Jens Axboe | c12cedf | 2020-01-08 17:41:21 -0700 | [diff] [blame] | 2702 | req->open.how.mode = READ_ONCE(sqe->len); |
Jens Axboe | f874888 | 2020-01-08 17:47:02 -0700 | [diff] [blame] | 2703 | fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); |
Jens Axboe | c12cedf | 2020-01-08 17:41:21 -0700 | [diff] [blame] | 2704 | req->open.how.flags = READ_ONCE(sqe->open_flags); |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2705 | |
Jens Axboe | f874888 | 2020-01-08 17:47:02 -0700 | [diff] [blame] | 2706 | req->open.filename = getname(fname); |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2707 | if (IS_ERR(req->open.filename)) { |
| 2708 | ret = PTR_ERR(req->open.filename); |
| 2709 | req->open.filename = NULL; |
| 2710 | return ret; |
| 2711 | } |
| 2712 | |
Pavel Begunkov | 8fef80b | 2020-02-07 23:59:53 +0300 | [diff] [blame] | 2713 | req->flags |= REQ_F_NEED_CLEANUP; |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2714 | return 0; |
| 2715 | } |
| 2716 | |
Jens Axboe | cebdb98 | 2020-01-08 17:59:24 -0700 | [diff] [blame] | 2717 | static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
| 2718 | { |
| 2719 | struct open_how __user *how; |
| 2720 | const char __user *fname; |
| 2721 | size_t len; |
| 2722 | int ret; |
| 2723 | |
| 2724 | if (sqe->ioprio || sqe->buf_index) |
| 2725 | return -EINVAL; |
Jens Axboe | cf3040c | 2020-02-06 21:31:40 -0700 | [diff] [blame] | 2726 | if (sqe->flags & IOSQE_FIXED_FILE) |
| 2727 | return -EBADF; |
Pavel Begunkov | 0bdbdd0 | 2020-02-08 13:28:03 +0300 | [diff] [blame] | 2728 | if (req->flags & REQ_F_NEED_CLEANUP) |
| 2729 | return 0; |
Jens Axboe | cebdb98 | 2020-01-08 17:59:24 -0700 | [diff] [blame] | 2730 | |
| 2731 | req->open.dfd = READ_ONCE(sqe->fd); |
| 2732 | fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); |
| 2733 | how = u64_to_user_ptr(READ_ONCE(sqe->addr2)); |
| 2734 | len = READ_ONCE(sqe->len); |
| 2735 | |
| 2736 | if (len < OPEN_HOW_SIZE_VER0) |
| 2737 | return -EINVAL; |
| 2738 | |
| 2739 | ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how, |
| 2740 | len); |
| 2741 | if (ret) |
| 2742 | return ret; |
| 2743 | |
| 2744 | if (!(req->open.how.flags & O_PATH) && force_o_largefile()) |
| 2745 | req->open.how.flags |= O_LARGEFILE; |
| 2746 | |
| 2747 | req->open.filename = getname(fname); |
| 2748 | if (IS_ERR(req->open.filename)) { |
| 2749 | ret = PTR_ERR(req->open.filename); |
| 2750 | req->open.filename = NULL; |
| 2751 | return ret; |
| 2752 | } |
| 2753 | |
Pavel Begunkov | 8fef80b | 2020-02-07 23:59:53 +0300 | [diff] [blame] | 2754 | req->flags |= REQ_F_NEED_CLEANUP; |
Jens Axboe | cebdb98 | 2020-01-08 17:59:24 -0700 | [diff] [blame] | 2755 | return 0; |
| 2756 | } |
| 2757 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2758 | static int io_openat2(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2759 | { |
| 2760 | struct open_flags op; |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2761 | struct file *file; |
| 2762 | int ret; |
| 2763 | |
Jens Axboe | f86cd20 | 2020-01-29 13:46:44 -0700 | [diff] [blame] | 2764 | if (force_nonblock) |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2765 | return -EAGAIN; |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2766 | |
Jens Axboe | cebdb98 | 2020-01-08 17:59:24 -0700 | [diff] [blame] | 2767 | ret = build_open_flags(&req->open.how, &op); |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2768 | if (ret) |
| 2769 | goto err; |
| 2770 | |
Jens Axboe | cebdb98 | 2020-01-08 17:59:24 -0700 | [diff] [blame] | 2771 | ret = get_unused_fd_flags(req->open.how.flags); |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2772 | if (ret < 0) |
| 2773 | goto err; |
| 2774 | |
| 2775 | file = do_filp_open(req->open.dfd, req->open.filename, &op); |
| 2776 | if (IS_ERR(file)) { |
| 2777 | put_unused_fd(ret); |
| 2778 | ret = PTR_ERR(file); |
| 2779 | } else { |
| 2780 | fsnotify_open(file); |
| 2781 | fd_install(ret, file); |
| 2782 | } |
| 2783 | err: |
| 2784 | putname(req->open.filename); |
Pavel Begunkov | 8fef80b | 2020-02-07 23:59:53 +0300 | [diff] [blame] | 2785 | req->flags &= ~REQ_F_NEED_CLEANUP; |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2786 | if (ret < 0) |
| 2787 | req_set_fail_links(req); |
| 2788 | io_cqring_add_event(req, ret); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2789 | io_put_req(req); |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 2790 | return 0; |
| 2791 | } |
| 2792 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2793 | static int io_openat(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | cebdb98 | 2020-01-08 17:59:24 -0700 | [diff] [blame] | 2794 | { |
| 2795 | req->open.how = build_open_how(req->open.how.flags, req->open.how.mode); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2796 | return io_openat2(req, force_nonblock); |
Jens Axboe | cebdb98 | 2020-01-08 17:59:24 -0700 | [diff] [blame] | 2797 | } |
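
/*
 * Userspace sketch (not kernel code): opening a file through the ring with
 * IORING_OP_OPENAT2. The struct open_how pointer travels in sqe->addr2 and
 * is copied in io_openat2_prep() above; the new descriptor comes back in
 * cqe->res. Assumes liburing provides io_uring_prep_openat2(); error
 * handling is minimal.
 */
#include <fcntl.h>
#include <string.h>
#include <linux/openat2.h>
#include <liburing.h>

static int open_via_ring(struct io_uring *ring, const char *path)
{
	struct open_how how;
	struct io_uring_sqe *sqe;

	memset(&how, 0, sizeof(how));
	how.flags = O_RDONLY;
	how.resolve = RESOLVE_NO_SYMLINKS;	/* optional path-resolution restriction */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_openat2(sqe, AT_FDCWD, path, &how);
	return io_uring_submit(ring);
}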
| 2798 | |
Jens Axboe | ddf0322d | 2020-02-23 16:41:33 -0700 | [diff] [blame^] | 2799 | static int io_provide_buffers_prep(struct io_kiocb *req, |
| 2800 | const struct io_uring_sqe *sqe) |
| 2801 | { |
| 2802 | struct io_provide_buf *p = &req->pbuf; |
| 2803 | u64 tmp; |
| 2804 | |
| 2805 | if (sqe->ioprio || sqe->rw_flags) |
| 2806 | return -EINVAL; |
| 2807 | |
| 2808 | tmp = READ_ONCE(sqe->fd); |
| 2809 | if (!tmp || tmp > USHRT_MAX) |
| 2810 | return -E2BIG; |
| 2811 | p->nbufs = tmp; |
| 2812 | p->addr = READ_ONCE(sqe->addr); |
| 2813 | p->len = READ_ONCE(sqe->len); |
| 2814 | |
| 2815 | if (!access_ok(u64_to_user_ptr(p->addr), p->len)) |
| 2816 | return -EFAULT; |
| 2817 | |
| 2818 | p->bgid = READ_ONCE(sqe->buf_group); |
| 2819 | tmp = READ_ONCE(sqe->off); |
| 2820 | if (tmp > USHRT_MAX) |
| 2821 | return -E2BIG; |
| 2822 | p->bid = tmp; |
| 2823 | return 0; |
| 2824 | } |
| 2825 | |
| 2826 | static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head) |
| 2827 | { |
| 2828 | struct io_buffer *buf; |
| 2829 | u64 addr = pbuf->addr; |
| 2830 | int i, bid = pbuf->bid; |
| 2831 | |
| 2832 | for (i = 0; i < pbuf->nbufs; i++) { |
| 2833 | buf = kmalloc(sizeof(*buf), GFP_KERNEL); |
| 2834 | if (!buf) |
| 2835 | break; |
| 2836 | |
| 2837 | buf->addr = addr; |
| 2838 | buf->len = pbuf->len; |
| 2839 | buf->bid = bid; |
| 2840 | addr += pbuf->len; |
| 2841 | bid++; |
| 2842 | if (!*head) { |
| 2843 | INIT_LIST_HEAD(&buf->list); |
| 2844 | *head = buf; |
| 2845 | } else { |
| 2846 | list_add_tail(&buf->list, &(*head)->list); |
| 2847 | } |
| 2848 | } |
| 2849 | |
| 2850 | return i ? i : -ENOMEM; |
| 2851 | } |
| 2852 | |
| 2853 | static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock) |
| 2854 | { |
| 2855 | if (needs_lock) |
| 2856 | mutex_unlock(&ctx->uring_lock); |
| 2857 | } |
| 2858 | |
| 2859 | static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock) |
| 2860 | { |
| 2861 | /* |
| 2862 | * "Normal" inline submissions always hold the uring_lock, since we |
| 2863 | * grab it from the system call. Same is true for the SQPOLL offload. |
| 2864 | * The only exception is when we've detached the request and issue it |
| 2865 | * from an async worker thread, grab the lock for that case. |
| 2866 | */ |
| 2867 | if (needs_lock) |
| 2868 | mutex_lock(&ctx->uring_lock); |
| 2869 | } |
| 2870 | |
| 2871 | static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock) |
| 2872 | { |
| 2873 | struct io_provide_buf *p = &req->pbuf; |
| 2874 | struct io_ring_ctx *ctx = req->ctx; |
| 2875 | struct io_buffer *head, *list; |
| 2876 | int ret = 0; |
| 2877 | |
| 2878 | io_ring_submit_lock(ctx, !force_nonblock); |
| 2879 | |
| 2880 | lockdep_assert_held(&ctx->uring_lock); |
| 2881 | |
| 2882 | list = head = idr_find(&ctx->io_buffer_idr, p->bgid); |
| 2883 | |
| 2884 | ret = io_add_buffers(p, &head); |
| 2885 | if (ret < 0) |
| 2886 | goto out; |
| 2887 | |
| 2888 | if (!list) { |
| 2889 | ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1, |
| 2890 | GFP_KERNEL); |
| 2891 | if (ret < 0) { |
| 2892 | while (!list_empty(&head->list)) { |
| 2893 | struct io_buffer *buf; |
| 2894 | |
| 2895 | buf = list_first_entry(&head->list, |
| 2896 | struct io_buffer, list); |
| 2897 | list_del(&buf->list); |
| 2898 | kfree(buf); |
| 2899 | } |
| 2900 | kfree(head); |
| 2901 | goto out; |
| 2902 | } |
| 2903 | } |
| 2904 | out: |
| 2905 | io_ring_submit_unlock(ctx, !force_nonblock); |
| 2906 | if (ret < 0) |
| 2907 | req_set_fail_links(req); |
| 2908 | io_cqring_add_event(req, ret); |
| 2909 | io_put_req(req); |
| 2910 | return 0; |
| 2911 | } |
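
/*
 * Userspace sketch (not kernel code): handing a block of buffers to the
 * kernel with IORING_OP_PROVIDE_BUFFERS. The field mapping matches
 * io_provide_buffers_prep() above: the buffer count travels in sqe->fd, the
 * group id in sqe->buf_group and the starting buffer id in sqe->off.
 * Assumes a liburing recent enough to provide io_uring_prep_provide_buffers();
 * error handling is minimal.
 */
#include <stdlib.h>
#include <liburing.h>

static int provide_buffers(struct io_uring *ring, int group_id)
{
	const int nr_bufs = 8, buf_len = 4096;
	void *mem = malloc((size_t)nr_bufs * buf_len);
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* buffer ids 0..7 in this group become selectable by the kernel */
	io_uring_prep_provide_buffers(sqe, mem, buf_len, nr_bufs, group_id, 0);
	return io_uring_submit(ring);
}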
| 2912 | |
Jens Axboe | 3e4827b | 2020-01-08 15:18:09 -0700 | [diff] [blame] | 2913 | static int io_epoll_ctl_prep(struct io_kiocb *req, |
| 2914 | const struct io_uring_sqe *sqe) |
| 2915 | { |
| 2916 | #if defined(CONFIG_EPOLL) |
| 2917 | if (sqe->ioprio || sqe->buf_index) |
| 2918 | return -EINVAL; |
| 2919 | |
| 2920 | req->epoll.epfd = READ_ONCE(sqe->fd); |
| 2921 | req->epoll.op = READ_ONCE(sqe->len); |
| 2922 | req->epoll.fd = READ_ONCE(sqe->off); |
| 2923 | |
| 2924 | if (ep_op_has_event(req->epoll.op)) { |
| 2925 | struct epoll_event __user *ev; |
| 2926 | |
| 2927 | ev = u64_to_user_ptr(READ_ONCE(sqe->addr)); |
| 2928 | if (copy_from_user(&req->epoll.event, ev, sizeof(*ev))) |
| 2929 | return -EFAULT; |
| 2930 | } |
| 2931 | |
| 2932 | return 0; |
| 2933 | #else |
| 2934 | return -EOPNOTSUPP; |
| 2935 | #endif |
| 2936 | } |
| 2937 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2938 | static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | 3e4827b | 2020-01-08 15:18:09 -0700 | [diff] [blame] | 2939 | { |
| 2940 | #if defined(CONFIG_EPOLL) |
| 2941 | struct io_epoll *ie = &req->epoll; |
| 2942 | int ret; |
| 2943 | |
| 2944 | ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock); |
| 2945 | if (force_nonblock && ret == -EAGAIN) |
| 2946 | return -EAGAIN; |
| 2947 | |
| 2948 | if (ret < 0) |
| 2949 | req_set_fail_links(req); |
| 2950 | io_cqring_add_event(req, ret); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2951 | io_put_req(req); |
Jens Axboe | 3e4827b | 2020-01-08 15:18:09 -0700 | [diff] [blame] | 2952 | return 0; |
| 2953 | #else |
| 2954 | return -EOPNOTSUPP; |
| 2955 | #endif |
| 2956 | } |
| 2957 | |
Jens Axboe | c1ca757 | 2019-12-25 22:18:28 -0700 | [diff] [blame] | 2958 | static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
| 2959 | { |
| 2960 | #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU) |
| 2961 | if (sqe->ioprio || sqe->buf_index || sqe->off) |
| 2962 | return -EINVAL; |
| 2963 | |
| 2964 | req->madvise.addr = READ_ONCE(sqe->addr); |
| 2965 | req->madvise.len = READ_ONCE(sqe->len); |
| 2966 | req->madvise.advice = READ_ONCE(sqe->fadvise_advice); |
| 2967 | return 0; |
| 2968 | #else |
| 2969 | return -EOPNOTSUPP; |
| 2970 | #endif |
| 2971 | } |
| 2972 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2973 | static int io_madvise(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | c1ca757 | 2019-12-25 22:18:28 -0700 | [diff] [blame] | 2974 | { |
| 2975 | #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU) |
| 2976 | struct io_madvise *ma = &req->madvise; |
| 2977 | int ret; |
| 2978 | |
| 2979 | if (force_nonblock) |
| 2980 | return -EAGAIN; |
| 2981 | |
| 2982 | ret = do_madvise(ma->addr, ma->len, ma->advice); |
| 2983 | if (ret < 0) |
| 2984 | req_set_fail_links(req); |
| 2985 | io_cqring_add_event(req, ret); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 2986 | io_put_req(req); |
Jens Axboe | c1ca757 | 2019-12-25 22:18:28 -0700 | [diff] [blame] | 2987 | return 0; |
| 2988 | #else |
| 2989 | return -EOPNOTSUPP; |
| 2990 | #endif |
| 2991 | } |
| 2992 | |
Jens Axboe | 4840e41 | 2019-12-25 22:03:45 -0700 | [diff] [blame] | 2993 | static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
| 2994 | { |
| 2995 | if (sqe->ioprio || sqe->buf_index || sqe->addr) |
| 2996 | return -EINVAL; |
| 2997 | |
| 2998 | req->fadvise.offset = READ_ONCE(sqe->off); |
| 2999 | req->fadvise.len = READ_ONCE(sqe->len); |
| 3000 | req->fadvise.advice = READ_ONCE(sqe->fadvise_advice); |
| 3001 | return 0; |
| 3002 | } |
| 3003 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3004 | static int io_fadvise(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | 4840e41 | 2019-12-25 22:03:45 -0700 | [diff] [blame] | 3005 | { |
| 3006 | struct io_fadvise *fa = &req->fadvise; |
| 3007 | int ret; |
| 3008 | |
Jens Axboe | 3e69426 | 2020-02-01 09:22:49 -0700 | [diff] [blame] | 3009 | if (force_nonblock) { |
| 3010 | switch (fa->advice) { |
| 3011 | case POSIX_FADV_NORMAL: |
| 3012 | case POSIX_FADV_RANDOM: |
| 3013 | case POSIX_FADV_SEQUENTIAL: |
| 3014 | break; |
| 3015 | default: |
| 3016 | return -EAGAIN; |
| 3017 | } |
| 3018 | } |
Jens Axboe | 4840e41 | 2019-12-25 22:03:45 -0700 | [diff] [blame] | 3019 | |
| 3020 | ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); |
| 3021 | if (ret < 0) |
| 3022 | req_set_fail_links(req); |
| 3023 | io_cqring_add_event(req, ret); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3024 | io_put_req(req); |
Jens Axboe | 4840e41 | 2019-12-25 22:03:45 -0700 | [diff] [blame] | 3025 | return 0; |
| 3026 | } |
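
/*
 * Usage sketch (not part of this file): IORING_OP_FADVISE, matching the prep
 * above. As the force_nonblock switch shows, POSIX_FADV_NORMAL/RANDOM/
 * SEQUENTIAL are cheap enough to complete inline, while other advice values
 * may block and are punted to a worker. The wrapper is illustrative; liburing
 * provides io_uring_prep_fadvise().
 *
 *     #include <string.h>
 *     #include <fcntl.h>
 *     #include <linux/io_uring.h>
 *
 *     static void sqe_fadvise(struct io_uring_sqe *sqe, int fd, __u64 offset,
 *                             __u32 length, int advice)
 *     {
 *             memset(sqe, 0, sizeof(*sqe));
 *             sqe->opcode = IORING_OP_FADVISE;
 *             sqe->fd = fd;
 *             sqe->off = offset;
 *             sqe->len = length;
 *             sqe->fadvise_advice = advice;    // e.g. POSIX_FADV_SEQUENTIAL
 *     }
 */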
| 3027 | |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 3028 | static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
| 3029 | { |
Jens Axboe | f874888 | 2020-01-08 17:47:02 -0700 | [diff] [blame] | 3030 | const char __user *fname; |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 3031 | unsigned lookup_flags; |
| 3032 | int ret; |
| 3033 | |
| 3034 | if (sqe->ioprio || sqe->buf_index) |
| 3035 | return -EINVAL; |
Jens Axboe | cf3040c | 2020-02-06 21:31:40 -0700 | [diff] [blame] | 3036 | if (sqe->flags & IOSQE_FIXED_FILE) |
| 3037 | return -EBADF; |
Pavel Begunkov | 0bdbdd0 | 2020-02-08 13:28:03 +0300 | [diff] [blame] | 3038 | if (req->flags & REQ_F_NEED_CLEANUP) |
| 3039 | return 0; |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 3040 | |
| 3041 | req->open.dfd = READ_ONCE(sqe->fd); |
| 3042 | req->open.mask = READ_ONCE(sqe->len); |
Jens Axboe | f874888 | 2020-01-08 17:47:02 -0700 | [diff] [blame] | 3043 | fname = u64_to_user_ptr(READ_ONCE(sqe->addr)); |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 3044 | req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2)); |
Jens Axboe | c12cedf | 2020-01-08 17:41:21 -0700 | [diff] [blame] | 3045 | req->open.how.flags = READ_ONCE(sqe->statx_flags); |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 3046 | |
Jens Axboe | c12cedf | 2020-01-08 17:41:21 -0700 | [diff] [blame] | 3047 | if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.how.flags)) |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 3048 | return -EINVAL; |
| 3049 | |
Jens Axboe | f874888 | 2020-01-08 17:47:02 -0700 | [diff] [blame] | 3050 | req->open.filename = getname_flags(fname, lookup_flags, NULL); |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 3051 | if (IS_ERR(req->open.filename)) { |
| 3052 | ret = PTR_ERR(req->open.filename); |
| 3053 | req->open.filename = NULL; |
| 3054 | return ret; |
| 3055 | } |
| 3056 | |
Pavel Begunkov | 8fef80b | 2020-02-07 23:59:53 +0300 | [diff] [blame] | 3057 | req->flags |= REQ_F_NEED_CLEANUP; |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 3058 | return 0; |
| 3059 | } |
| 3060 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3061 | static int io_statx(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 3062 | { |
| 3063 | struct io_open *ctx = &req->open; |
| 3064 | unsigned lookup_flags; |
| 3065 | struct path path; |
| 3066 | struct kstat stat; |
| 3067 | int ret; |
| 3068 | |
| 3069 | if (force_nonblock) |
| 3070 | return -EAGAIN; |
| 3071 | |
Jens Axboe | c12cedf | 2020-01-08 17:41:21 -0700 | [diff] [blame] | 3072 | if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags)) |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 3073 | return -EINVAL; |
| 3074 | |
| 3075 | retry: |
| 3076 | /* filename_lookup() drops it, keep a reference */ |
| 3077 | ctx->filename->refcnt++; |
| 3078 | |
| 3079 | ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path, |
| 3080 | NULL); |
| 3081 | if (ret) |
| 3082 | goto err; |
| 3083 | |
Jens Axboe | c12cedf | 2020-01-08 17:41:21 -0700 | [diff] [blame] | 3084 | ret = vfs_getattr(&path, &stat, ctx->mask, ctx->how.flags); |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 3085 | path_put(&path); |
| 3086 | if (retry_estale(ret, lookup_flags)) { |
| 3087 | lookup_flags |= LOOKUP_REVAL; |
| 3088 | goto retry; |
| 3089 | } |
| 3090 | if (!ret) |
| 3091 | ret = cp_statx(&stat, ctx->buffer); |
| 3092 | err: |
| 3093 | putname(ctx->filename); |
Pavel Begunkov | 8fef80b | 2020-02-07 23:59:53 +0300 | [diff] [blame] | 3094 | req->flags &= ~REQ_F_NEED_CLEANUP; |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 3095 | if (ret < 0) |
| 3096 | req_set_fail_links(req); |
| 3097 | io_cqring_add_event(req, ret); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3098 | io_put_req(req); |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 3099 | return 0; |
| 3100 | } |
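
/*
 * Usage sketch (not part of this file): IORING_OP_STATX, matching the prep
 * above -- dfd in sqe->fd, pathname in sqe->addr, request mask in sqe->len,
 * AT_* flags in sqe->statx_flags and the result buffer in sqe->addr2. Path
 * lookup can block, so the handler always defers to a worker (force_nonblock
 * returns -EAGAIN above). The wrapper is illustrative and assumes a libc that
 * exposes struct statx (glibc 2.28+); liburing has io_uring_prep_statx().
 *
 *     #include <string.h>
 *     #include <fcntl.h>
 *     #include <sys/stat.h>
 *     #include <linux/io_uring.h>
 *
 *     static void sqe_statx(struct io_uring_sqe *sqe, int dfd, const char *path,
 *                           int flags, unsigned int mask, struct statx *stxbuf)
 *     {
 *             memset(sqe, 0, sizeof(*sqe));
 *             sqe->opcode = IORING_OP_STATX;
 *             sqe->fd = dfd;                       // e.g. AT_FDCWD
 *             sqe->addr = (unsigned long) path;
 *             sqe->statx_flags = flags;            // e.g. AT_SYMLINK_NOFOLLOW
 *             sqe->len = mask;                     // e.g. STATX_BASIC_STATS
 *             sqe->addr2 = (unsigned long) stxbuf;
 *     }
 */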
| 3101 | |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 3102 | static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
| 3103 | { |
| 3104 | /* |
| 3105 | * If we queue this for async, it must not be cancellable. That would |
| 3106 | * leave the 'file' in an indeterminate state. |
| 3107 | */ |
| 3108 | req->work.flags |= IO_WQ_WORK_NO_CANCEL; |
| 3109 | |
| 3110 | if (sqe->ioprio || sqe->off || sqe->addr || sqe->len || |
| 3111 | sqe->rw_flags || sqe->buf_index) |
| 3112 | return -EINVAL; |
| 3113 | if (sqe->flags & IOSQE_FIXED_FILE) |
Jens Axboe | cf3040c | 2020-02-06 21:31:40 -0700 | [diff] [blame] | 3114 | return -EBADF; |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 3115 | |
| 3116 | req->close.fd = READ_ONCE(sqe->fd); |
| 3117 | if (req->file->f_op == &io_uring_fops || |
Pavel Begunkov | b14cca0 | 2020-01-17 04:45:59 +0300 | [diff] [blame] | 3118 | req->close.fd == req->ctx->ring_fd) |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 3119 | return -EBADF; |
| 3120 | |
| 3121 | return 0; |
| 3122 | } |
| 3123 | |
Pavel Begunkov | a93b333 | 2020-02-08 14:04:34 +0300 | [diff] [blame] | 3124 | /* only called when __close_fd_get_file() is done */ |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3125 | static void __io_close_finish(struct io_kiocb *req) |
Pavel Begunkov | a93b333 | 2020-02-08 14:04:34 +0300 | [diff] [blame] | 3126 | { |
| 3127 | int ret; |
| 3128 | |
| 3129 | ret = filp_close(req->close.put_file, req->work.files); |
| 3130 | if (ret < 0) |
| 3131 | req_set_fail_links(req); |
| 3132 | io_cqring_add_event(req, ret); |
| 3133 | fput(req->close.put_file); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3134 | io_put_req(req); |
Pavel Begunkov | a93b333 | 2020-02-08 14:04:34 +0300 | [diff] [blame] | 3135 | } |
| 3136 | |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 3137 | static void io_close_finish(struct io_wq_work **workptr) |
| 3138 | { |
| 3139 | struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 3140 | |
Pavel Begunkov | 7fbeb95 | 2020-02-16 01:01:18 +0300 | [diff] [blame] | 3141 | /* not cancellable, don't do io_req_cancelled() */ |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3142 | __io_close_finish(req); |
Pavel Begunkov | e9fd939 | 2020-03-04 16:14:12 +0300 | [diff] [blame] | 3143 | io_steal_work(req, workptr); |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 3144 | } |
| 3145 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3146 | static int io_close(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 3147 | { |
| 3148 | int ret; |
| 3149 | |
| 3150 | req->close.put_file = NULL; |
| 3151 | ret = __close_fd_get_file(req->close.fd, &req->close.put_file); |
| 3152 | if (ret < 0) |
| 3153 | return ret; |
| 3154 | |
| 3155 | /* if the file has a flush method, be safe and punt to async */ |
Pavel Begunkov | a210067 | 2020-03-02 23:45:16 +0300 | [diff] [blame] | 3156 | if (req->close.put_file->f_op->flush && force_nonblock) { |
Pavel Begunkov | 594506f | 2020-03-03 21:33:11 +0300 | [diff] [blame] | 3157 | /* submission ref will be dropped, take it for async */ |
| 3158 | refcount_inc(&req->refs); |
| 3159 | |
Pavel Begunkov | a210067 | 2020-03-02 23:45:16 +0300 | [diff] [blame] | 3160 | req->work.func = io_close_finish; |
| 3161 | /* |
| 3162 | * Do manual async queue here to avoid grabbing files - we don't |
| 3163 | * need the files, and it'll cause io_close_finish() to close |
| 3164 | * the file again and cause a double CQE entry for this request |
| 3165 | */ |
| 3166 | io_queue_async_work(req); |
| 3167 | return 0; |
| 3168 | } |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 3169 | |
| 3170 | /* |
| 3171 | * No ->flush(), safely close from here and just punt the |
| 3172 | * fput() to async context. |
| 3173 | */ |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3174 | __io_close_finish(req); |
Pavel Begunkov | a93b333 | 2020-02-08 14:04:34 +0300 | [diff] [blame] | 3175 | return 0; |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 3176 | } |
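
/*
 * Usage sketch (not part of this file): IORING_OP_CLOSE takes only the
 * descriptor in sqe->fd; every other field must be zero, and as the prep
 * above enforces, neither the ring fd itself nor a fixed file may be closed
 * this way. If the file has a ->flush() method the final close is punted to
 * async context, as io_close() shows. Wrapper name is illustrative; liburing
 * has io_uring_prep_close().
 *
 *     #include <string.h>
 *     #include <linux/io_uring.h>
 *
 *     static void sqe_close(struct io_uring_sqe *sqe, int fd)
 *     {
 *             memset(sqe, 0, sizeof(*sqe));
 *             sqe->opcode = IORING_OP_CLOSE;
 *             sqe->fd = fd;
 *     }
 */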
| 3177 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3178 | static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 3179 | { |
| 3180 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 3181 | |
| 3182 | if (!req->file) |
| 3183 | return -EBADF; |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 3184 | |
| 3185 | if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) |
| 3186 | return -EINVAL; |
| 3187 | if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index)) |
| 3188 | return -EINVAL; |
| 3189 | |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3190 | req->sync.off = READ_ONCE(sqe->off); |
| 3191 | req->sync.len = READ_ONCE(sqe->len); |
| 3192 | req->sync.flags = READ_ONCE(sqe->sync_range_flags); |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3193 | return 0; |
| 3194 | } |
| 3195 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3196 | static void __io_sync_file_range(struct io_kiocb *req) |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3197 | { |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3198 | int ret; |
| 3199 | |
Jens Axboe | 9adbd45 | 2019-12-20 08:45:55 -0700 | [diff] [blame] | 3200 | ret = sync_file_range(req->file, req->sync.off, req->sync.len, |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3201 | req->sync.flags); |
| 3202 | if (ret < 0) |
| 3203 | req_set_fail_links(req); |
| 3204 | io_cqring_add_event(req, ret); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3205 | io_put_req(req); |
Pavel Begunkov | 5ea6216 | 2020-02-24 11:30:16 +0300 | [diff] [blame] | 3206 | } |
| 3207 | |
| 3209 | static void io_sync_file_range_finish(struct io_wq_work **workptr) |
| 3210 | { |
| 3211 | struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); |
| 3213 | |
| 3214 | if (io_req_cancelled(req)) |
| 3215 | return; |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3216 | __io_sync_file_range(req); |
Pavel Begunkov | 594506f | 2020-03-03 21:33:11 +0300 | [diff] [blame] | 3217 | io_put_req(req); /* put submission ref */ |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 3220 | } |
| 3221 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3222 | static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 3223 | { |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 3224 | /* sync_file_range always requires a blocking context */ |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3225 | if (force_nonblock) { |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3226 | req->work.func = io_sync_file_range_finish; |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 3227 | return -EAGAIN; |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3228 | } |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 3229 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3230 | __io_sync_file_range(req); |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 3231 | return 0; |
| 3232 | } |
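
/*
 * Usage sketch (not part of this file): IORING_OP_SYNC_FILE_RANGE, matching
 * io_prep_sfr() above. sync_file_range() always needs a blocking context, so
 * a non-blocking issue is re-queued through io_sync_file_range_finish(). The
 * wrapper is illustrative; liburing has io_uring_prep_sync_file_range().
 *
 *     #define _GNU_SOURCE
 *     #include <string.h>
 *     #include <fcntl.h>
 *     #include <linux/io_uring.h>
 *
 *     static void sqe_sync_file_range(struct io_uring_sqe *sqe, int fd,
 *                                     __u64 offset, __u32 nbytes,
 *                                     unsigned int flags)
 *     {
 *             memset(sqe, 0, sizeof(*sqe));
 *             sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
 *             sqe->fd = fd;
 *             sqe->off = offset;
 *             sqe->len = nbytes;
 *             sqe->sync_range_flags = flags;   // e.g. SYNC_FILE_RANGE_WRITE
 *     }
 */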
| 3233 | |
Pavel Begunkov | 02d27d8 | 2020-02-28 10:36:36 +0300 | [diff] [blame] | 3234 | static int io_setup_async_msg(struct io_kiocb *req, |
| 3235 | struct io_async_msghdr *kmsg) |
| 3236 | { |
| 3237 | if (req->io) |
| 3238 | return -EAGAIN; |
| 3239 | if (io_alloc_async_ctx(req)) { |
| 3240 | if (kmsg->iov != kmsg->fast_iov) |
| 3241 | kfree(kmsg->iov); |
| 3242 | return -ENOMEM; |
| 3243 | } |
| 3244 | req->flags |= REQ_F_NEED_CLEANUP; |
| 3245 | memcpy(&req->io->msg, kmsg, sizeof(*kmsg)); |
| 3246 | return -EAGAIN; |
| 3247 | } |
| 3248 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3249 | static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
Jens Axboe | aa1fa28 | 2019-04-19 13:38:09 -0600 | [diff] [blame] | 3250 | { |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3251 | #if defined(CONFIG_NET) |
Jens Axboe | e47293f | 2019-12-20 08:58:21 -0700 | [diff] [blame] | 3252 | struct io_sr_msg *sr = &req->sr_msg; |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3253 | struct io_async_ctx *io = req->io; |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 3254 | int ret; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3255 | |
Jens Axboe | e47293f | 2019-12-20 08:58:21 -0700 | [diff] [blame] | 3256 | sr->msg_flags = READ_ONCE(sqe->msg_flags); |
| 3257 | sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr)); |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 3258 | sr->len = READ_ONCE(sqe->len); |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3259 | |
Jens Axboe | d876836 | 2020-02-27 14:17:49 -0700 | [diff] [blame] | 3260 | #ifdef CONFIG_COMPAT |
| 3261 | if (req->ctx->compat) |
| 3262 | sr->msg_flags |= MSG_CMSG_COMPAT; |
| 3263 | #endif |
| 3264 | |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 3265 | if (!io || req->opcode == IORING_OP_SEND) |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3266 | return 0; |
Pavel Begunkov | 5f798be | 2020-02-08 13:28:02 +0300 | [diff] [blame] | 3267 | /* iovec is already imported */ |
| 3268 | if (req->flags & REQ_F_NEED_CLEANUP) |
| 3269 | return 0; |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3270 | |
Jens Axboe | d968856 | 2019-12-09 19:35:20 -0700 | [diff] [blame] | 3271 | io->msg.iov = io->msg.fast_iov; |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 3272 | ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags, |
Jens Axboe | e47293f | 2019-12-20 08:58:21 -0700 | [diff] [blame] | 3273 | &io->msg.iov); |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 3274 | if (!ret) |
| 3275 | req->flags |= REQ_F_NEED_CLEANUP; |
| 3276 | return ret; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3277 | #else |
Jens Axboe | e47293f | 2019-12-20 08:58:21 -0700 | [diff] [blame] | 3278 | return -EOPNOTSUPP; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3279 | #endif |
| 3280 | } |
| 3281 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3282 | static int io_sendmsg(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3283 | { |
| 3284 | #if defined(CONFIG_NET) |
Jens Axboe | 0b416c3 | 2019-12-15 10:57:46 -0700 | [diff] [blame] | 3285 | struct io_async_msghdr *kmsg = NULL; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3286 | struct socket *sock; |
| 3287 | int ret; |
| 3288 | |
| 3289 | if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) |
| 3290 | return -EINVAL; |
| 3291 | |
| 3292 | sock = sock_from_file(req->file, &ret); |
| 3293 | if (sock) { |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 3294 | struct io_async_ctx io; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3295 | unsigned flags; |
| 3296 | |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3297 | if (req->io) { |
Jens Axboe | 0b416c3 | 2019-12-15 10:57:46 -0700 | [diff] [blame] | 3298 | kmsg = &req->io->msg; |
Jens Axboe | b537916 | 2020-02-09 11:29:15 -0700 | [diff] [blame] | 3299 | kmsg->msg.msg_name = &req->io->msg.addr; |
Jens Axboe | 0b416c3 | 2019-12-15 10:57:46 -0700 | [diff] [blame] | 3300 | /* if iov is set, it's allocated already */ |
| 3301 | if (!kmsg->iov) |
| 3302 | kmsg->iov = kmsg->fast_iov; |
| 3303 | kmsg->msg.msg_iter.iov = kmsg->iov; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3304 | } else { |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3305 | struct io_sr_msg *sr = &req->sr_msg; |
| 3306 | |
Jens Axboe | 0b416c3 | 2019-12-15 10:57:46 -0700 | [diff] [blame] | 3307 | kmsg = &io.msg; |
Jens Axboe | b537916 | 2020-02-09 11:29:15 -0700 | [diff] [blame] | 3308 | kmsg->msg.msg_name = &io.msg.addr; |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3309 | |
| 3310 | io.msg.iov = io.msg.fast_iov; |
| 3311 | ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg, |
| 3312 | sr->msg_flags, &io.msg.iov); |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3313 | if (ret) |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3314 | return ret; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3315 | } |
| 3316 | |
Jens Axboe | e47293f | 2019-12-20 08:58:21 -0700 | [diff] [blame] | 3317 | flags = req->sr_msg.msg_flags; |
| 3318 | if (flags & MSG_DONTWAIT) |
| 3319 | req->flags |= REQ_F_NOWAIT; |
| 3320 | else if (force_nonblock) |
| 3321 | flags |= MSG_DONTWAIT; |
| 3322 | |
Jens Axboe | 0b416c3 | 2019-12-15 10:57:46 -0700 | [diff] [blame] | 3323 | ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); |
Pavel Begunkov | 02d27d8 | 2020-02-28 10:36:36 +0300 | [diff] [blame] | 3324 | if (force_nonblock && ret == -EAGAIN) |
| 3325 | return io_setup_async_msg(req, kmsg); |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3326 | if (ret == -ERESTARTSYS) |
| 3327 | ret = -EINTR; |
| 3328 | } |
| 3329 | |
Pavel Begunkov | 1e95081 | 2020-02-06 19:51:16 +0300 | [diff] [blame] | 3330 | if (kmsg && kmsg->iov != kmsg->fast_iov) |
Jens Axboe | 0b416c3 | 2019-12-15 10:57:46 -0700 | [diff] [blame] | 3331 | kfree(kmsg->iov); |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 3332 | req->flags &= ~REQ_F_NEED_CLEANUP; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3333 | io_cqring_add_event(req, ret); |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 3334 | if (ret < 0) |
| 3335 | req_set_fail_links(req); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3336 | io_put_req(req); |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3337 | return 0; |
| 3338 | #else |
| 3339 | return -EOPNOTSUPP; |
| 3340 | #endif |
| 3341 | } |
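
/*
 * Usage sketch (not part of this file): IORING_OP_SENDMSG, matching the prep
 * above -- the user msghdr pointer goes in sqe->addr and MSG_* flags in
 * sqe->msg_flags. The handler adds MSG_DONTWAIT for the inline attempt and, if
 * that would block, parks a kernel copy of the msghdr via io_setup_async_msg();
 * the payload buffers referenced by the iovecs must stay valid until the CQE
 * is posted. The wrapper is illustrative; liburing has io_uring_prep_sendmsg().
 *
 *     #include <string.h>
 *     #include <sys/socket.h>
 *     #include <linux/io_uring.h>
 *
 *     static void sqe_sendmsg(struct io_uring_sqe *sqe, int sockfd,
 *                             const struct msghdr *msg, unsigned int flags)
 *     {
 *             memset(sqe, 0, sizeof(*sqe));
 *             sqe->opcode = IORING_OP_SENDMSG;
 *             sqe->fd = sockfd;
 *             sqe->addr = (unsigned long) msg;
 *             sqe->len = 1;                    // one msghdr (mirrors liburing)
 *             sqe->msg_flags = flags;          // e.g. MSG_NOSIGNAL
 *     }
 */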
| 3342 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3343 | static int io_send(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 3344 | { |
| 3345 | #if defined(CONFIG_NET) |
| 3346 | struct socket *sock; |
| 3347 | int ret; |
| 3348 | |
| 3349 | if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) |
| 3350 | return -EINVAL; |
| 3351 | |
| 3352 | sock = sock_from_file(req->file, &ret); |
| 3353 | if (sock) { |
| 3354 | struct io_sr_msg *sr = &req->sr_msg; |
| 3355 | struct msghdr msg; |
| 3356 | struct iovec iov; |
| 3357 | unsigned flags; |
| 3358 | |
| 3359 | ret = import_single_range(WRITE, sr->buf, sr->len, &iov, |
| 3360 | &msg.msg_iter); |
| 3361 | if (ret) |
| 3362 | return ret; |
| 3363 | |
| 3364 | msg.msg_name = NULL; |
| 3365 | msg.msg_control = NULL; |
| 3366 | msg.msg_controllen = 0; |
| 3367 | msg.msg_namelen = 0; |
| 3368 | |
| 3369 | flags = req->sr_msg.msg_flags; |
| 3370 | if (flags & MSG_DONTWAIT) |
| 3371 | req->flags |= REQ_F_NOWAIT; |
| 3372 | else if (force_nonblock) |
| 3373 | flags |= MSG_DONTWAIT; |
| 3374 | |
Jens Axboe | 0b7b21e | 2020-01-31 08:34:59 -0700 | [diff] [blame] | 3375 | msg.msg_flags = flags; |
| 3376 | ret = sock_sendmsg(sock, &msg); |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 3377 | if (force_nonblock && ret == -EAGAIN) |
| 3378 | return -EAGAIN; |
| 3379 | if (ret == -ERESTARTSYS) |
| 3380 | ret = -EINTR; |
| 3381 | } |
| 3382 | |
| 3383 | io_cqring_add_event(req, ret); |
| 3384 | if (ret < 0) |
| 3385 | req_set_fail_links(req); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3386 | io_put_req(req); |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 3387 | return 0; |
| 3388 | #else |
| 3389 | return -EOPNOTSUPP; |
| 3390 | #endif |
| 3391 | } |
| 3392 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3393 | static int io_recvmsg_prep(struct io_kiocb *req, |
| 3394 | const struct io_uring_sqe *sqe) |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3395 | { |
| 3396 | #if defined(CONFIG_NET) |
Jens Axboe | e47293f | 2019-12-20 08:58:21 -0700 | [diff] [blame] | 3397 | struct io_sr_msg *sr = &req->sr_msg; |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3398 | struct io_async_ctx *io = req->io; |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 3399 | int ret; |
Jens Axboe | 06b76d4 | 2019-12-19 14:44:26 -0700 | [diff] [blame] | 3400 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3401 | sr->msg_flags = READ_ONCE(sqe->msg_flags); |
| 3402 | sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr)); |
Jens Axboe | 0b7b21e | 2020-01-31 08:34:59 -0700 | [diff] [blame] | 3403 | sr->len = READ_ONCE(sqe->len); |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3404 | |
Jens Axboe | d876836 | 2020-02-27 14:17:49 -0700 | [diff] [blame] | 3405 | #ifdef CONFIG_COMPAT |
| 3406 | if (req->ctx->compat) |
| 3407 | sr->msg_flags |= MSG_CMSG_COMPAT; |
| 3408 | #endif |
| 3409 | |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 3410 | if (!io || req->opcode == IORING_OP_RECV) |
Jens Axboe | 06b76d4 | 2019-12-19 14:44:26 -0700 | [diff] [blame] | 3411 | return 0; |
Pavel Begunkov | 5f798be | 2020-02-08 13:28:02 +0300 | [diff] [blame] | 3412 | /* iovec is already imported */ |
| 3413 | if (req->flags & REQ_F_NEED_CLEANUP) |
| 3414 | return 0; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3415 | |
Jens Axboe | d968856 | 2019-12-09 19:35:20 -0700 | [diff] [blame] | 3416 | io->msg.iov = io->msg.fast_iov; |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 3417 | ret = recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags, |
Jens Axboe | e47293f | 2019-12-20 08:58:21 -0700 | [diff] [blame] | 3418 | &io->msg.uaddr, &io->msg.iov); |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 3419 | if (!ret) |
| 3420 | req->flags |= REQ_F_NEED_CLEANUP; |
| 3421 | return ret; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3422 | #else |
Jens Axboe | e47293f | 2019-12-20 08:58:21 -0700 | [diff] [blame] | 3423 | return -EOPNOTSUPP; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3424 | #endif |
| 3425 | } |
| 3426 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3427 | static int io_recvmsg(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3428 | { |
| 3429 | #if defined(CONFIG_NET) |
Jens Axboe | 0b416c3 | 2019-12-15 10:57:46 -0700 | [diff] [blame] | 3430 | struct io_async_msghdr *kmsg = NULL; |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 3431 | struct socket *sock; |
| 3432 | int ret; |
| 3433 | |
| 3434 | if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) |
| 3435 | return -EINVAL; |
| 3436 | |
| 3437 | sock = sock_from_file(req->file, &ret); |
| 3438 | if (sock) { |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 3439 | struct io_async_ctx io; |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 3440 | unsigned flags; |
| 3441 | |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3442 | if (req->io) { |
Jens Axboe | 0b416c3 | 2019-12-15 10:57:46 -0700 | [diff] [blame] | 3443 | kmsg = &req->io->msg; |
Jens Axboe | b537916 | 2020-02-09 11:29:15 -0700 | [diff] [blame] | 3444 | kmsg->msg.msg_name = &req->io->msg.addr; |
Jens Axboe | 0b416c3 | 2019-12-15 10:57:46 -0700 | [diff] [blame] | 3445 | /* if iov is set, it's allocated already */ |
| 3446 | if (!kmsg->iov) |
| 3447 | kmsg->iov = kmsg->fast_iov; |
| 3448 | kmsg->msg.msg_iter.iov = kmsg->iov; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3449 | } else { |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3450 | struct io_sr_msg *sr = &req->sr_msg; |
| 3451 | |
Jens Axboe | 0b416c3 | 2019-12-15 10:57:46 -0700 | [diff] [blame] | 3452 | kmsg = &io.msg; |
Jens Axboe | b537916 | 2020-02-09 11:29:15 -0700 | [diff] [blame] | 3453 | kmsg->msg.msg_name = &io.msg.addr; |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3454 | |
| 3455 | io.msg.iov = io.msg.fast_iov; |
| 3456 | ret = recvmsg_copy_msghdr(&io.msg.msg, sr->msg, |
| 3457 | sr->msg_flags, &io.msg.uaddr, |
| 3458 | &io.msg.iov); |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3459 | if (ret) |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3460 | return ret; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 3461 | } |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 3462 | |
Jens Axboe | e47293f | 2019-12-20 08:58:21 -0700 | [diff] [blame] | 3463 | flags = req->sr_msg.msg_flags; |
| 3464 | if (flags & MSG_DONTWAIT) |
| 3465 | req->flags |= REQ_F_NOWAIT; |
| 3466 | else if (force_nonblock) |
| 3467 | flags |= MSG_DONTWAIT; |
| 3468 | |
| 3469 | ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg, |
| 3470 | kmsg->uaddr, flags); |
Pavel Begunkov | 02d27d8 | 2020-02-28 10:36:36 +0300 | [diff] [blame] | 3471 | if (force_nonblock && ret == -EAGAIN) |
| 3472 | return io_setup_async_msg(req, kmsg); |
Jens Axboe | 441cdbd | 2019-12-02 18:49:10 -0700 | [diff] [blame] | 3473 | if (ret == -ERESTARTSYS) |
| 3474 | ret = -EINTR; |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 3475 | } |
| 3476 | |
Pavel Begunkov | 1e95081 | 2020-02-06 19:51:16 +0300 | [diff] [blame] | 3477 | if (kmsg && kmsg->iov != kmsg->fast_iov) |
Jens Axboe | 0b416c3 | 2019-12-15 10:57:46 -0700 | [diff] [blame] | 3478 | kfree(kmsg->iov); |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 3479 | req->flags &= ~REQ_F_NEED_CLEANUP; |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 3480 | io_cqring_add_event(req, ret); |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 3481 | if (ret < 0) |
| 3482 | req_set_fail_links(req); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3483 | io_put_req(req); |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 3484 | return 0; |
| 3485 | #else |
| 3486 | return -EOPNOTSUPP; |
| 3487 | #endif |
| 3488 | } |
| 3489 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3490 | static int io_recv(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 3491 | { |
| 3492 | #if defined(CONFIG_NET) |
| 3493 | struct socket *sock; |
| 3494 | int ret; |
| 3495 | |
| 3496 | if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) |
| 3497 | return -EINVAL; |
| 3498 | |
| 3499 | sock = sock_from_file(req->file, &ret); |
| 3500 | if (sock) { |
| 3501 | struct io_sr_msg *sr = &req->sr_msg; |
| 3502 | struct msghdr msg; |
| 3503 | struct iovec iov; |
| 3504 | unsigned flags; |
| 3505 | |
| 3506 | ret = import_single_range(READ, sr->buf, sr->len, &iov, |
| 3507 | &msg.msg_iter); |
| 3508 | if (ret) |
| 3509 | return ret; |
| 3510 | |
| 3511 | msg.msg_name = NULL; |
| 3512 | msg.msg_control = NULL; |
| 3513 | msg.msg_controllen = 0; |
| 3514 | msg.msg_namelen = 0; |
| 3515 | msg.msg_iocb = NULL; |
| 3516 | msg.msg_flags = 0; |
| 3517 | |
| 3518 | flags = req->sr_msg.msg_flags; |
| 3519 | if (flags & MSG_DONTWAIT) |
| 3520 | req->flags |= REQ_F_NOWAIT; |
| 3521 | else if (force_nonblock) |
| 3522 | flags |= MSG_DONTWAIT; |
| 3523 | |
Jens Axboe | 0b7b21e | 2020-01-31 08:34:59 -0700 | [diff] [blame] | 3524 | ret = sock_recvmsg(sock, &msg, flags); |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 3525 | if (force_nonblock && ret == -EAGAIN) |
| 3526 | return -EAGAIN; |
| 3527 | if (ret == -ERESTARTSYS) |
| 3528 | ret = -EINTR; |
| 3529 | } |
| 3530 | |
| 3531 | io_cqring_add_event(req, ret); |
| 3532 | if (ret < 0) |
| 3533 | req_set_fail_links(req); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3534 | io_put_req(req); |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 3535 | return 0; |
| 3536 | #else |
| 3537 | return -EOPNOTSUPP; |
| 3538 | #endif |
| 3539 | } |
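
/*
 * Usage sketch (not part of this file): IORING_OP_RECV, the msghdr-less
 * counterpart handled by the same prep as IORING_OP_RECVMSG above -- a single
 * buffer described by sqe->addr/sqe->len plus MSG_* flags in sqe->msg_flags.
 * As with the send side, the handler adds MSG_DONTWAIT itself for the inline
 * attempt. The wrapper is illustrative; liburing has io_uring_prep_recv().
 *
 *     #include <string.h>
 *     #include <sys/socket.h>
 *     #include <linux/io_uring.h>
 *
 *     static void sqe_recv(struct io_uring_sqe *sqe, int sockfd,
 *                          void *buf, unsigned int buflen, unsigned int flags)
 *     {
 *             memset(sqe, 0, sizeof(*sqe));
 *             sqe->opcode = IORING_OP_RECV;
 *             sqe->fd = sockfd;
 *             sqe->addr = (unsigned long) buf;
 *             sqe->len = buflen;
 *             sqe->msg_flags = flags;          // usually 0
 *     }
 */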
| 3540 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3542 | static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
Jens Axboe | 17f2fe3 | 2019-10-17 14:42:58 -0600 | [diff] [blame] | 3543 | { |
| 3544 | #if defined(CONFIG_NET) |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3545 | struct io_accept *accept = &req->accept; |
| 3546 | |
Jens Axboe | 17f2fe3 | 2019-10-17 14:42:58 -0600 | [diff] [blame] | 3547 | if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) |
| 3548 | return -EINVAL; |
Hrvoje Zeba | 8042d6c | 2019-11-25 14:40:22 -0500 | [diff] [blame] | 3549 | if (sqe->ioprio || sqe->len || sqe->buf_index) |
Jens Axboe | 17f2fe3 | 2019-10-17 14:42:58 -0600 | [diff] [blame] | 3550 | return -EINVAL; |
| 3551 | |
Jens Axboe | d55e5f5 | 2019-12-11 16:12:15 -0700 | [diff] [blame] | 3552 | accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr)); |
| 3553 | accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2)); |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3554 | accept->flags = READ_ONCE(sqe->accept_flags); |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3555 | return 0; |
| 3556 | #else |
| 3557 | return -EOPNOTSUPP; |
| 3558 | #endif |
| 3559 | } |
Jens Axboe | 17f2fe3 | 2019-10-17 14:42:58 -0600 | [diff] [blame] | 3560 | |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3561 | #if defined(CONFIG_NET) |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3562 | static int __io_accept(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3563 | { |
| 3564 | struct io_accept *accept = &req->accept; |
| 3565 | unsigned file_flags; |
| 3566 | int ret; |
| 3567 | |
| 3568 | file_flags = force_nonblock ? O_NONBLOCK : 0; |
| 3569 | ret = __sys_accept4_file(req->file, file_flags, accept->addr, |
| 3570 | accept->addr_len, accept->flags); |
| 3571 | if (ret == -EAGAIN && force_nonblock) |
Jens Axboe | 17f2fe3 | 2019-10-17 14:42:58 -0600 | [diff] [blame] | 3572 | return -EAGAIN; |
Jens Axboe | 8e3cca1 | 2019-11-09 19:52:33 -0700 | [diff] [blame] | 3573 | if (ret == -ERESTARTSYS) |
| 3574 | ret = -EINTR; |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 3575 | if (ret < 0) |
| 3576 | req_set_fail_links(req); |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 3577 | io_cqring_add_event(req, ret); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3578 | io_put_req(req); |
Jens Axboe | 17f2fe3 | 2019-10-17 14:42:58 -0600 | [diff] [blame] | 3579 | return 0; |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3580 | } |
| 3581 | |
| 3582 | static void io_accept_finish(struct io_wq_work **workptr) |
| 3583 | { |
| 3584 | struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work); |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3585 | |
| 3586 | if (io_req_cancelled(req)) |
| 3587 | return; |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3588 | __io_accept(req, false); |
Pavel Begunkov | e9fd939 | 2020-03-04 16:14:12 +0300 | [diff] [blame] | 3589 | io_steal_work(req, workptr); |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3590 | } |
| 3591 | #endif |
| 3592 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3593 | static int io_accept(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3594 | { |
| 3595 | #if defined(CONFIG_NET) |
| 3596 | int ret; |
| 3597 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3598 | ret = __io_accept(req, force_nonblock); |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3599 | if (ret == -EAGAIN && force_nonblock) { |
| 3600 | req->work.func = io_accept_finish; |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 3601 | return -EAGAIN; |
| 3602 | } |
| 3603 | return 0; |
Jens Axboe | 17f2fe3 | 2019-10-17 14:42:58 -0600 | [diff] [blame] | 3604 | #else |
| 3605 | return -EOPNOTSUPP; |
| 3606 | #endif |
| 3607 | } |
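
/*
 * Usage sketch (not part of this file): IORING_OP_ACCEPT, matching the prep
 * above -- an optional sockaddr buffer in sqe->addr, a pointer to its socklen_t
 * in sqe->addr2 and SOCK_* flags in sqe->accept_flags. Both pointers must stay
 * valid until the request completes; a first -EAGAIN under force_nonblock is
 * re-queued through io_accept_finish(). The wrapper is illustrative; liburing
 * has io_uring_prep_accept().
 *
 *     #include <string.h>
 *     #include <sys/socket.h>
 *     #include <linux/io_uring.h>
 *
 *     static void sqe_accept(struct io_uring_sqe *sqe, int listenfd,
 *                            struct sockaddr *addr, socklen_t *addrlen,
 *                            int flags)
 *     {
 *             memset(sqe, 0, sizeof(*sqe));
 *             sqe->opcode = IORING_OP_ACCEPT;
 *             sqe->fd = listenfd;
 *             sqe->addr = (unsigned long) addr;       // may be NULL
 *             sqe->addr2 = (unsigned long) addrlen;   // ignored when addr is NULL
 *             sqe->accept_flags = flags;              // e.g. SOCK_CLOEXEC
 *     }
 */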
| 3608 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3609 | static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 3610 | { |
| 3611 | #if defined(CONFIG_NET) |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3612 | struct io_connect *conn = &req->connect; |
| 3613 | struct io_async_ctx *io = req->io; |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 3614 | |
Jens Axboe | 3fbb51c | 2019-12-20 08:51:52 -0700 | [diff] [blame] | 3615 | if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) |
| 3616 | return -EINVAL; |
| 3617 | if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags) |
| 3618 | return -EINVAL; |
| 3619 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3620 | conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr)); |
| 3621 | conn->addr_len = READ_ONCE(sqe->addr2); |
| 3622 | |
| 3623 | if (!io) |
| 3624 | return 0; |
| 3625 | |
| 3626 | return move_addr_to_kernel(conn->addr, conn->addr_len, |
Jens Axboe | 3fbb51c | 2019-12-20 08:51:52 -0700 | [diff] [blame] | 3627 | &io->connect.address); |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 3628 | #else |
Jens Axboe | 3fbb51c | 2019-12-20 08:51:52 -0700 | [diff] [blame] | 3629 | return -EOPNOTSUPP; |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 3630 | #endif |
| 3631 | } |
| 3632 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3633 | static int io_connect(struct io_kiocb *req, bool force_nonblock) |
Jens Axboe | f8e85cf | 2019-11-23 14:24:24 -0700 | [diff] [blame] | 3634 | { |
| 3635 | #if defined(CONFIG_NET) |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 3636 | struct io_async_ctx __io, *io; |
Jens Axboe | f8e85cf | 2019-11-23 14:24:24 -0700 | [diff] [blame] | 3637 | unsigned file_flags; |
Jens Axboe | 3fbb51c | 2019-12-20 08:51:52 -0700 | [diff] [blame] | 3638 | int ret; |
Jens Axboe | f8e85cf | 2019-11-23 14:24:24 -0700 | [diff] [blame] | 3639 | |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 3640 | if (req->io) { |
| 3641 | io = req->io; |
| 3642 | } else { |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3643 | ret = move_addr_to_kernel(req->connect.addr, |
| 3644 | req->connect.addr_len, |
| 3645 | &__io.connect.address); |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 3646 | if (ret) |
| 3647 | goto out; |
| 3648 | io = &__io; |
| 3649 | } |
| 3650 | |
Jens Axboe | 3fbb51c | 2019-12-20 08:51:52 -0700 | [diff] [blame] | 3651 | file_flags = force_nonblock ? O_NONBLOCK : 0; |
| 3652 | |
| 3653 | ret = __sys_connect_file(req->file, &io->connect.address, |
| 3654 | req->connect.addr_len, file_flags); |
Jens Axboe | 87f80d6 | 2019-12-03 11:23:54 -0700 | [diff] [blame] | 3655 | if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) { |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 3656 | if (req->io) |
| 3657 | return -EAGAIN; |
| 3658 | if (io_alloc_async_ctx(req)) { |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 3659 | ret = -ENOMEM; |
| 3660 | goto out; |
| 3661 | } |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 3662 | memcpy(&req->io->connect, &__io.connect, sizeof(__io.connect)); |
Jens Axboe | f8e85cf | 2019-11-23 14:24:24 -0700 | [diff] [blame] | 3663 | return -EAGAIN; |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 3664 | } |
Jens Axboe | f8e85cf | 2019-11-23 14:24:24 -0700 | [diff] [blame] | 3665 | if (ret == -ERESTARTSYS) |
| 3666 | ret = -EINTR; |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 3667 | out: |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 3668 | if (ret < 0) |
| 3669 | req_set_fail_links(req); |
Jens Axboe | f8e85cf | 2019-11-23 14:24:24 -0700 | [diff] [blame] | 3670 | io_cqring_add_event(req, ret); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 3671 | io_put_req(req); |
Jens Axboe | f8e85cf | 2019-11-23 14:24:24 -0700 | [diff] [blame] | 3672 | return 0; |
| 3673 | #else |
| 3674 | return -EOPNOTSUPP; |
| 3675 | #endif |
| 3676 | } |
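
/*
 * Usage sketch (not part of this file): IORING_OP_CONNECT, matching the prep
 * above -- the sockaddr pointer goes in sqe->addr and its length (a value, not
 * a pointer) in sqe->addr2. A first attempt that returns -EAGAIN or
 * -EINPROGRESS is retried from async context with the address stashed in the
 * request's async data, as io_connect() shows. The wrapper is illustrative;
 * liburing has io_uring_prep_connect().
 *
 *     #include <string.h>
 *     #include <sys/socket.h>
 *     #include <linux/io_uring.h>
 *
 *     static void sqe_connect(struct io_uring_sqe *sqe, int sockfd,
 *                             const struct sockaddr *addr, socklen_t addrlen)
 *     {
 *             memset(sqe, 0, sizeof(*sqe));
 *             sqe->opcode = IORING_OP_CONNECT;
 *             sqe->fd = sockfd;
 *             sqe->addr = (unsigned long) addr;
 *             sqe->addr2 = addrlen;
 *     }
 */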
| 3677 | |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 3678 | struct io_poll_table { |
| 3679 | struct poll_table_struct pt; |
| 3680 | struct io_kiocb *req; |
| 3681 | int error; |
| 3682 | }; |
| 3683 | |
| 3684 | static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, |
| 3685 | struct wait_queue_head *head) |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3686 | { |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 3687 | if (unlikely(poll->head)) { |
| 3688 | pt->error = -EINVAL; |
| 3689 | return; |
| 3690 | } |
| 3691 | |
| 3692 | pt->error = 0; |
| 3693 | poll->head = head; |
| 3694 | add_wait_queue(head, &poll->wait); |
| 3695 | } |
| 3696 | |
| 3697 | static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, |
| 3698 | struct poll_table_struct *p) |
| 3699 | { |
| 3700 | struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); |
| 3701 | |
| 3702 | __io_queue_proc(&pt->req->apoll->poll, pt, head); |
| 3703 | } |
| 3704 | |
| 3705 | static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, |
| 3706 | __poll_t mask, task_work_func_t func) |
| 3707 | { |
| 3708 | struct task_struct *tsk; |
| 3709 | |
| 3710 | /* for instances that support it, check for an event match first: */ |
| 3711 | if (mask && !(mask & poll->events)) |
| 3712 | return 0; |
| 3713 | |
| 3714 | trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); |
| 3715 | |
| 3716 | list_del_init(&poll->wait.entry); |
| 3717 | |
| 3718 | tsk = req->task; |
| 3719 | req->result = mask; |
| 3720 | init_task_work(&req->task_work, func); |
| 3721 | /* |
| 3722 | * If this fails, then the task is exiting. If that is the case, then |
| 3723 | * the exit check will ultimately cancel these work items. Hence we |
| 3724 | * don't need to check here and handle it specifically. |
| 3725 | */ |
| 3726 | task_work_add(tsk, &req->task_work, true); |
| 3727 | wake_up_process(tsk); |
| 3728 | return 1; |
| 3729 | } |
| 3730 | |
| 3731 | static void io_async_task_func(struct callback_head *cb) |
| 3732 | { |
| 3733 | struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); |
| 3734 | struct async_poll *apoll = req->apoll; |
| 3735 | struct io_ring_ctx *ctx = req->ctx; |
| 3736 | |
| 3737 | trace_io_uring_task_run(req->ctx, req->opcode, req->user_data); |
| 3738 | |
| 3739 | WARN_ON_ONCE(!list_empty(&req->apoll->poll.wait.entry)); |
| 3740 | |
| 3741 | if (hash_hashed(&req->hash_node)) { |
| 3742 | spin_lock_irq(&ctx->completion_lock); |
| 3743 | hash_del(&req->hash_node); |
| 3744 | spin_unlock_irq(&ctx->completion_lock); |
| 3745 | } |
| 3746 | |
| 3747 | /* restore ->work in case we need to retry again */ |
| 3748 | memcpy(&req->work, &apoll->work, sizeof(req->work)); |
| 3749 | |
| 3750 | __set_current_state(TASK_RUNNING); |
| 3751 | mutex_lock(&ctx->uring_lock); |
| 3752 | __io_queue_sqe(req, NULL); |
| 3753 | mutex_unlock(&ctx->uring_lock); |
| 3754 | |
| 3755 | kfree(apoll); |
| 3756 | } |
| 3757 | |
| 3758 | static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync, |
| 3759 | void *key) |
| 3760 | { |
| 3761 | struct io_kiocb *req = wait->private; |
| 3762 | struct io_poll_iocb *poll = &req->apoll->poll; |
| 3763 | |
| 3764 | trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data, |
| 3765 | key_to_poll(key)); |
| 3766 | |
| 3767 | return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func); |
| 3768 | } |
| 3769 | |
| 3770 | static void io_poll_req_insert(struct io_kiocb *req) |
| 3771 | { |
| 3772 | struct io_ring_ctx *ctx = req->ctx; |
| 3773 | struct hlist_head *list; |
| 3774 | |
| 3775 | list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)]; |
| 3776 | hlist_add_head(&req->hash_node, list); |
| 3777 | } |
| 3778 | |
| 3779 | static __poll_t __io_arm_poll_handler(struct io_kiocb *req, |
| 3780 | struct io_poll_iocb *poll, |
| 3781 | struct io_poll_table *ipt, __poll_t mask, |
| 3782 | wait_queue_func_t wake_func) |
| 3783 | __acquires(&ctx->completion_lock) |
| 3784 | { |
| 3785 | struct io_ring_ctx *ctx = req->ctx; |
| 3786 | bool cancel = false; |
| 3787 | |
| 3788 | poll->file = req->file; |
| 3789 | poll->head = NULL; |
| 3790 | poll->done = poll->canceled = false; |
| 3791 | poll->events = mask; |
| 3792 | |
| 3793 | ipt->pt._key = mask; |
| 3794 | ipt->req = req; |
| 3795 | ipt->error = -EINVAL; |
| 3796 | |
| 3797 | INIT_LIST_HEAD(&poll->wait.entry); |
| 3798 | init_waitqueue_func_entry(&poll->wait, wake_func); |
| 3799 | poll->wait.private = req; |
| 3800 | |
| 3801 | mask = vfs_poll(req->file, &ipt->pt) & poll->events; |
| 3802 | |
| 3803 | spin_lock_irq(&ctx->completion_lock); |
| 3804 | if (likely(poll->head)) { |
| 3805 | spin_lock(&poll->head->lock); |
| 3806 | if (unlikely(list_empty(&poll->wait.entry))) { |
| 3807 | if (ipt->error) |
| 3808 | cancel = true; |
| 3809 | ipt->error = 0; |
| 3810 | mask = 0; |
| 3811 | } |
| 3812 | if (mask || ipt->error) |
| 3813 | list_del_init(&poll->wait.entry); |
| 3814 | else if (cancel) |
| 3815 | WRITE_ONCE(poll->canceled, true); |
| 3816 | else if (!poll->done) /* actually waiting for an event */ |
| 3817 | io_poll_req_insert(req); |
| 3818 | spin_unlock(&poll->head->lock); |
| 3819 | } |
| 3820 | |
| 3821 | return mask; |
| 3822 | } |
| 3823 | |
| 3824 | static bool io_arm_poll_handler(struct io_kiocb *req) |
| 3825 | { |
| 3826 | const struct io_op_def *def = &io_op_defs[req->opcode]; |
| 3827 | struct io_ring_ctx *ctx = req->ctx; |
| 3828 | struct async_poll *apoll; |
| 3829 | struct io_poll_table ipt; |
| 3830 | __poll_t mask, ret; |
| 3831 | |
| 3832 | if (!req->file || !file_can_poll(req->file)) |
| 3833 | return false; |
| 3834 | if (req->flags & (REQ_F_MUST_PUNT | REQ_F_POLLED)) |
| 3835 | return false; |
| 3836 | if (!def->pollin && !def->pollout) |
| 3837 | return false; |
| 3838 | |
| 3839 | apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); |
| 3840 | if (unlikely(!apoll)) |
| 3841 | return false; |
| 3842 | |
| 3843 | req->flags |= REQ_F_POLLED; |
| 3844 | memcpy(&apoll->work, &req->work, sizeof(req->work)); |
| 3845 | |
| 3846 | /* |
| 3847 | * Don't need a reference here, as we're adding it to the task's |
| 3848 | * task_works list. If the task exits, the list is pruned. |
| 3849 | */ |
| 3850 | req->task = current; |
| 3851 | req->apoll = apoll; |
| 3852 | INIT_HLIST_NODE(&req->hash_node); |
| 3853 | |
Nathan Chancellor | 8755d97 | 2020-03-02 16:01:19 -0700 | [diff] [blame] | 3854 | mask = 0; |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 3855 | if (def->pollin) |
Nathan Chancellor | 8755d97 | 2020-03-02 16:01:19 -0700 | [diff] [blame] | 3856 | mask |= POLLIN | POLLRDNORM; |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 3857 | if (def->pollout) |
| 3858 | mask |= POLLOUT | POLLWRNORM; |
| 3859 | mask |= POLLERR | POLLPRI; |
| 3860 | |
| 3861 | ipt.pt._qproc = io_async_queue_proc; |
| 3862 | |
| 3863 | ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, |
| 3864 | io_async_wake); |
| 3865 | if (ret) { |
| 3866 | ipt.error = 0; |
| 3867 | apoll->poll.done = true; |
| 3868 | spin_unlock_irq(&ctx->completion_lock); |
| 3869 | memcpy(&req->work, &apoll->work, sizeof(req->work)); |
| 3870 | kfree(apoll); |
| 3871 | return false; |
| 3872 | } |
| 3873 | spin_unlock_irq(&ctx->completion_lock); |
| 3874 | trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask, |
| 3875 | apoll->poll.events); |
| 3876 | return true; |
| 3877 | } |
| 3878 | |
| 3879 | static bool __io_poll_remove_one(struct io_kiocb *req, |
| 3880 | struct io_poll_iocb *poll) |
| 3881 | { |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 3882 | bool do_complete = false; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3883 | |
| 3884 | spin_lock(&poll->head->lock); |
| 3885 | WRITE_ONCE(poll->canceled, true); |
Jens Axboe | 392edb4 | 2019-12-09 17:52:20 -0700 | [diff] [blame] | 3886 | if (!list_empty(&poll->wait.entry)) { |
| 3887 | list_del_init(&poll->wait.entry); |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 3888 | do_complete = true; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3889 | } |
| 3890 | spin_unlock(&poll->head->lock); |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 3891 | return do_complete; |
| 3892 | } |
| 3893 | |
| 3894 | static bool io_poll_remove_one(struct io_kiocb *req) |
| 3895 | { |
| 3896 | bool do_complete; |
| 3897 | |
| 3898 | if (req->opcode == IORING_OP_POLL_ADD) { |
| 3899 | do_complete = __io_poll_remove_one(req, &req->poll); |
| 3900 | } else { |
| 3901 | /* non-poll requests have submit ref still */ |
| 3902 | do_complete = __io_poll_remove_one(req, &req->apoll->poll); |
| 3903 | if (do_complete) |
| 3904 | io_put_req(req); |
| 3905 | } |
| 3906 | |
Jens Axboe | 78076bb | 2019-12-04 19:56:40 -0700 | [diff] [blame] | 3907 | hash_del(&req->hash_node); |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 3908 | |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 3909 | if (do_complete) { |
| 3910 | io_cqring_fill_event(req, -ECANCELED); |
| 3911 | io_commit_cqring(req->ctx); |
| 3912 | req->flags |= REQ_F_COMP_LOCKED; |
| 3913 | io_put_req(req); |
| 3914 | } |
| 3915 | |
| 3916 | return do_complete; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3917 | } |
| 3918 | |
| 3919 | static void io_poll_remove_all(struct io_ring_ctx *ctx) |
| 3920 | { |
Jens Axboe | 78076bb | 2019-12-04 19:56:40 -0700 | [diff] [blame] | 3921 | struct hlist_node *tmp; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3922 | struct io_kiocb *req; |
Jens Axboe | 78076bb | 2019-12-04 19:56:40 -0700 | [diff] [blame] | 3923 | int i; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3924 | |
| 3925 | spin_lock_irq(&ctx->completion_lock); |
Jens Axboe | 78076bb | 2019-12-04 19:56:40 -0700 | [diff] [blame] | 3926 | for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { |
| 3927 | struct hlist_head *list; |
| 3928 | |
| 3929 | list = &ctx->cancel_hash[i]; |
| 3930 | hlist_for_each_entry_safe(req, tmp, list, hash_node) |
| 3931 | io_poll_remove_one(req); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3932 | } |
| 3933 | spin_unlock_irq(&ctx->completion_lock); |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 3934 | |
| 3935 | io_cqring_ev_posted(ctx); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3936 | } |
| 3937 | |
Jens Axboe | 47f4676 | 2019-11-09 17:43:02 -0700 | [diff] [blame] | 3938 | static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr) |
| 3939 | { |
Jens Axboe | 78076bb | 2019-12-04 19:56:40 -0700 | [diff] [blame] | 3940 | struct hlist_head *list; |
Jens Axboe | 47f4676 | 2019-11-09 17:43:02 -0700 | [diff] [blame] | 3941 | struct io_kiocb *req; |
| 3942 | |
Jens Axboe | 78076bb | 2019-12-04 19:56:40 -0700 | [diff] [blame] | 3943 | list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)]; |
| 3944 | hlist_for_each_entry(req, list, hash_node) { |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 3945 | if (sqe_addr != req->user_data) |
| 3946 | continue; |
| 3947 | if (io_poll_remove_one(req)) |
Jens Axboe | eac406c | 2019-11-14 12:09:58 -0700 | [diff] [blame] | 3948 | return 0; |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 3949 | return -EALREADY; |
Jens Axboe | 47f4676 | 2019-11-09 17:43:02 -0700 | [diff] [blame] | 3950 | } |
| 3951 | |
| 3952 | return -ENOENT; |
| 3953 | } |
| 3954 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 3955 | static int io_poll_remove_prep(struct io_kiocb *req, |
| 3956 | const struct io_uring_sqe *sqe) |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3957 | { |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3958 | if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) |
| 3959 | return -EINVAL; |
| 3960 | if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || |
| 3961 | sqe->poll_events) |
| 3962 | return -EINVAL; |
| 3963 | |
Jens Axboe | 0969e78 | 2019-12-17 18:40:57 -0700 | [diff] [blame] | 3964 | req->poll.addr = READ_ONCE(sqe->addr); |
Jens Axboe | 0969e78 | 2019-12-17 18:40:57 -0700 | [diff] [blame] | 3965 | return 0; |
| 3966 | } |
| 3967 | |
| 3968 | /* |
| 3969 | * Find a running poll command that matches one specified in sqe->addr, |
| 3970 | * and remove it if found. |
| 3971 | */ |
| 3972 | static int io_poll_remove(struct io_kiocb *req) |
| 3973 | { |
| 3974 | struct io_ring_ctx *ctx = req->ctx; |
| 3975 | u64 addr; |
| 3976 | int ret; |
| 3977 | |
Jens Axboe | 0969e78 | 2019-12-17 18:40:57 -0700 | [diff] [blame] | 3978 | addr = req->poll.addr; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3979 | spin_lock_irq(&ctx->completion_lock); |
Jens Axboe | 0969e78 | 2019-12-17 18:40:57 -0700 | [diff] [blame] | 3980 | ret = io_poll_cancel(ctx, addr); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3981 | spin_unlock_irq(&ctx->completion_lock); |
| 3982 | |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 3983 | io_cqring_add_event(req, ret); |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 3984 | if (ret < 0) |
| 3985 | req_set_fail_links(req); |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 3986 | io_put_req(req); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3987 | return 0; |
| 3988 | } |
| 3989 | |
Jens Axboe | b0dd8a4 | 2019-11-18 12:14:54 -0700 | [diff] [blame] | 3990 | static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3991 | { |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 3992 | struct io_ring_ctx *ctx = req->ctx; |
| 3993 | |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 3994 | req->poll.done = true; |
Pavel Begunkov | b0a2034 | 2020-02-28 10:36:35 +0300 | [diff] [blame] | 3995 | io_cqring_fill_event(req, error ? error : mangle_poll(mask)); |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 3996 | io_commit_cqring(ctx); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 3997 | } |
| 3998 | |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 3999 | static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt) |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4000 | { |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4001 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4002 | |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4003 | spin_lock_irq(&ctx->completion_lock); |
Jens Axboe | 78076bb | 2019-12-04 19:56:40 -0700 | [diff] [blame] | 4004 | hash_del(&req->hash_node); |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 4005 | io_poll_complete(req, req->result, 0); |
| 4006 | req->flags |= REQ_F_COMP_LOCKED; |
| 4007 | io_put_req_find_next(req, nxt); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4008 | spin_unlock_irq(&ctx->completion_lock); |
| 4009 | |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 4010 | io_cqring_ev_posted(ctx); |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 4011 | } |
Jens Axboe | 89723d0 | 2019-11-05 15:32:58 -0700 | [diff] [blame] | 4012 | |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 4013 | static void io_poll_task_func(struct callback_head *cb) |
| 4014 | { |
| 4015 | struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); |
| 4016 | struct io_kiocb *nxt = NULL; |
| 4017 | |
| 4018 | io_poll_task_handler(req, &nxt); |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 4019 | if (nxt) { |
| 4020 | struct io_ring_ctx *ctx = nxt->ctx; |
| 4021 | |
| 4022 | mutex_lock(&ctx->uring_lock); |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 4023 | __io_queue_sqe(nxt, NULL); |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 4024 | mutex_unlock(&ctx->uring_lock); |
| 4025 | } |
Jens Axboe | f0b493e | 2020-02-01 21:30:11 -0700 | [diff] [blame] | 4026 | } |
| 4027 | |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4028 | static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, |
| 4029 | void *key) |
| 4030 | { |
Jens Axboe | c2f2eb7 | 2020-02-10 09:07:05 -0700 | [diff] [blame] | 4031 | struct io_kiocb *req = wait->private; |
| 4032 | struct io_poll_iocb *poll = &req->poll; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4033 | |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 4034 | return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4035 | } |
| 4036 | |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4037 | static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, |
| 4038 | struct poll_table_struct *p) |
| 4039 | { |
| 4040 | struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); |
| 4041 | |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 4042 | __io_queue_proc(&pt->req->poll, pt, head); |
Jens Axboe | eac406c | 2019-11-14 12:09:58 -0700 | [diff] [blame] | 4043 | } |
| 4044 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4045 | static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4046 | { |
| 4047 | struct io_poll_iocb *poll = &req->poll; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4048 | u16 events; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4049 | |
| 4050 | if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) |
| 4051 | return -EINVAL; |
| 4052 | if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index) |
| 4053 | return -EINVAL; |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 4054 | if (!poll->file) |
| 4055 | return -EBADF; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4056 | |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4057 | events = READ_ONCE(sqe->poll_events); |
| 4058 | poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; |
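/*
 * demangle_poll() turns the 16-bit userspace POLL* mask from the sqe into
 * kernel EPOLL* (__poll_t) bits; EPOLLERR and EPOLLHUP are or'ed in so that
 * error and hangup always complete the request, mirroring poll(2) semantics
 * where those conditions cannot be masked out.
 */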
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 4059 | |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 4060 | /* |
 | 4061 | * Don't need a reference here, as we're adding it to the task's |
| 4062 | * task_works list. If the task exits, the list is pruned. |
| 4063 | */ |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 4064 | req->task = current; |
Jens Axboe | 0969e78 | 2019-12-17 18:40:57 -0700 | [diff] [blame] | 4065 | return 0; |
| 4066 | } |
| 4067 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4068 | static int io_poll_add(struct io_kiocb *req) |
Jens Axboe | 0969e78 | 2019-12-17 18:40:57 -0700 | [diff] [blame] | 4069 | { |
| 4070 | struct io_poll_iocb *poll = &req->poll; |
| 4071 | struct io_ring_ctx *ctx = req->ctx; |
| 4072 | struct io_poll_table ipt; |
Jens Axboe | 0969e78 | 2019-12-17 18:40:57 -0700 | [diff] [blame] | 4073 | __poll_t mask; |
Jens Axboe | 0969e78 | 2019-12-17 18:40:57 -0700 | [diff] [blame] | 4074 | |
Jens Axboe | 78076bb | 2019-12-04 19:56:40 -0700 | [diff] [blame] | 4075 | INIT_HLIST_NODE(&req->hash_node); |
Jens Axboe | 3670324 | 2019-07-25 10:20:18 -0600 | [diff] [blame] | 4076 | INIT_LIST_HEAD(&req->list); |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 4077 | ipt.pt._qproc = io_poll_queue_proc; |
Jens Axboe | 3670324 | 2019-07-25 10:20:18 -0600 | [diff] [blame] | 4078 | |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 4079 | mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events, |
| 4080 | io_poll_wake); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4081 | |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 4082 | if (mask) { /* no async, we'd stolen it */ |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 4083 | ipt.error = 0; |
Jens Axboe | b0dd8a4 | 2019-11-18 12:14:54 -0700 | [diff] [blame] | 4084 | io_poll_complete(req, mask, 0); |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 4085 | } |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4086 | spin_unlock_irq(&ctx->completion_lock); |
| 4087 | |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 4088 | if (mask) { |
| 4089 | io_cqring_ev_posted(ctx); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4090 | io_put_req(req); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4091 | } |
Jens Axboe | 8c83878 | 2019-03-12 15:48:16 -0600 | [diff] [blame] | 4092 | return ipt.error; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4093 | } |
| 4094 | |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4095 | static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) |
| 4096 | { |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 4097 | struct io_timeout_data *data = container_of(timer, |
| 4098 | struct io_timeout_data, timer); |
| 4099 | struct io_kiocb *req = data->req; |
| 4100 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4101 | unsigned long flags; |
| 4102 | |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4103 | atomic_inc(&ctx->cq_timeouts); |
| 4104 | |
| 4105 | spin_lock_irqsave(&ctx->completion_lock, flags); |
zhangyi (F) | ef03681 | 2019-10-23 15:10:08 +0800 | [diff] [blame] | 4106 | /* |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4107 | * We could be racing with timeout deletion. If the list is empty, |
| 4108 | * then timeout lookup already found it and will be handling it. |
zhangyi (F) | ef03681 | 2019-10-23 15:10:08 +0800 | [diff] [blame] | 4109 | */ |
Jens Axboe | 842f961 | 2019-10-29 12:34:10 -0600 | [diff] [blame] | 4110 | if (!list_empty(&req->list)) { |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4111 | struct io_kiocb *prev; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4112 | |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4113 | /* |
 | 4114 | * Adjust the sequence of reqs queued before the current one, as |
Brian Gianforcaro | d195a66 | 2019-12-13 03:09:50 -0800 | [diff] [blame] | 4115 | * this req will consume a slot in the cq_ring and the cq_tail |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4116 | * pointer will be advanced; otherwise other timeout reqs may |
 | 4117 | * complete early, without waiting for enough wait_nr completions. |
| 4118 | */ |
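/*
 * Concretely (illustrative numbers): a timeout still waiting for 5 more
 * completions has a target of head + 4 (count - 1 past the SQ head at prep
 * time). The -ETIME CQE posted below advances cq_tail without a real
 * request completing, so that timeout would otherwise trigger after only 4
 * real completions; bumping its sequence by one keeps its wait_nr
 * accounting intact.
 */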
| 4119 | prev = req; |
| 4120 | list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list) |
| 4121 | prev->sequence++; |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4122 | list_del_init(&req->list); |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4123 | } |
Jens Axboe | 842f961 | 2019-10-29 12:34:10 -0600 | [diff] [blame] | 4124 | |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 4125 | io_cqring_fill_event(req, -ETIME); |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4126 | io_commit_cqring(ctx); |
| 4127 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
| 4128 | |
| 4129 | io_cqring_ev_posted(ctx); |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 4130 | req_set_fail_links(req); |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4131 | io_put_req(req); |
| 4132 | return HRTIMER_NORESTART; |
| 4133 | } |
| 4134 | |
Jens Axboe | 47f4676 | 2019-11-09 17:43:02 -0700 | [diff] [blame] | 4135 | static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data) |
| 4136 | { |
| 4137 | struct io_kiocb *req; |
| 4138 | int ret = -ENOENT; |
| 4139 | |
| 4140 | list_for_each_entry(req, &ctx->timeout_list, list) { |
| 4141 | if (user_data == req->user_data) { |
| 4142 | list_del_init(&req->list); |
| 4143 | ret = 0; |
| 4144 | break; |
| 4145 | } |
| 4146 | } |
| 4147 | |
| 4148 | if (ret == -ENOENT) |
| 4149 | return ret; |
| 4150 | |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 4151 | ret = hrtimer_try_to_cancel(&req->io->timeout.timer); |
Jens Axboe | 47f4676 | 2019-11-09 17:43:02 -0700 | [diff] [blame] | 4152 | if (ret == -1) |
| 4153 | return -EALREADY; |
| 4154 | |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 4155 | req_set_fail_links(req); |
Jens Axboe | 47f4676 | 2019-11-09 17:43:02 -0700 | [diff] [blame] | 4156 | io_cqring_fill_event(req, -ECANCELED); |
| 4157 | io_put_req(req); |
| 4158 | return 0; |
| 4159 | } |
| 4160 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4161 | static int io_timeout_remove_prep(struct io_kiocb *req, |
| 4162 | const struct io_uring_sqe *sqe) |
Jens Axboe | b29472e | 2019-12-17 18:50:29 -0700 | [diff] [blame] | 4163 | { |
Jens Axboe | b29472e | 2019-12-17 18:50:29 -0700 | [diff] [blame] | 4164 | if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) |
| 4165 | return -EINVAL; |
| 4166 | if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len) |
| 4167 | return -EINVAL; |
| 4168 | |
| 4169 | req->timeout.addr = READ_ONCE(sqe->addr); |
| 4170 | req->timeout.flags = READ_ONCE(sqe->timeout_flags); |
| 4171 | if (req->timeout.flags) |
| 4172 | return -EINVAL; |
| 4173 | |
Jens Axboe | b29472e | 2019-12-17 18:50:29 -0700 | [diff] [blame] | 4174 | return 0; |
| 4175 | } |
| 4176 | |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4177 | /* |
| 4178 | * Remove or update an existing timeout command |
| 4179 | */ |
Jens Axboe | fc4df99 | 2019-12-10 14:38:45 -0700 | [diff] [blame] | 4180 | static int io_timeout_remove(struct io_kiocb *req) |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4181 | { |
| 4182 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 47f4676 | 2019-11-09 17:43:02 -0700 | [diff] [blame] | 4183 | int ret; |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4184 | |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4185 | spin_lock_irq(&ctx->completion_lock); |
Jens Axboe | b29472e | 2019-12-17 18:50:29 -0700 | [diff] [blame] | 4186 | ret = io_timeout_cancel(ctx, req->timeout.addr); |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4187 | |
Jens Axboe | 47f4676 | 2019-11-09 17:43:02 -0700 | [diff] [blame] | 4188 | io_cqring_fill_event(req, ret); |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4189 | io_commit_cqring(ctx); |
| 4190 | spin_unlock_irq(&ctx->completion_lock); |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4191 | io_cqring_ev_posted(ctx); |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 4192 | if (ret < 0) |
| 4193 | req_set_fail_links(req); |
Jackie Liu | ec9c02a | 2019-11-08 23:50:36 +0800 | [diff] [blame] | 4194 | io_put_req(req); |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4195 | return 0; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4196 | } |
| 4197 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4198 | static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 4199 | bool is_timeout_link) |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4200 | { |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 4201 | struct io_timeout_data *data; |
Jens Axboe | a41525a | 2019-10-15 16:48:15 -0600 | [diff] [blame] | 4202 | unsigned flags; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4203 | |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 4204 | if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4205 | return -EINVAL; |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 4206 | if (sqe->ioprio || sqe->buf_index || sqe->len != 1) |
Jens Axboe | a41525a | 2019-10-15 16:48:15 -0600 | [diff] [blame] | 4207 | return -EINVAL; |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 4208 | if (sqe->off && is_timeout_link) |
| 4209 | return -EINVAL; |
Jens Axboe | a41525a | 2019-10-15 16:48:15 -0600 | [diff] [blame] | 4210 | flags = READ_ONCE(sqe->timeout_flags); |
| 4211 | if (flags & ~IORING_TIMEOUT_ABS) |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4212 | return -EINVAL; |
Arnd Bergmann | bdf2007 | 2019-10-01 09:53:29 -0600 | [diff] [blame] | 4213 | |
Jens Axboe | 26a6167 | 2019-12-20 09:02:01 -0700 | [diff] [blame] | 4214 | req->timeout.count = READ_ONCE(sqe->off); |
| 4215 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4216 | if (!req->io && io_alloc_async_ctx(req)) |
Jens Axboe | 26a6167 | 2019-12-20 09:02:01 -0700 | [diff] [blame] | 4217 | return -ENOMEM; |
| 4218 | |
| 4219 | data = &req->io->timeout; |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 4220 | data->req = req; |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 4221 | req->flags |= REQ_F_TIMEOUT; |
| 4222 | |
| 4223 | if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr))) |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4224 | return -EFAULT; |
| 4225 | |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4226 | if (flags & IORING_TIMEOUT_ABS) |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 4227 | data->mode = HRTIMER_MODE_ABS; |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4228 | else |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 4229 | data->mode = HRTIMER_MODE_REL; |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4230 | |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 4231 | hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode); |
| 4232 | return 0; |
| 4233 | } |
| 4234 | |
Jens Axboe | fc4df99 | 2019-12-10 14:38:45 -0700 | [diff] [blame] | 4235 | static int io_timeout(struct io_kiocb *req) |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 4236 | { |
| 4237 | unsigned count; |
| 4238 | struct io_ring_ctx *ctx = req->ctx; |
| 4239 | struct io_timeout_data *data; |
| 4240 | struct list_head *entry; |
| 4241 | unsigned span = 0; |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 4242 | |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 4243 | data = &req->io->timeout; |
Jens Axboe | 93bd25b | 2019-11-11 23:34:31 -0700 | [diff] [blame] | 4244 | |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4245 | /* |
 | 4246 | * sqe->off holds how many events need to occur for this |
Jens Axboe | 93bd25b | 2019-11-11 23:34:31 -0700 | [diff] [blame] | 4247 | * timeout event to be satisfied. If it isn't set, then this is |
 | 4248 | * a pure timeout request and the sequence isn't used. |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4249 | */ |
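/*
 * For example (illustrative): off == 3 asks for completion once 3 other
 * CQEs have been posted, or when the timer expires, whichever happens
 * first; off == 0 is a plain relative or absolute timer with no
 * completion-count component.
 */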
Jens Axboe | 26a6167 | 2019-12-20 09:02:01 -0700 | [diff] [blame] | 4250 | count = req->timeout.count; |
Jens Axboe | 93bd25b | 2019-11-11 23:34:31 -0700 | [diff] [blame] | 4251 | if (!count) { |
| 4252 | req->flags |= REQ_F_TIMEOUT_NOSEQ; |
| 4253 | spin_lock_irq(&ctx->completion_lock); |
| 4254 | entry = ctx->timeout_list.prev; |
| 4255 | goto add; |
| 4256 | } |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4257 | |
| 4258 | req->sequence = ctx->cached_sq_head + count - 1; |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 4259 | data->seq_offset = count; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4260 | |
| 4261 | /* |
| 4262 | * Insertion sort, ensuring the first entry in the list is always |
| 4263 | * the one we need first. |
| 4264 | */ |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4265 | spin_lock_irq(&ctx->completion_lock); |
| 4266 | list_for_each_prev(entry, &ctx->timeout_list) { |
| 4267 | struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list); |
yangerkun | 5da0fb1 | 2019-10-15 21:59:29 +0800 | [diff] [blame] | 4268 | unsigned nxt_sq_head; |
| 4269 | long long tmp, tmp_nxt; |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 4270 | u32 nxt_offset = nxt->io->timeout.seq_offset; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4271 | |
Jens Axboe | 93bd25b | 2019-11-11 23:34:31 -0700 | [diff] [blame] | 4272 | if (nxt->flags & REQ_F_TIMEOUT_NOSEQ) |
| 4273 | continue; |
| 4274 | |
yangerkun | 5da0fb1 | 2019-10-15 21:59:29 +0800 | [diff] [blame] | 4275 | /* |
| 4276 | * Since cached_sq_head + count - 1 can overflow, use type long |
| 4277 | * long to store it. |
| 4278 | */ |
| 4279 | tmp = (long long)ctx->cached_sq_head + count - 1; |
Pavel Begunkov | cc42e0a | 2019-11-25 23:14:38 +0300 | [diff] [blame] | 4280 | nxt_sq_head = nxt->sequence - nxt_offset + 1; |
| 4281 | tmp_nxt = (long long)nxt_sq_head + nxt_offset - 1; |
yangerkun | 5da0fb1 | 2019-10-15 21:59:29 +0800 | [diff] [blame] | 4282 | |
| 4283 | /* |
 | 4284 | * cached_sq_head may overflow, but it can never overflow twice |
 | 4285 | * while there is still a valid timeout req pending. |
| 4286 | */ |
| 4287 | if (ctx->cached_sq_head < nxt_sq_head) |
yangerkun | 8b07a65 | 2019-10-17 12:12:35 +0800 | [diff] [blame] | 4288 | tmp += UINT_MAX; |
yangerkun | 5da0fb1 | 2019-10-15 21:59:29 +0800 | [diff] [blame] | 4289 | |
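/*
 * Worked example of the wrap handling (illustrative numbers): say nxt was
 * prepped when the SQ head was 0xfffffff0 with an offset of 4, giving
 * tmp_nxt == 0xfffffff3. If the head has since wrapped to 0x10 and this
 * req has count == 3, tmp starts out at 0x12; adding UINT_MAX makes it
 * 0x100000011, so the comparison below correctly sorts the new timeout
 * after nxt despite the wrap.
 */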
zhangyi (F) | a1f58ba | 2019-10-23 15:10:09 +0800 | [diff] [blame] | 4290 | if (tmp > tmp_nxt) |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4291 | break; |
zhangyi (F) | a1f58ba | 2019-10-23 15:10:09 +0800 | [diff] [blame] | 4292 | |
| 4293 | /* |
 | 4294 | * The sequences of the reqs after the inserted one, and of the |
 | 4295 | * inserted one itself, must be adjusted because each timeout req consumes a slot. |
| 4296 | */ |
| 4297 | span++; |
| 4298 | nxt->sequence++; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4299 | } |
zhangyi (F) | a1f58ba | 2019-10-23 15:10:09 +0800 | [diff] [blame] | 4300 | req->sequence -= span; |
Jens Axboe | 93bd25b | 2019-11-11 23:34:31 -0700 | [diff] [blame] | 4301 | add: |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4302 | list_add(&req->list, entry); |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 4303 | data->timer.function = io_timeout_fn; |
| 4304 | hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode); |
Jens Axboe | 842f961 | 2019-10-29 12:34:10 -0600 | [diff] [blame] | 4305 | spin_unlock_irq(&ctx->completion_lock); |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4306 | return 0; |
| 4307 | } |
| 4308 | |
Jens Axboe | 62755e3 | 2019-10-28 21:49:21 -0600 | [diff] [blame] | 4309 | static bool io_cancel_cb(struct io_wq_work *work, void *data) |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 4310 | { |
Jens Axboe | 62755e3 | 2019-10-28 21:49:21 -0600 | [diff] [blame] | 4311 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 4312 | |
Jens Axboe | 62755e3 | 2019-10-28 21:49:21 -0600 | [diff] [blame] | 4313 | return req->user_data == (unsigned long) data; |
| 4314 | } |
| 4315 | |
Jens Axboe | e977d6d | 2019-11-05 12:39:45 -0700 | [diff] [blame] | 4316 | static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr) |
Jens Axboe | 62755e3 | 2019-10-28 21:49:21 -0600 | [diff] [blame] | 4317 | { |
Jens Axboe | 62755e3 | 2019-10-28 21:49:21 -0600 | [diff] [blame] | 4318 | enum io_wq_cancel cancel_ret; |
Jens Axboe | 62755e3 | 2019-10-28 21:49:21 -0600 | [diff] [blame] | 4319 | int ret = 0; |
| 4320 | |
Jens Axboe | 62755e3 | 2019-10-28 21:49:21 -0600 | [diff] [blame] | 4321 | cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr); |
| 4322 | switch (cancel_ret) { |
| 4323 | case IO_WQ_CANCEL_OK: |
| 4324 | ret = 0; |
| 4325 | break; |
| 4326 | case IO_WQ_CANCEL_RUNNING: |
| 4327 | ret = -EALREADY; |
| 4328 | break; |
| 4329 | case IO_WQ_CANCEL_NOTFOUND: |
| 4330 | ret = -ENOENT; |
| 4331 | break; |
| 4332 | } |
| 4333 | |
Jens Axboe | e977d6d | 2019-11-05 12:39:45 -0700 | [diff] [blame] | 4334 | return ret; |
| 4335 | } |
| 4336 | |
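/*
 * Cancellation is attempted in order: the io-wq queue first (the request
 * may still be queued or running there), then the timeout list, then the
 * poll hash. The first lookup that doesn't return -ENOENT decides the
 * result posted to the CQ ring.
 */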
Jens Axboe | 47f4676 | 2019-11-09 17:43:02 -0700 | [diff] [blame] | 4337 | static void io_async_find_and_cancel(struct io_ring_ctx *ctx, |
| 4338 | struct io_kiocb *req, __u64 sqe_addr, |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4339 | int success_ret) |
Jens Axboe | 47f4676 | 2019-11-09 17:43:02 -0700 | [diff] [blame] | 4340 | { |
| 4341 | unsigned long flags; |
| 4342 | int ret; |
| 4343 | |
| 4344 | ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr); |
| 4345 | if (ret != -ENOENT) { |
| 4346 | spin_lock_irqsave(&ctx->completion_lock, flags); |
| 4347 | goto done; |
| 4348 | } |
| 4349 | |
| 4350 | spin_lock_irqsave(&ctx->completion_lock, flags); |
| 4351 | ret = io_timeout_cancel(ctx, sqe_addr); |
| 4352 | if (ret != -ENOENT) |
| 4353 | goto done; |
| 4354 | ret = io_poll_cancel(ctx, sqe_addr); |
| 4355 | done: |
Jens Axboe | b0dd8a4 | 2019-11-18 12:14:54 -0700 | [diff] [blame] | 4356 | if (!ret) |
| 4357 | ret = success_ret; |
Jens Axboe | 47f4676 | 2019-11-09 17:43:02 -0700 | [diff] [blame] | 4358 | io_cqring_fill_event(req, ret); |
| 4359 | io_commit_cqring(ctx); |
| 4360 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
| 4361 | io_cqring_ev_posted(ctx); |
| 4362 | |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 4363 | if (ret < 0) |
| 4364 | req_set_fail_links(req); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4365 | io_put_req(req); |
Jens Axboe | 47f4676 | 2019-11-09 17:43:02 -0700 | [diff] [blame] | 4366 | } |
| 4367 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4368 | static int io_async_cancel_prep(struct io_kiocb *req, |
| 4369 | const struct io_uring_sqe *sqe) |
Jens Axboe | e977d6d | 2019-11-05 12:39:45 -0700 | [diff] [blame] | 4370 | { |
Jens Axboe | fbf2384 | 2019-12-17 18:45:56 -0700 | [diff] [blame] | 4371 | if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) |
Jens Axboe | e977d6d | 2019-11-05 12:39:45 -0700 | [diff] [blame] | 4372 | return -EINVAL; |
| 4373 | if (sqe->flags || sqe->ioprio || sqe->off || sqe->len || |
| 4374 | sqe->cancel_flags) |
| 4375 | return -EINVAL; |
| 4376 | |
Jens Axboe | fbf2384 | 2019-12-17 18:45:56 -0700 | [diff] [blame] | 4377 | req->cancel.addr = READ_ONCE(sqe->addr); |
| 4378 | return 0; |
| 4379 | } |
| 4380 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4381 | static int io_async_cancel(struct io_kiocb *req) |
Jens Axboe | fbf2384 | 2019-12-17 18:45:56 -0700 | [diff] [blame] | 4382 | { |
| 4383 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | fbf2384 | 2019-12-17 18:45:56 -0700 | [diff] [blame] | 4384 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4385 | io_async_find_and_cancel(ctx, req, req->cancel.addr, 0); |
Jens Axboe | 62755e3 | 2019-10-28 21:49:21 -0600 | [diff] [blame] | 4386 | return 0; |
| 4387 | } |
| 4388 | |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 4389 | static int io_files_update_prep(struct io_kiocb *req, |
| 4390 | const struct io_uring_sqe *sqe) |
| 4391 | { |
| 4392 | if (sqe->flags || sqe->ioprio || sqe->rw_flags) |
| 4393 | return -EINVAL; |
| 4394 | |
| 4395 | req->files_update.offset = READ_ONCE(sqe->off); |
| 4396 | req->files_update.nr_args = READ_ONCE(sqe->len); |
| 4397 | if (!req->files_update.nr_args) |
| 4398 | return -EINVAL; |
| 4399 | req->files_update.arg = READ_ONCE(sqe->addr); |
| 4400 | return 0; |
| 4401 | } |
| 4402 | |
| 4403 | static int io_files_update(struct io_kiocb *req, bool force_nonblock) |
| 4404 | { |
| 4405 | struct io_ring_ctx *ctx = req->ctx; |
| 4406 | struct io_uring_files_update up; |
| 4407 | int ret; |
| 4408 | |
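	/*
	 * __io_sqe_files_update() takes uring_lock and may block, so a file
	 * table update is never attempted from the non-blocking submission
	 * path; it is always punted and retried from async context.
	 */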
Jens Axboe | f86cd20 | 2020-01-29 13:46:44 -0700 | [diff] [blame] | 4409 | if (force_nonblock) |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 4410 | return -EAGAIN; |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 4411 | |
| 4412 | up.offset = req->files_update.offset; |
| 4413 | up.fds = req->files_update.arg; |
| 4414 | |
| 4415 | mutex_lock(&ctx->uring_lock); |
| 4416 | ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args); |
| 4417 | mutex_unlock(&ctx->uring_lock); |
| 4418 | |
| 4419 | if (ret < 0) |
| 4420 | req_set_fail_links(req); |
| 4421 | io_cqring_add_event(req, ret); |
| 4422 | io_put_req(req); |
| 4423 | return 0; |
| 4424 | } |
| 4425 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4426 | static int io_req_defer_prep(struct io_kiocb *req, |
| 4427 | const struct io_uring_sqe *sqe) |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 4428 | { |
Jens Axboe | e781573 | 2019-12-17 19:45:06 -0700 | [diff] [blame] | 4429 | ssize_t ret = 0; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 4430 | |
Jens Axboe | f86cd20 | 2020-01-29 13:46:44 -0700 | [diff] [blame] | 4431 | if (io_op_defs[req->opcode].file_table) { |
| 4432 | ret = io_grab_files(req); |
| 4433 | if (unlikely(ret)) |
| 4434 | return ret; |
| 4435 | } |
| 4436 | |
Jens Axboe | cccf0ee | 2020-01-27 16:34:48 -0700 | [diff] [blame] | 4437 | io_req_work_grab_env(req, &io_op_defs[req->opcode]); |
| 4438 | |
Jens Axboe | d625c6e | 2019-12-17 19:53:05 -0700 | [diff] [blame] | 4439 | switch (req->opcode) { |
Jens Axboe | e781573 | 2019-12-17 19:45:06 -0700 | [diff] [blame] | 4440 | case IORING_OP_NOP: |
| 4441 | break; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 4442 | case IORING_OP_READV: |
| 4443 | case IORING_OP_READ_FIXED: |
Jens Axboe | 3a6820f | 2019-12-22 15:19:35 -0700 | [diff] [blame] | 4444 | case IORING_OP_READ: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4445 | ret = io_read_prep(req, sqe, true); |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 4446 | break; |
| 4447 | case IORING_OP_WRITEV: |
| 4448 | case IORING_OP_WRITE_FIXED: |
Jens Axboe | 3a6820f | 2019-12-22 15:19:35 -0700 | [diff] [blame] | 4449 | case IORING_OP_WRITE: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4450 | ret = io_write_prep(req, sqe, true); |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 4451 | break; |
Jens Axboe | 0969e78 | 2019-12-17 18:40:57 -0700 | [diff] [blame] | 4452 | case IORING_OP_POLL_ADD: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4453 | ret = io_poll_add_prep(req, sqe); |
Jens Axboe | 0969e78 | 2019-12-17 18:40:57 -0700 | [diff] [blame] | 4454 | break; |
| 4455 | case IORING_OP_POLL_REMOVE: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4456 | ret = io_poll_remove_prep(req, sqe); |
Jens Axboe | 0969e78 | 2019-12-17 18:40:57 -0700 | [diff] [blame] | 4457 | break; |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 4458 | case IORING_OP_FSYNC: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4459 | ret = io_prep_fsync(req, sqe); |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 4460 | break; |
| 4461 | case IORING_OP_SYNC_FILE_RANGE: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4462 | ret = io_prep_sfr(req, sqe); |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 4463 | break; |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 4464 | case IORING_OP_SENDMSG: |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 4465 | case IORING_OP_SEND: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4466 | ret = io_sendmsg_prep(req, sqe); |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 4467 | break; |
| 4468 | case IORING_OP_RECVMSG: |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 4469 | case IORING_OP_RECV: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4470 | ret = io_recvmsg_prep(req, sqe); |
Jens Axboe | 03b1230 | 2019-12-02 18:50:25 -0700 | [diff] [blame] | 4471 | break; |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 4472 | case IORING_OP_CONNECT: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4473 | ret = io_connect_prep(req, sqe); |
Jens Axboe | f499a02 | 2019-12-02 16:28:46 -0700 | [diff] [blame] | 4474 | break; |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 4475 | case IORING_OP_TIMEOUT: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4476 | ret = io_timeout_prep(req, sqe, false); |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 4477 | break; |
Jens Axboe | b29472e | 2019-12-17 18:50:29 -0700 | [diff] [blame] | 4478 | case IORING_OP_TIMEOUT_REMOVE: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4479 | ret = io_timeout_remove_prep(req, sqe); |
Jens Axboe | b29472e | 2019-12-17 18:50:29 -0700 | [diff] [blame] | 4480 | break; |
Jens Axboe | fbf2384 | 2019-12-17 18:45:56 -0700 | [diff] [blame] | 4481 | case IORING_OP_ASYNC_CANCEL: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4482 | ret = io_async_cancel_prep(req, sqe); |
Jens Axboe | fbf2384 | 2019-12-17 18:45:56 -0700 | [diff] [blame] | 4483 | break; |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 4484 | case IORING_OP_LINK_TIMEOUT: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4485 | ret = io_timeout_prep(req, sqe, true); |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 4486 | break; |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 4487 | case IORING_OP_ACCEPT: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4488 | ret = io_accept_prep(req, sqe); |
Jens Axboe | 8ed8d3c | 2019-12-16 11:55:28 -0700 | [diff] [blame] | 4489 | break; |
Jens Axboe | d63d1b5 | 2019-12-10 10:38:56 -0700 | [diff] [blame] | 4490 | case IORING_OP_FALLOCATE: |
| 4491 | ret = io_fallocate_prep(req, sqe); |
| 4492 | break; |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 4493 | case IORING_OP_OPENAT: |
| 4494 | ret = io_openat_prep(req, sqe); |
| 4495 | break; |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 4496 | case IORING_OP_CLOSE: |
| 4497 | ret = io_close_prep(req, sqe); |
| 4498 | break; |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 4499 | case IORING_OP_FILES_UPDATE: |
| 4500 | ret = io_files_update_prep(req, sqe); |
| 4501 | break; |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 4502 | case IORING_OP_STATX: |
| 4503 | ret = io_statx_prep(req, sqe); |
| 4504 | break; |
Jens Axboe | 4840e41 | 2019-12-25 22:03:45 -0700 | [diff] [blame] | 4505 | case IORING_OP_FADVISE: |
| 4506 | ret = io_fadvise_prep(req, sqe); |
| 4507 | break; |
Jens Axboe | c1ca757 | 2019-12-25 22:18:28 -0700 | [diff] [blame] | 4508 | case IORING_OP_MADVISE: |
| 4509 | ret = io_madvise_prep(req, sqe); |
| 4510 | break; |
Jens Axboe | cebdb98 | 2020-01-08 17:59:24 -0700 | [diff] [blame] | 4511 | case IORING_OP_OPENAT2: |
| 4512 | ret = io_openat2_prep(req, sqe); |
| 4513 | break; |
Jens Axboe | 3e4827b | 2020-01-08 15:18:09 -0700 | [diff] [blame] | 4514 | case IORING_OP_EPOLL_CTL: |
| 4515 | ret = io_epoll_ctl_prep(req, sqe); |
| 4516 | break; |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 4517 | case IORING_OP_SPLICE: |
| 4518 | ret = io_splice_prep(req, sqe); |
| 4519 | break; |
Jens Axboe | ddf0322d | 2020-02-23 16:41:33 -0700 | [diff] [blame^] | 4520 | case IORING_OP_PROVIDE_BUFFERS: |
| 4521 | ret = io_provide_buffers_prep(req, sqe); |
| 4522 | break; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 4523 | default: |
Jens Axboe | e781573 | 2019-12-17 19:45:06 -0700 | [diff] [blame] | 4524 | printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n", |
| 4525 | req->opcode); |
| 4526 | ret = -EINVAL; |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 4527 | break; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 4528 | } |
| 4529 | |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 4530 | return ret; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 4531 | } |
| 4532 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4533 | static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 4534 | { |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 4535 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 4536 | int ret; |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 4537 | |
Bob Liu | 9d858b2 | 2019-11-13 18:06:25 +0800 | [diff] [blame] | 4538 | /* Still need to defer if there is a pending req in the defer list. */ |
| 4539 | if (!req_need_defer(req) && list_empty(&ctx->defer_list)) |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 4540 | return 0; |
| 4541 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4542 | if (!req->io && io_alloc_async_ctx(req)) |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 4543 | return -EAGAIN; |
| 4544 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4545 | ret = io_req_defer_prep(req, sqe); |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 4546 | if (ret < 0) |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 4547 | return ret; |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 4548 | |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 4549 | spin_lock_irq(&ctx->completion_lock); |
Bob Liu | 9d858b2 | 2019-11-13 18:06:25 +0800 | [diff] [blame] | 4550 | if (!req_need_defer(req) && list_empty(&ctx->defer_list)) { |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 4551 | spin_unlock_irq(&ctx->completion_lock); |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 4552 | return 0; |
| 4553 | } |
| 4554 | |
Jens Axboe | 915967f | 2019-11-21 09:01:20 -0700 | [diff] [blame] | 4555 | trace_io_uring_defer(ctx, req, req->user_data); |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 4556 | list_add_tail(&req->list, &ctx->defer_list); |
| 4557 | spin_unlock_irq(&ctx->completion_lock); |
| 4558 | return -EIOCBQUEUED; |
| 4559 | } |
| 4560 | |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 4561 | static void io_cleanup_req(struct io_kiocb *req) |
| 4562 | { |
| 4563 | struct io_async_ctx *io = req->io; |
| 4564 | |
| 4565 | switch (req->opcode) { |
| 4566 | case IORING_OP_READV: |
| 4567 | case IORING_OP_READ_FIXED: |
| 4568 | case IORING_OP_READ: |
| 4569 | case IORING_OP_WRITEV: |
| 4570 | case IORING_OP_WRITE_FIXED: |
| 4571 | case IORING_OP_WRITE: |
| 4572 | if (io->rw.iov != io->rw.fast_iov) |
| 4573 | kfree(io->rw.iov); |
| 4574 | break; |
| 4575 | case IORING_OP_SENDMSG: |
| 4576 | case IORING_OP_RECVMSG: |
| 4577 | if (io->msg.iov != io->msg.fast_iov) |
| 4578 | kfree(io->msg.iov); |
| 4579 | break; |
Pavel Begunkov | 8fef80b | 2020-02-07 23:59:53 +0300 | [diff] [blame] | 4580 | case IORING_OP_OPENAT: |
| 4581 | case IORING_OP_OPENAT2: |
| 4582 | case IORING_OP_STATX: |
| 4583 | putname(req->open.filename); |
| 4584 | break; |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 4585 | case IORING_OP_SPLICE: |
| 4586 | io_put_file(req, req->splice.file_in, |
| 4587 | (req->splice.flags & SPLICE_F_FD_IN_FIXED)); |
| 4588 | break; |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 4589 | } |
| 4590 | |
| 4591 | req->flags &= ~REQ_F_NEED_CLEANUP; |
| 4592 | } |
| 4593 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4594 | static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4595 | bool force_nonblock) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4596 | { |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 4597 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | d625c6e | 2019-12-17 19:53:05 -0700 | [diff] [blame] | 4598 | int ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4599 | |
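/*
 * sqe is non-NULL only when the request is issued straight off the
 * submission path and per-opcode prep hasn't run yet. Requests that were
 * deferred or punted to io-wq were already prepped (io_req_defer_prep()
 * above, and io_wq_submit_work() below passes NULL), so each case only
 * calls its *_prep() helper when sqe is set.
 */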
Jens Axboe | d625c6e | 2019-12-17 19:53:05 -0700 | [diff] [blame] | 4600 | switch (req->opcode) { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4601 | case IORING_OP_NOP: |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 4602 | ret = io_nop(req); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4603 | break; |
| 4604 | case IORING_OP_READV: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4605 | case IORING_OP_READ_FIXED: |
Jens Axboe | 3a6820f | 2019-12-22 15:19:35 -0700 | [diff] [blame] | 4606 | case IORING_OP_READ: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4607 | if (sqe) { |
| 4608 | ret = io_read_prep(req, sqe, force_nonblock); |
| 4609 | if (ret < 0) |
| 4610 | break; |
| 4611 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4612 | ret = io_read(req, force_nonblock); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4613 | break; |
| 4614 | case IORING_OP_WRITEV: |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 4615 | case IORING_OP_WRITE_FIXED: |
Jens Axboe | 3a6820f | 2019-12-22 15:19:35 -0700 | [diff] [blame] | 4616 | case IORING_OP_WRITE: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4617 | if (sqe) { |
| 4618 | ret = io_write_prep(req, sqe, force_nonblock); |
| 4619 | if (ret < 0) |
| 4620 | break; |
| 4621 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4622 | ret = io_write(req, force_nonblock); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4623 | break; |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 4624 | case IORING_OP_FSYNC: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4625 | if (sqe) { |
| 4626 | ret = io_prep_fsync(req, sqe); |
| 4627 | if (ret < 0) |
| 4628 | break; |
| 4629 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4630 | ret = io_fsync(req, force_nonblock); |
Christoph Hellwig | c992fe2 | 2019-01-11 09:43:02 -0700 | [diff] [blame] | 4631 | break; |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4632 | case IORING_OP_POLL_ADD: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4633 | if (sqe) { |
| 4634 | ret = io_poll_add_prep(req, sqe); |
| 4635 | if (ret) |
| 4636 | break; |
| 4637 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4638 | ret = io_poll_add(req); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4639 | break; |
| 4640 | case IORING_OP_POLL_REMOVE: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4641 | if (sqe) { |
| 4642 | ret = io_poll_remove_prep(req, sqe); |
| 4643 | if (ret < 0) |
| 4644 | break; |
| 4645 | } |
Jens Axboe | fc4df99 | 2019-12-10 14:38:45 -0700 | [diff] [blame] | 4646 | ret = io_poll_remove(req); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 4647 | break; |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 4648 | case IORING_OP_SYNC_FILE_RANGE: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4649 | if (sqe) { |
| 4650 | ret = io_prep_sfr(req, sqe); |
| 4651 | if (ret < 0) |
| 4652 | break; |
| 4653 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4654 | ret = io_sync_file_range(req, force_nonblock); |
Jens Axboe | 5d17b4a | 2019-04-09 14:56:44 -0600 | [diff] [blame] | 4655 | break; |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 4656 | case IORING_OP_SENDMSG: |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 4657 | case IORING_OP_SEND: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4658 | if (sqe) { |
| 4659 | ret = io_sendmsg_prep(req, sqe); |
| 4660 | if (ret < 0) |
| 4661 | break; |
| 4662 | } |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 4663 | if (req->opcode == IORING_OP_SENDMSG) |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4664 | ret = io_sendmsg(req, force_nonblock); |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 4665 | else |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4666 | ret = io_send(req, force_nonblock); |
Jens Axboe | 0fa03c6 | 2019-04-19 13:34:07 -0600 | [diff] [blame] | 4667 | break; |
Jens Axboe | aa1fa28 | 2019-04-19 13:38:09 -0600 | [diff] [blame] | 4668 | case IORING_OP_RECVMSG: |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 4669 | case IORING_OP_RECV: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4670 | if (sqe) { |
| 4671 | ret = io_recvmsg_prep(req, sqe); |
| 4672 | if (ret) |
| 4673 | break; |
| 4674 | } |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 4675 | if (req->opcode == IORING_OP_RECVMSG) |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4676 | ret = io_recvmsg(req, force_nonblock); |
Jens Axboe | fddafac | 2020-01-04 20:19:44 -0700 | [diff] [blame] | 4677 | else |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4678 | ret = io_recv(req, force_nonblock); |
Jens Axboe | aa1fa28 | 2019-04-19 13:38:09 -0600 | [diff] [blame] | 4679 | break; |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4680 | case IORING_OP_TIMEOUT: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4681 | if (sqe) { |
| 4682 | ret = io_timeout_prep(req, sqe, false); |
| 4683 | if (ret) |
| 4684 | break; |
| 4685 | } |
Jens Axboe | fc4df99 | 2019-12-10 14:38:45 -0700 | [diff] [blame] | 4686 | ret = io_timeout(req); |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 4687 | break; |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4688 | case IORING_OP_TIMEOUT_REMOVE: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4689 | if (sqe) { |
| 4690 | ret = io_timeout_remove_prep(req, sqe); |
| 4691 | if (ret) |
| 4692 | break; |
| 4693 | } |
Jens Axboe | fc4df99 | 2019-12-10 14:38:45 -0700 | [diff] [blame] | 4694 | ret = io_timeout_remove(req); |
Jens Axboe | 1136504 | 2019-10-16 09:08:32 -0600 | [diff] [blame] | 4695 | break; |
Jens Axboe | 17f2fe3 | 2019-10-17 14:42:58 -0600 | [diff] [blame] | 4696 | case IORING_OP_ACCEPT: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4697 | if (sqe) { |
| 4698 | ret = io_accept_prep(req, sqe); |
| 4699 | if (ret) |
| 4700 | break; |
| 4701 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4702 | ret = io_accept(req, force_nonblock); |
Jens Axboe | 17f2fe3 | 2019-10-17 14:42:58 -0600 | [diff] [blame] | 4703 | break; |
Jens Axboe | f8e85cf | 2019-11-23 14:24:24 -0700 | [diff] [blame] | 4704 | case IORING_OP_CONNECT: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4705 | if (sqe) { |
| 4706 | ret = io_connect_prep(req, sqe); |
| 4707 | if (ret) |
| 4708 | break; |
| 4709 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4710 | ret = io_connect(req, force_nonblock); |
Jens Axboe | f8e85cf | 2019-11-23 14:24:24 -0700 | [diff] [blame] | 4711 | break; |
Jens Axboe | 62755e3 | 2019-10-28 21:49:21 -0600 | [diff] [blame] | 4712 | case IORING_OP_ASYNC_CANCEL: |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4713 | if (sqe) { |
| 4714 | ret = io_async_cancel_prep(req, sqe); |
| 4715 | if (ret) |
| 4716 | break; |
| 4717 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4718 | ret = io_async_cancel(req); |
Jens Axboe | 62755e3 | 2019-10-28 21:49:21 -0600 | [diff] [blame] | 4719 | break; |
Jens Axboe | d63d1b5 | 2019-12-10 10:38:56 -0700 | [diff] [blame] | 4720 | case IORING_OP_FALLOCATE: |
| 4721 | if (sqe) { |
| 4722 | ret = io_fallocate_prep(req, sqe); |
| 4723 | if (ret) |
| 4724 | break; |
| 4725 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4726 | ret = io_fallocate(req, force_nonblock); |
Jens Axboe | d63d1b5 | 2019-12-10 10:38:56 -0700 | [diff] [blame] | 4727 | break; |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 4728 | case IORING_OP_OPENAT: |
| 4729 | if (sqe) { |
| 4730 | ret = io_openat_prep(req, sqe); |
| 4731 | if (ret) |
| 4732 | break; |
| 4733 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4734 | ret = io_openat(req, force_nonblock); |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 4735 | break; |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 4736 | case IORING_OP_CLOSE: |
| 4737 | if (sqe) { |
| 4738 | ret = io_close_prep(req, sqe); |
| 4739 | if (ret) |
| 4740 | break; |
| 4741 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4742 | ret = io_close(req, force_nonblock); |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 4743 | break; |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 4744 | case IORING_OP_FILES_UPDATE: |
| 4745 | if (sqe) { |
| 4746 | ret = io_files_update_prep(req, sqe); |
| 4747 | if (ret) |
| 4748 | break; |
| 4749 | } |
| 4750 | ret = io_files_update(req, force_nonblock); |
| 4751 | break; |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 4752 | case IORING_OP_STATX: |
| 4753 | if (sqe) { |
| 4754 | ret = io_statx_prep(req, sqe); |
| 4755 | if (ret) |
| 4756 | break; |
| 4757 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4758 | ret = io_statx(req, force_nonblock); |
Jens Axboe | eddc7ef | 2019-12-13 21:18:10 -0700 | [diff] [blame] | 4759 | break; |
Jens Axboe | 4840e41 | 2019-12-25 22:03:45 -0700 | [diff] [blame] | 4760 | case IORING_OP_FADVISE: |
| 4761 | if (sqe) { |
| 4762 | ret = io_fadvise_prep(req, sqe); |
| 4763 | if (ret) |
| 4764 | break; |
| 4765 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4766 | ret = io_fadvise(req, force_nonblock); |
Jens Axboe | 4840e41 | 2019-12-25 22:03:45 -0700 | [diff] [blame] | 4767 | break; |
Jens Axboe | c1ca757 | 2019-12-25 22:18:28 -0700 | [diff] [blame] | 4768 | case IORING_OP_MADVISE: |
| 4769 | if (sqe) { |
| 4770 | ret = io_madvise_prep(req, sqe); |
| 4771 | if (ret) |
| 4772 | break; |
| 4773 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4774 | ret = io_madvise(req, force_nonblock); |
Jens Axboe | c1ca757 | 2019-12-25 22:18:28 -0700 | [diff] [blame] | 4775 | break; |
Jens Axboe | cebdb98 | 2020-01-08 17:59:24 -0700 | [diff] [blame] | 4776 | case IORING_OP_OPENAT2: |
| 4777 | if (sqe) { |
| 4778 | ret = io_openat2_prep(req, sqe); |
| 4779 | if (ret) |
| 4780 | break; |
| 4781 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4782 | ret = io_openat2(req, force_nonblock); |
Jens Axboe | cebdb98 | 2020-01-08 17:59:24 -0700 | [diff] [blame] | 4783 | break; |
Jens Axboe | 3e4827b | 2020-01-08 15:18:09 -0700 | [diff] [blame] | 4784 | case IORING_OP_EPOLL_CTL: |
| 4785 | if (sqe) { |
| 4786 | ret = io_epoll_ctl_prep(req, sqe); |
| 4787 | if (ret) |
| 4788 | break; |
| 4789 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4790 | ret = io_epoll_ctl(req, force_nonblock); |
Jens Axboe | 3e4827b | 2020-01-08 15:18:09 -0700 | [diff] [blame] | 4791 | break; |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 4792 | case IORING_OP_SPLICE: |
| 4793 | if (sqe) { |
| 4794 | ret = io_splice_prep(req, sqe); |
| 4795 | if (ret < 0) |
| 4796 | break; |
| 4797 | } |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4798 | ret = io_splice(req, force_nonblock); |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 4799 | break; |
Jens Axboe | ddf0322d | 2020-02-23 16:41:33 -0700 | [diff] [blame^] | 4800 | case IORING_OP_PROVIDE_BUFFERS: |
| 4801 | if (sqe) { |
| 4802 | ret = io_provide_buffers_prep(req, sqe); |
| 4803 | if (ret) |
| 4804 | break; |
| 4805 | } |
| 4806 | ret = io_provide_buffers(req, force_nonblock); |
| 4807 | break; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4808 | default: |
| 4809 | ret = -EINVAL; |
| 4810 | break; |
| 4811 | } |
| 4812 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 4813 | if (ret) |
| 4814 | return ret; |
| 4815 | |
| 4816 | if (ctx->flags & IORING_SETUP_IOPOLL) { |
Jens Axboe | 11ba820 | 2020-01-15 21:51:17 -0700 | [diff] [blame] | 4817 | const bool in_async = io_wq_current_is_worker(); |
| 4818 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 4819 | if (req->result == -EAGAIN) |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 4820 | return -EAGAIN; |
| 4821 | |
Jens Axboe | 11ba820 | 2020-01-15 21:51:17 -0700 | [diff] [blame] | 4822 | /* workqueue context doesn't hold uring_lock, grab it now */ |
| 4823 | if (in_async) |
| 4824 | mutex_lock(&ctx->uring_lock); |
| 4825 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 4826 | io_iopoll_req_issued(req); |
Jens Axboe | 11ba820 | 2020-01-15 21:51:17 -0700 | [diff] [blame] | 4827 | |
| 4828 | if (in_async) |
| 4829 | mutex_unlock(&ctx->uring_lock); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 4830 | } |
| 4831 | |
| 4832 | return 0; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4833 | } |
| 4834 | |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 4835 | static void io_wq_submit_work(struct io_wq_work **workptr) |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 4836 | { |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 4837 | struct io_wq_work *work = *workptr; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4838 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 4839 | int ret = 0; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4840 | |
Jens Axboe | 0c9d5cc | 2019-12-11 19:29:43 -0700 | [diff] [blame] | 4841 | /* if NO_CANCEL is set, we must still run the work */ |
| 4842 | if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) == |
| 4843 | IO_WQ_WORK_CANCEL) { |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 4844 | ret = -ECANCELED; |
Jens Axboe | 0c9d5cc | 2019-12-11 19:29:43 -0700 | [diff] [blame] | 4845 | } |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 4846 | |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 4847 | if (!ret) { |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 4848 | do { |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4849 | ret = io_issue_sqe(req, NULL, false); |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 4850 | /* |
| 4851 | * We can get EAGAIN for polled IO even though we're |
| 4852 | * forcing a sync submission from here, since we can't |
| 4853 | * wait for request slots on the block side. |
| 4854 | */ |
| 4855 | if (ret != -EAGAIN) |
| 4856 | break; |
| 4857 | cond_resched(); |
| 4858 | } while (1); |
| 4859 | } |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 4860 | |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 4861 | if (ret) { |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 4862 | req_set_fail_links(req); |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 4863 | io_cqring_add_event(req, ret); |
Jens Axboe | 817869d | 2019-04-30 14:44:05 -0600 | [diff] [blame] | 4864 | io_put_req(req); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 4865 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4866 | |
Pavel Begunkov | e9fd939 | 2020-03-04 16:14:12 +0300 | [diff] [blame] | 4867 | io_steal_work(req, workptr); |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 4868 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4869 | |
Jens Axboe | 15b71ab | 2019-12-11 11:20:36 -0700 | [diff] [blame] | 4870 | static int io_req_needs_file(struct io_kiocb *req, int fd) |
Jens Axboe | 9e3aa61 | 2019-12-11 15:55:43 -0700 | [diff] [blame] | 4871 | { |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 4872 | if (!io_op_defs[req->opcode].needs_file) |
Jens Axboe | 9e3aa61 | 2019-12-11 15:55:43 -0700 | [diff] [blame] | 4873 | return 0; |
Jens Axboe | 0b5faf6 | 2020-02-06 21:42:51 -0700 | [diff] [blame] | 4874 | if ((fd == -1 || fd == AT_FDCWD) && io_op_defs[req->opcode].fd_non_neg) |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 4875 | return 0; |
| 4876 | return 1; |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 4877 | } |
| 4878 | |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 4879 | static inline struct file *io_file_from_index(struct io_ring_ctx *ctx, |
| 4880 | int index) |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 4881 | { |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 4882 | struct fixed_file_table *table; |
| 4883 | |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 4884 | table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT]; |
 | 4885 | return table->files[index & IORING_FILE_TABLE_MASK]; |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 4886 | } |
| 4887 | |
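/*
 * Two lookup paths: a fixed file is an index into the registered file
 * table, bounds-checked and sanitized with array_index_nospec() and pinned
 * via the table's percpu ref; a normal fd goes through the usual
 * fget()-style lookup in __io_file_get().
 */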
Pavel Begunkov | 8da11c1 | 2020-02-24 11:32:44 +0300 | [diff] [blame] | 4888 | static int io_file_get(struct io_submit_state *state, struct io_kiocb *req, |
| 4889 | int fd, struct file **out_file, bool fixed) |
| 4890 | { |
| 4891 | struct io_ring_ctx *ctx = req->ctx; |
| 4892 | struct file *file; |
| 4893 | |
| 4894 | if (fixed) { |
| 4895 | if (unlikely(!ctx->file_data || |
| 4896 | (unsigned) fd >= ctx->nr_user_files)) |
| 4897 | return -EBADF; |
| 4898 | fd = array_index_nospec(fd, ctx->nr_user_files); |
| 4899 | file = io_file_from_index(ctx, fd); |
| 4900 | if (!file) |
| 4901 | return -EBADF; |
| 4902 | percpu_ref_get(&ctx->file_data->refs); |
| 4903 | } else { |
| 4904 | trace_io_uring_file_get(ctx, fd); |
| 4905 | file = __io_file_get(state, fd); |
| 4906 | if (unlikely(!file)) |
| 4907 | return -EBADF; |
| 4908 | } |
| 4909 | |
| 4910 | *out_file = file; |
| 4911 | return 0; |
| 4912 | } |
| 4913 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4914 | static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req, |
| 4915 | const struct io_uring_sqe *sqe) |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 4916 | { |
| 4917 | unsigned flags; |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 4918 | int fd; |
Pavel Begunkov | 8da11c1 | 2020-02-24 11:32:44 +0300 | [diff] [blame] | 4919 | bool fixed; |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 4920 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 4921 | flags = READ_ONCE(sqe->flags); |
| 4922 | fd = READ_ONCE(sqe->fd); |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 4923 | |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 4924 | if (!io_req_needs_file(req, fd)) |
| 4925 | return 0; |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 4926 | |
Pavel Begunkov | 8da11c1 | 2020-02-24 11:32:44 +0300 | [diff] [blame] | 4927 | fixed = (flags & IOSQE_FIXED_FILE); |
| 4928 | if (unlikely(!fixed && req->needs_fixed_file)) |
| 4929 | return -EBADF; |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 4930 | |
Pavel Begunkov | 8da11c1 | 2020-02-24 11:32:44 +0300 | [diff] [blame] | 4931 | return io_file_get(state, req, fd, &req->file, fixed); |
Jens Axboe | 09bb839 | 2019-03-13 12:39:28 -0600 | [diff] [blame] | 4932 | } |
| 4933 | |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 4934 | static int io_grab_files(struct io_kiocb *req) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 4935 | { |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 4936 | int ret = -EBADF; |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 4937 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 4938 | |
Jens Axboe | f86cd20 | 2020-01-29 13:46:44 -0700 | [diff] [blame] | 4939 | if (req->work.files) |
| 4940 | return 0; |
Pavel Begunkov | b14cca0 | 2020-01-17 04:45:59 +0300 | [diff] [blame] | 4941 | if (!ctx->ring_file) |
Jens Axboe | b5dba59 | 2019-12-11 14:02:38 -0700 | [diff] [blame] | 4942 | return -EBADF; |
| 4943 | |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 4944 | rcu_read_lock(); |
| 4945 | spin_lock_irq(&ctx->inflight_lock); |
| 4946 | /* |
| 4947 | * We use the f_ops->flush() handler to ensure that we can flush |
| 4948 | * out work accessing these files if the fd is closed. Check if |
| 4949 | * the fd has changed since we started down this path, and disallow |
| 4950 | * this operation if it has. |
| 4951 | */ |
Pavel Begunkov | b14cca0 | 2020-01-17 04:45:59 +0300 | [diff] [blame] | 4952 | if (fcheck(ctx->ring_fd) == ctx->ring_file) { |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 4953 | list_add(&req->inflight_entry, &ctx->inflight_list); |
| 4954 | req->flags |= REQ_F_INFLIGHT; |
| 4955 | req->work.files = current->files; |
| 4956 | ret = 0; |
| 4957 | } |
| 4958 | spin_unlock_irq(&ctx->inflight_lock); |
| 4959 | rcu_read_unlock(); |
| 4960 | |
| 4961 | return ret; |
| 4962 | } |
| 4963 | |
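/*
 * hrtimer callback for a linked timeout: if the request it is linked to
 * is still pending, cancel it with -ETIME; otherwise complete the
 * timeout request itself with -ETIME.
 */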
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 4964 | static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) |
| 4965 | { |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 4966 | struct io_timeout_data *data = container_of(timer, |
| 4967 | struct io_timeout_data, timer); |
| 4968 | struct io_kiocb *req = data->req; |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 4969 | struct io_ring_ctx *ctx = req->ctx; |
| 4970 | struct io_kiocb *prev = NULL; |
| 4971 | unsigned long flags; |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 4972 | |
| 4973 | spin_lock_irqsave(&ctx->completion_lock, flags); |
| 4974 | |
| 4975 | /* |
| 4976 | * We don't expect the list to be empty; that will only happen if we
| 4977 | * race with the completion of the linked work. |
| 4978 | */ |
Pavel Begunkov | 4493233 | 2019-12-05 16:16:35 +0300 | [diff] [blame] | 4979 | if (!list_empty(&req->link_list)) { |
| 4980 | prev = list_entry(req->link_list.prev, struct io_kiocb, |
| 4981 | link_list); |
Jens Axboe | 5d96072 | 2019-11-19 15:31:28 -0700 | [diff] [blame] | 4982 | if (refcount_inc_not_zero(&prev->refs)) { |
Pavel Begunkov | 4493233 | 2019-12-05 16:16:35 +0300 | [diff] [blame] | 4983 | list_del_init(&req->link_list); |
Jens Axboe | 5d96072 | 2019-11-19 15:31:28 -0700 | [diff] [blame] | 4984 | prev->flags &= ~REQ_F_LINK_TIMEOUT; |
| 4985 | } else |
Jens Axboe | 76a46e0 | 2019-11-10 23:34:16 -0700 | [diff] [blame] | 4986 | prev = NULL; |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 4987 | } |
| 4988 | |
| 4989 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
| 4990 | |
| 4991 | if (prev) { |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 4992 | req_set_fail_links(prev); |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 4993 | io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME); |
Jens Axboe | 76a46e0 | 2019-11-10 23:34:16 -0700 | [diff] [blame] | 4994 | io_put_req(prev); |
Jens Axboe | 47f4676 | 2019-11-09 17:43:02 -0700 | [diff] [blame] | 4995 | } else { |
| 4996 | io_cqring_add_event(req, -ETIME); |
| 4997 | io_put_req(req); |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 4998 | } |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 4999 | return HRTIMER_NORESTART; |
| 5000 | } |
| 5001 | |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 5002 | static void io_queue_linked_timeout(struct io_kiocb *req) |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 5003 | { |
Jens Axboe | 76a46e0 | 2019-11-10 23:34:16 -0700 | [diff] [blame] | 5004 | struct io_ring_ctx *ctx = req->ctx; |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 5005 | |
Jens Axboe | 76a46e0 | 2019-11-10 23:34:16 -0700 | [diff] [blame] | 5006 | /* |
| 5007 | * If the list is now empty, then our linked request finished before |
| 5008 | * we got a chance to set up the timer.
| 5009 | */ |
| 5010 | spin_lock_irq(&ctx->completion_lock); |
Pavel Begunkov | 4493233 | 2019-12-05 16:16:35 +0300 | [diff] [blame] | 5011 | if (!list_empty(&req->link_list)) { |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 5012 | struct io_timeout_data *data = &req->io->timeout; |
Jens Axboe | 94ae5e7 | 2019-11-14 19:39:52 -0700 | [diff] [blame] | 5013 | |
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 5014 | data->timer.function = io_link_timeout_fn; |
| 5015 | hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), |
| 5016 | data->mode); |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 5017 | } |
Jens Axboe | 76a46e0 | 2019-11-10 23:34:16 -0700 | [diff] [blame] | 5018 | spin_unlock_irq(&ctx->completion_lock); |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 5019 | |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 5020 | /* drop submission reference */ |
Jens Axboe | 76a46e0 | 2019-11-10 23:34:16 -0700 | [diff] [blame] | 5021 | io_put_req(req); |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 5022 | } |
| 5023 | |
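/*
 * If the first request linked to this one is a LINK_TIMEOUT, flag the
 * request and return the timeout so the caller can arm it when issue
 * begins. Returns NULL when there is nothing to arm.
 */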
Jens Axboe | ad8a48a | 2019-11-15 08:49:11 -0700 | [diff] [blame] | 5024 | static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 5025 | { |
| 5026 | struct io_kiocb *nxt; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5027 | |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 5028 | if (!(req->flags & REQ_F_LINK)) |
| 5029 | return NULL; |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 5030 | /* for polled retry, if flag is set, we already went through here */ |
| 5031 | if (req->flags & REQ_F_POLLED) |
| 5032 | return NULL; |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 5033 | |
Pavel Begunkov | 4493233 | 2019-12-05 16:16:35 +0300 | [diff] [blame] | 5034 | nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, |
| 5035 | link_list); |
Jens Axboe | d625c6e | 2019-12-17 19:53:05 -0700 | [diff] [blame] | 5036 | if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT) |
Jens Axboe | 76a46e0 | 2019-11-10 23:34:16 -0700 | [diff] [blame] | 5037 | return NULL; |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 5038 | |
Jens Axboe | 76a46e0 | 2019-11-10 23:34:16 -0700 | [diff] [blame] | 5039 | req->flags |= REQ_F_LINK_TIMEOUT; |
Jens Axboe | 76a46e0 | 2019-11-10 23:34:16 -0700 | [diff] [blame] | 5040 | return nxt; |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 5041 | } |
| 5042 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5043 | static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5044 | { |
Jens Axboe | 4a0a7a1 | 2019-12-09 20:01:01 -0700 | [diff] [blame] | 5045 | struct io_kiocb *linked_timeout; |
Pavel Begunkov | 4bc4494 | 2020-02-29 22:48:24 +0300 | [diff] [blame] | 5046 | struct io_kiocb *nxt; |
Jens Axboe | 193155c | 2020-02-22 23:22:19 -0700 | [diff] [blame] | 5047 | const struct cred *old_creds = NULL; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5048 | int ret; |
| 5049 | |
Jens Axboe | 4a0a7a1 | 2019-12-09 20:01:01 -0700 | [diff] [blame] | 5050 | again: |
| 5051 | linked_timeout = io_prep_linked_timeout(req); |
| 5052 | |
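	/*
	 * If the request carries personality credentials, assume them for
	 * the duration of the issue; they are reverted at exit, or replaced
	 * when a later request in the chain uses different creds.
	 */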
Jens Axboe | 193155c | 2020-02-22 23:22:19 -0700 | [diff] [blame] | 5053 | if (req->work.creds && req->work.creds != current_cred()) { |
| 5054 | if (old_creds) |
| 5055 | revert_creds(old_creds); |
| 5056 | if (old_creds == req->work.creds) |
| 5057 | old_creds = NULL; /* restored original creds */ |
| 5058 | else |
| 5059 | old_creds = override_creds(req->work.creds); |
| 5060 | } |
| 5061 | |
Pavel Begunkov | 014db00 | 2020-03-03 21:33:12 +0300 | [diff] [blame] | 5062 | ret = io_issue_sqe(req, sqe, true); |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 5063 | |
| 5064 | /* |
| 5065 | * We async punt it if the file wasn't marked NOWAIT, or if the file |
| 5066 | * doesn't support non-blocking read/write attempts |
| 5067 | */ |
| 5068 | if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) || |
| 5069 | (req->flags & REQ_F_MUST_PUNT))) { |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 5070 | if (io_arm_poll_handler(req)) { |
| 5071 | if (linked_timeout) |
| 5072 | io_queue_linked_timeout(linked_timeout); |
Pavel Begunkov | 4bc4494 | 2020-02-29 22:48:24 +0300 | [diff] [blame] | 5073 | goto exit; |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 5074 | } |
Pavel Begunkov | 86a761f | 2020-01-22 23:09:36 +0300 | [diff] [blame] | 5075 | punt: |
Jens Axboe | f86cd20 | 2020-01-29 13:46:44 -0700 | [diff] [blame] | 5076 | if (io_op_defs[req->opcode].file_table) { |
Pavel Begunkov | bbad27b | 2019-11-19 23:32:47 +0300 | [diff] [blame] | 5077 | ret = io_grab_files(req); |
| 5078 | if (ret) |
| 5079 | goto err; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5080 | } |
Pavel Begunkov | bbad27b | 2019-11-19 23:32:47 +0300 | [diff] [blame] | 5081 | |
| 5082 | /* |
| 5083 | * Queued up for async execution, worker will release |
| 5084 | * submit reference when the iocb is actually submitted. |
| 5085 | */ |
| 5086 | io_queue_async_work(req); |
Pavel Begunkov | 4bc4494 | 2020-02-29 22:48:24 +0300 | [diff] [blame] | 5087 | goto exit; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5088 | } |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 5089 | |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 5090 | err: |
Pavel Begunkov | 4bc4494 | 2020-02-29 22:48:24 +0300 | [diff] [blame] | 5091 | nxt = NULL; |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 5092 | /* drop submission reference */ |
Jens Axboe | 2a44f46 | 2020-02-25 13:25:41 -0700 | [diff] [blame] | 5093 | io_put_req_find_next(req, &nxt); |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 5094 | |
Pavel Begunkov | f9bd67f | 2019-11-21 23:21:03 +0300 | [diff] [blame] | 5095 | if (linked_timeout) { |
Jens Axboe | 76a46e0 | 2019-11-10 23:34:16 -0700 | [diff] [blame] | 5096 | if (!ret) |
Pavel Begunkov | f9bd67f | 2019-11-21 23:21:03 +0300 | [diff] [blame] | 5097 | io_queue_linked_timeout(linked_timeout); |
Jens Axboe | 76a46e0 | 2019-11-10 23:34:16 -0700 | [diff] [blame] | 5098 | else |
Pavel Begunkov | f9bd67f | 2019-11-21 23:21:03 +0300 | [diff] [blame] | 5099 | io_put_req(linked_timeout); |
Jens Axboe | 76a46e0 | 2019-11-10 23:34:16 -0700 | [diff] [blame] | 5100 | } |
| 5101 | |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 5102 | /* and drop final reference, if we failed */ |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5103 | if (ret) { |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 5104 | io_cqring_add_event(req, ret); |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 5105 | req_set_fail_links(req); |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 5106 | io_put_req(req); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5107 | } |
Jens Axboe | 4a0a7a1 | 2019-12-09 20:01:01 -0700 | [diff] [blame] | 5108 | if (nxt) { |
| 5109 | req = nxt; |
Pavel Begunkov | 86a761f | 2020-01-22 23:09:36 +0300 | [diff] [blame] | 5110 | |
| 5111 | if (req->flags & REQ_F_FORCE_ASYNC) |
| 5112 | goto punt; |
Jens Axboe | 4a0a7a1 | 2019-12-09 20:01:01 -0700 | [diff] [blame] | 5113 | goto again; |
| 5114 | } |
Pavel Begunkov | 4bc4494 | 2020-02-29 22:48:24 +0300 | [diff] [blame] | 5115 | exit: |
Jens Axboe | 193155c | 2020-02-22 23:22:19 -0700 | [diff] [blame] | 5116 | if (old_creds) |
| 5117 | revert_creds(old_creds); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5118 | } |
| 5119 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5120 | static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 5121 | { |
| 5122 | int ret; |
| 5123 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5124 | ret = io_req_defer(req, sqe); |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 5125 | if (ret) { |
| 5126 | if (ret != -EIOCBQUEUED) { |
Pavel Begunkov | 1118591 | 2020-01-22 23:09:35 +0300 | [diff] [blame] | 5127 | fail_req: |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 5128 | io_cqring_add_event(req, ret); |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 5129 | req_set_fail_links(req); |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 5130 | io_double_put_req(req); |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 5131 | } |
Pavel Begunkov | 2550878 | 2019-12-30 21:24:47 +0300 | [diff] [blame] | 5132 | } else if (req->flags & REQ_F_FORCE_ASYNC) { |
Pavel Begunkov | 1118591 | 2020-01-22 23:09:35 +0300 | [diff] [blame] | 5133 | ret = io_req_defer_prep(req, sqe); |
| 5134 | if (unlikely(ret < 0)) |
| 5135 | goto fail_req; |
Jens Axboe | ce35a47 | 2019-12-17 08:04:44 -0700 | [diff] [blame] | 5136 | /* |
| 5137 | * Never try inline submit if IOSQE_ASYNC is set, go straight
| 5138 | * to async execution. |
| 5139 | */ |
| 5140 | req->work.flags |= IO_WQ_WORK_CONCURRENT; |
| 5141 | io_queue_async_work(req); |
| 5142 | } else { |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5143 | __io_queue_sqe(req, sqe); |
Jens Axboe | ce35a47 | 2019-12-17 08:04:44 -0700 | [diff] [blame] | 5144 | } |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 5145 | } |
| 5146 | |
Pavel Begunkov | 1b4a51b | 2019-11-21 11:54:28 +0300 | [diff] [blame] | 5147 | static inline void io_queue_link_head(struct io_kiocb *req) |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 5148 | { |
Jens Axboe | 94ae5e7 | 2019-11-14 19:39:52 -0700 | [diff] [blame] | 5149 | if (unlikely(req->flags & REQ_F_FAIL_LINK)) { |
Pavel Begunkov | 1b4a51b | 2019-11-21 11:54:28 +0300 | [diff] [blame] | 5150 | io_cqring_add_event(req, -ECANCELED); |
| 5151 | io_double_put_req(req); |
| 5152 | } else |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5153 | io_queue_sqe(req, NULL); |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 5154 | } |
| 5155 | |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 5156 | #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \ |
Jens Axboe | ce35a47 | 2019-12-17 08:04:44 -0700 | [diff] [blame] | 5157 | IOSQE_IO_HARDLINK | IOSQE_ASYNC) |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5158 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5159 | static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, |
| 5160 | struct io_submit_state *state, struct io_kiocb **link) |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5161 | { |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 5162 | struct io_ring_ctx *ctx = req->ctx; |
Pavel Begunkov | 32fe525 | 2019-12-17 22:26:58 +0300 | [diff] [blame] | 5163 | unsigned int sqe_flags; |
Jens Axboe | 75c6a03 | 2020-01-28 10:15:23 -0700 | [diff] [blame] | 5164 | int ret, id; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5165 | |
Pavel Begunkov | 32fe525 | 2019-12-17 22:26:58 +0300 | [diff] [blame] | 5166 | sqe_flags = READ_ONCE(sqe->flags); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5167 | |
| 5168 | /* enforce forwards compatibility on users */ |
Pavel Begunkov | 32fe525 | 2019-12-17 22:26:58 +0300 | [diff] [blame] | 5169 | if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) { |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5170 | ret = -EINVAL; |
Pavel Begunkov | 196be95 | 2019-11-07 01:41:06 +0300 | [diff] [blame] | 5171 | goto err_req; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5172 | } |
| 5173 | |
Jens Axboe | 75c6a03 | 2020-01-28 10:15:23 -0700 | [diff] [blame] | 5174 | id = READ_ONCE(sqe->personality); |
| 5175 | if (id) { |
Jens Axboe | 193155c | 2020-02-22 23:22:19 -0700 | [diff] [blame] | 5176 | req->work.creds = idr_find(&ctx->personality_idr, id); |
| 5177 | if (unlikely(!req->work.creds)) { |
Jens Axboe | 75c6a03 | 2020-01-28 10:15:23 -0700 | [diff] [blame] | 5178 | ret = -EINVAL; |
| 5179 | goto err_req; |
| 5180 | } |
Jens Axboe | 193155c | 2020-02-22 23:22:19 -0700 | [diff] [blame] | 5181 | get_cred(req->work.creds); |
Jens Axboe | 75c6a03 | 2020-01-28 10:15:23 -0700 | [diff] [blame] | 5182 | } |
| 5183 | |
Pavel Begunkov | 6b47ee6 | 2020-01-18 20:22:41 +0300 | [diff] [blame] | 5184 | /* same numerical values with corresponding REQ_F_*, safe to copy */ |
Pavel Begunkov | 8da11c1 | 2020-02-24 11:32:44 +0300 | [diff] [blame] | 5185 | req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK | |
| 5186 | IOSQE_ASYNC | IOSQE_FIXED_FILE); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5187 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5188 | ret = io_req_set_file(state, req, sqe); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5189 | if (unlikely(ret)) { |
| 5190 | err_req: |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 5191 | io_cqring_add_event(req, ret); |
| 5192 | io_double_put_req(req); |
Pavel Begunkov | 2e6e1fd | 2019-12-05 16:15:45 +0300 | [diff] [blame] | 5193 | return false; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5194 | } |
| 5195 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5196 | /* |
| 5197 | * If we already have a head request, queue this one for async |
| 5198 | * submittal once the head completes. If we don't have a head but |
| 5199 | * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be |
| 5200 | * submitted sync once the chain is complete. If none of those |
| 5201 | * conditions are true (normal request), then just queue it. |
| 5202 | */ |
| 5203 | if (*link) { |
Pavel Begunkov | 9d76377 | 2019-12-17 02:22:07 +0300 | [diff] [blame] | 5204 | struct io_kiocb *head = *link; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5205 | |
Pavel Begunkov | 8cdf219 | 2020-01-25 00:40:24 +0300 | [diff] [blame] | 5206 | /* |
| 5207 | * Since a link executes sequentially, draining both sides
| 5208 | * of the link also fulfils IOSQE_IO_DRAIN semantics for all
| 5209 | * requests in the link. So, it drains the head and the
| 5210 | * request following the link. The latter is done via the
| 5211 | * drain_next flag to persist the effect across calls.
| 5212 | */ |
Pavel Begunkov | 711be03 | 2020-01-17 03:57:59 +0300 | [diff] [blame] | 5213 | if (sqe_flags & IOSQE_IO_DRAIN) { |
| 5214 | head->flags |= REQ_F_IO_DRAIN; |
| 5215 | ctx->drain_next = 1; |
| 5216 | } |
Jens Axboe | b7bb4f7 | 2019-12-15 22:13:43 -0700 | [diff] [blame] | 5217 | if (io_alloc_async_ctx(req)) { |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5218 | ret = -EAGAIN; |
| 5219 | goto err_req; |
| 5220 | } |
| 5221 | |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5222 | ret = io_req_defer_prep(req, sqe); |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 5223 | if (ret) { |
Jens Axboe | 4e88d6e | 2019-12-07 20:59:47 -0700 | [diff] [blame] | 5224 | /* fail even hard links since we don't submit */ |
Pavel Begunkov | 9d76377 | 2019-12-17 02:22:07 +0300 | [diff] [blame] | 5225 | head->flags |= REQ_F_FAIL_LINK; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 5226 | goto err_req; |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 5227 | } |
Pavel Begunkov | 9d76377 | 2019-12-17 02:22:07 +0300 | [diff] [blame] | 5228 | trace_io_uring_link(ctx, req, head); |
| 5229 | list_add_tail(&req->link_list, &head->link_list); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5230 | |
Pavel Begunkov | 32fe525 | 2019-12-17 22:26:58 +0300 | [diff] [blame] | 5231 | /* last request of a link, enqueue the link */ |
| 5232 | if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) { |
| 5233 | io_queue_link_head(head); |
| 5234 | *link = NULL; |
| 5235 | } |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5236 | } else { |
Pavel Begunkov | 711be03 | 2020-01-17 03:57:59 +0300 | [diff] [blame] | 5237 | if (unlikely(ctx->drain_next)) { |
| 5238 | req->flags |= REQ_F_IO_DRAIN; |
| 5239 | req->ctx->drain_next = 0; |
| 5240 | } |
| 5241 | if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) { |
| 5242 | req->flags |= REQ_F_LINK; |
Pavel Begunkov | 711be03 | 2020-01-17 03:57:59 +0300 | [diff] [blame] | 5243 | INIT_LIST_HEAD(&req->link_list); |
| 5244 | ret = io_req_defer_prep(req, sqe); |
| 5245 | if (ret) |
| 5246 | req->flags |= REQ_F_FAIL_LINK; |
| 5247 | *link = req; |
| 5248 | } else { |
| 5249 | io_queue_sqe(req, sqe); |
| 5250 | } |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5251 | } |
Pavel Begunkov | 2e6e1fd | 2019-12-05 16:15:45 +0300 | [diff] [blame] | 5252 | |
| 5253 | return true; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5254 | } |
| 5255 | |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 5256 | /* |
| 5257 | * Batched submission is done, ensure local IO is flushed out. |
| 5258 | */ |
| 5259 | static void io_submit_state_end(struct io_submit_state *state) |
| 5260 | { |
| 5261 | blk_finish_plug(&state->plug); |
Jens Axboe | 3d6770f | 2019-04-13 11:50:54 -0600 | [diff] [blame] | 5262 | io_file_put(state); |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 5263 | if (state->free_reqs) |
Pavel Begunkov | 6c8a313 | 2020-02-01 03:58:00 +0300 | [diff] [blame] | 5264 | kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs); |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 5265 | } |
| 5266 | |
| 5267 | /* |
| 5268 | * Start submission side cache. |
| 5269 | */ |
| 5270 | static void io_submit_state_start(struct io_submit_state *state, |
Jackie Liu | 22efde5 | 2019-12-02 17:14:52 +0800 | [diff] [blame] | 5271 | unsigned int max_ios) |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 5272 | { |
| 5273 | blk_start_plug(&state->plug); |
Jens Axboe | 2579f91 | 2019-01-09 09:10:43 -0700 | [diff] [blame] | 5274 | state->free_reqs = 0; |
Jens Axboe | 9a56a23 | 2019-01-09 09:06:50 -0700 | [diff] [blame] | 5275 | state->file = NULL; |
| 5276 | state->ios_left = max_ios; |
| 5277 | } |
| 5278 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5279 | static void io_commit_sqring(struct io_ring_ctx *ctx) |
| 5280 | { |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 5281 | struct io_rings *rings = ctx->rings; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5282 | |
Pavel Begunkov | caf582c | 2019-12-30 21:24:46 +0300 | [diff] [blame] | 5283 | /* |
| 5284 | * Ensure any loads from the SQEs are done at this point, |
| 5285 | * since once we write the new head, the application could |
| 5286 | * write new data to them. |
| 5287 | */ |
| 5288 | smp_store_release(&rings->sq.head, ctx->cached_sq_head); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5289 | } |
| 5290 | |
| 5291 | /* |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5292 | * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5293 | * that is mapped by userspace. This means that care needs to be taken to |
| 5294 | * ensure that reads are stable, as we cannot rely on userspace always |
| 5295 | * being a good citizen. If members of the sqe are validated and then later |
| 5296 | * used, it's important that those reads are done through READ_ONCE() to |
| 5297 | * prevent a re-load down the line. |
| 5298 | */ |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5299 | static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req, |
| 5300 | const struct io_uring_sqe **sqe_ptr) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5301 | { |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 5302 | u32 *sq_array = ctx->sq_array; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5303 | unsigned head; |
| 5304 | |
| 5305 | /* |
| 5306 | * The cached sq head (or cq tail) serves two purposes: |
| 5307 | * |
| 5308 | * 1) allows us to batch the cost of updating the user visible |
| 5309 | * head updates. |
| 5310 | * 2) allows the kernel side to track the head on its own, even |
| 5311 | * though the application is the one updating it. |
| 5312 | */ |
Pavel Begunkov | ee7d46d | 2019-12-30 21:24:45 +0300 | [diff] [blame] | 5313 | head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]); |
Pavel Begunkov | 9835d6f | 2019-11-21 21:24:56 +0300 | [diff] [blame] | 5314 | if (likely(head < ctx->sq_entries)) { |
Pavel Begunkov | cf6fd4b | 2019-11-25 23:14:39 +0300 | [diff] [blame] | 5315 | /* |
| 5316 | * All IO needs to record its previous position; when LINK is
| 5317 | * combined with DRAIN, it can be used to mark the position of
| 5318 | * the first IO in the link list.
| 5319 | */ |
| 5320 | req->sequence = ctx->cached_sq_head; |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5321 | *sqe_ptr = &ctx->sq_sqes[head]; |
| 5322 | req->opcode = READ_ONCE((*sqe_ptr)->opcode); |
| 5323 | req->user_data = READ_ONCE((*sqe_ptr)->user_data); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5324 | ctx->cached_sq_head++; |
| 5325 | return true; |
| 5326 | } |
| 5327 | |
| 5328 | /* drop invalid entries */ |
| 5329 | ctx->cached_sq_head++; |
Jens Axboe | 498ccd9 | 2019-10-25 10:04:25 -0600 | [diff] [blame] | 5330 | ctx->cached_sq_dropped++; |
Pavel Begunkov | ee7d46d | 2019-12-30 21:24:45 +0300 | [diff] [blame] | 5331 | WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5332 | return false; |
| 5333 | } |
| 5334 | |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 5335 | static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, |
Pavel Begunkov | ae9428c | 2019-11-06 00:22:14 +0300 | [diff] [blame] | 5336 | struct file *ring_file, int ring_fd, |
| 5337 | struct mm_struct **mm, bool async) |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5338 | { |
| 5339 | struct io_submit_state state, *statep = NULL; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5340 | struct io_kiocb *link = NULL; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5341 | int i, submitted = 0; |
Pavel Begunkov | 95a1b3ff | 2019-10-27 23:15:41 +0300 | [diff] [blame] | 5342 | bool mm_fault = false; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5343 | |
Jens Axboe | c4a2ed7 | 2019-11-21 21:01:26 -0700 | [diff] [blame] | 5344 | /* if we have a backlog and couldn't flush it all, return BUSY */ |
Jens Axboe | ad3eb2c | 2019-12-18 17:12:20 -0700 | [diff] [blame] | 5345 | if (test_bit(0, &ctx->sq_check_overflow)) { |
| 5346 | if (!list_empty(&ctx->cq_overflow_list) && |
| 5347 | !io_cqring_overflow_flush(ctx, false)) |
| 5348 | return -EBUSY; |
| 5349 | } |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5350 | |
Pavel Begunkov | ee7d46d | 2019-12-30 21:24:45 +0300 | [diff] [blame] | 5351 | /* make sure SQ entry isn't read before tail */ |
| 5352 | nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx)); |
Pavel Begunkov | 9ef4f12 | 2019-12-30 21:24:44 +0300 | [diff] [blame] | 5353 | |
Pavel Begunkov | 2b85edf | 2019-12-28 14:13:03 +0300 | [diff] [blame] | 5354 | if (!percpu_ref_tryget_many(&ctx->refs, nr)) |
| 5355 | return -EAGAIN; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5356 | |
| 5357 | if (nr > IO_PLUG_THRESHOLD) { |
Jackie Liu | 22efde5 | 2019-12-02 17:14:52 +0800 | [diff] [blame] | 5358 | io_submit_state_start(&state, nr); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5359 | statep = &state; |
| 5360 | } |
| 5361 | |
Pavel Begunkov | b14cca0 | 2020-01-17 04:45:59 +0300 | [diff] [blame] | 5362 | ctx->ring_fd = ring_fd; |
| 5363 | ctx->ring_file = ring_file; |
| 5364 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5365 | for (i = 0; i < nr; i++) { |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5366 | const struct io_uring_sqe *sqe; |
Pavel Begunkov | 196be95 | 2019-11-07 01:41:06 +0300 | [diff] [blame] | 5367 | struct io_kiocb *req; |
Pavel Begunkov | 1cb1edb | 2020-02-06 21:16:09 +0300 | [diff] [blame] | 5368 | int err; |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 5369 | |
Pavel Begunkov | 196be95 | 2019-11-07 01:41:06 +0300 | [diff] [blame] | 5370 | req = io_get_req(ctx, statep); |
| 5371 | if (unlikely(!req)) { |
| 5372 | if (!submitted) |
| 5373 | submitted = -EAGAIN; |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 5374 | break; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5375 | } |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5376 | if (!io_get_sqring(ctx, req, &sqe)) { |
Pavel Begunkov | 2b85edf | 2019-12-28 14:13:03 +0300 | [diff] [blame] | 5377 | __io_req_do_free(req); |
Pavel Begunkov | 196be95 | 2019-11-07 01:41:06 +0300 | [diff] [blame] | 5378 | break; |
| 5379 | } |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5380 | |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 5381 | /* will complete beyond this point, count as submitted */ |
| 5382 | submitted++; |
| 5383 | |
| 5384 | if (unlikely(req->opcode >= IORING_OP_LAST)) { |
Pavel Begunkov | 1cb1edb | 2020-02-06 21:16:09 +0300 | [diff] [blame] | 5385 | err = -EINVAL; |
| 5386 | fail_req: |
| 5387 | io_cqring_add_event(req, err); |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 5388 | io_double_put_req(req); |
| 5389 | break; |
| 5390 | } |
| 5391 | |
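		/*
		 * Lazily grab ctx->sqo_mm for opcodes that need an mm; once
		 * acquired, it is reused for the remaining requests in this
		 * submission batch.
		 */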
| 5392 | if (io_op_defs[req->opcode].needs_mm && !*mm) { |
Pavel Begunkov | 95a1b3ff | 2019-10-27 23:15:41 +0300 | [diff] [blame] | 5393 | mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm); |
Pavel Begunkov | 1cb1edb | 2020-02-06 21:16:09 +0300 | [diff] [blame] | 5394 | if (unlikely(mm_fault)) { |
| 5395 | err = -EFAULT; |
| 5396 | goto fail_req; |
Pavel Begunkov | 95a1b3ff | 2019-10-27 23:15:41 +0300 | [diff] [blame] | 5397 | } |
Pavel Begunkov | 1cb1edb | 2020-02-06 21:16:09 +0300 | [diff] [blame] | 5398 | use_mm(ctx->sqo_mm); |
| 5399 | *mm = ctx->sqo_mm; |
Pavel Begunkov | 95a1b3ff | 2019-10-27 23:15:41 +0300 | [diff] [blame] | 5400 | } |
| 5401 | |
Pavel Begunkov | cf6fd4b | 2019-11-25 23:14:39 +0300 | [diff] [blame] | 5402 | req->needs_fixed_file = async; |
Jens Axboe | 354420f | 2020-01-08 18:55:15 -0700 | [diff] [blame] | 5403 | trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data, |
| 5404 | true, async); |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 5405 | if (!io_submit_sqe(req, sqe, statep, &link)) |
Pavel Begunkov | 2e6e1fd | 2019-12-05 16:15:45 +0300 | [diff] [blame] | 5406 | break; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5407 | } |
| 5408 | |
Pavel Begunkov | 9466f43 | 2020-01-25 22:34:01 +0300 | [diff] [blame] | 5409 | if (unlikely(submitted != nr)) { |
| 5410 | int ref_used = (submitted == -EAGAIN) ? 0 : submitted; |
| 5411 | |
| 5412 | percpu_ref_put_many(&ctx->refs, nr - ref_used); |
| 5413 | } |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 5414 | if (link) |
Pavel Begunkov | 1b4a51b | 2019-11-21 11:54:28 +0300 | [diff] [blame] | 5415 | io_queue_link_head(link); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5416 | if (statep) |
| 5417 | io_submit_state_end(&state); |
| 5418 | |
Pavel Begunkov | ae9428c | 2019-11-06 00:22:14 +0300 | [diff] [blame] | 5419 | /* Commit SQ ring head once we've consumed and submitted all SQEs */ |
| 5420 | io_commit_sqring(ctx); |
| 5421 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5422 | return submitted; |
| 5423 | } |
| 5424 | |
| 5425 | static int io_sq_thread(void *data) |
| 5426 | { |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5427 | struct io_ring_ctx *ctx = data; |
| 5428 | struct mm_struct *cur_mm = NULL; |
Jens Axboe | 181e448 | 2019-11-25 08:52:30 -0700 | [diff] [blame] | 5429 | const struct cred *old_cred; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5430 | mm_segment_t old_fs; |
| 5431 | DEFINE_WAIT(wait); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5432 | unsigned long timeout; |
Xiaoguang Wang | bdcd3ea | 2020-02-25 22:12:08 +0800 | [diff] [blame] | 5433 | int ret = 0; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5434 | |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 5435 | complete(&ctx->completions[1]); |
Jackie Liu | a4c0b3d | 2019-07-08 13:41:12 +0800 | [diff] [blame] | 5436 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5437 | old_fs = get_fs(); |
| 5438 | set_fs(USER_DS); |
Jens Axboe | 181e448 | 2019-11-25 08:52:30 -0700 | [diff] [blame] | 5439 | old_cred = override_creds(ctx->creds); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5440 | |
Xiaoguang Wang | bdcd3ea | 2020-02-25 22:12:08 +0800 | [diff] [blame] | 5441 | timeout = jiffies + ctx->sq_thread_idle; |
Roman Penyaev | 2bbcd6d | 2019-05-16 10:53:57 +0200 | [diff] [blame] | 5442 | while (!kthread_should_park()) { |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 5443 | unsigned int to_submit; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5444 | |
Xiaoguang Wang | bdcd3ea | 2020-02-25 22:12:08 +0800 | [diff] [blame] | 5445 | if (!list_empty(&ctx->poll_list)) { |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5446 | unsigned nr_events = 0; |
| 5447 | |
Xiaoguang Wang | bdcd3ea | 2020-02-25 22:12:08 +0800 | [diff] [blame] | 5448 | mutex_lock(&ctx->uring_lock); |
| 5449 | if (!list_empty(&ctx->poll_list)) |
| 5450 | io_iopoll_getevents(ctx, &nr_events, 0); |
| 5451 | else |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5452 | timeout = jiffies + ctx->sq_thread_idle; |
Xiaoguang Wang | bdcd3ea | 2020-02-25 22:12:08 +0800 | [diff] [blame] | 5453 | mutex_unlock(&ctx->uring_lock); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5454 | } |
| 5455 | |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 5456 | to_submit = io_sqring_entries(ctx); |
Jens Axboe | c1edbf5 | 2019-11-10 16:56:04 -0700 | [diff] [blame] | 5457 | |
| 5458 | /* |
| 5459 | * If submit got -EBUSY, flag us as needing the application |
| 5460 | * to enter the kernel to reap and flush events. |
| 5461 | */ |
| 5462 | if (!to_submit || ret == -EBUSY) { |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5463 | /* |
Stefano Garzarella | 7143b5a | 2020-02-21 16:42:16 +0100 | [diff] [blame] | 5464 | * Drop cur_mm before scheduling, we can't hold it for |
| 5465 | * long periods (or over schedule()). Do this before |
| 5466 | * adding ourselves to the waitqueue, as the unuse/drop |
| 5467 | * may sleep. |
| 5468 | */ |
| 5469 | if (cur_mm) { |
| 5470 | unuse_mm(cur_mm); |
| 5471 | mmput(cur_mm); |
| 5472 | cur_mm = NULL; |
| 5473 | } |
| 5474 | |
| 5475 | /* |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5476 | * We're polling. If we're within the defined idle |
| 5477 | * period, then let us spin without work before going |
Jens Axboe | c1edbf5 | 2019-11-10 16:56:04 -0700 | [diff] [blame] | 5478 | * to sleep. The exception is if we got -EBUSY doing
| 5479 | * more IO; in that case we should wait for the
| 5480 | * application to reap events and wake us up.
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5481 | */ |
Xiaoguang Wang | bdcd3ea | 2020-02-25 22:12:08 +0800 | [diff] [blame] | 5482 | if (!list_empty(&ctx->poll_list) || |
Jens Axboe | df069d8 | 2020-02-04 16:48:34 -0700 | [diff] [blame] | 5483 | (!time_after(jiffies, timeout) && ret != -EBUSY && |
| 5484 | !percpu_ref_is_dying(&ctx->refs))) { |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 5485 | if (current->task_works) |
| 5486 | task_work_run(); |
Jens Axboe | 9831a90 | 2019-09-19 09:48:55 -0600 | [diff] [blame] | 5487 | cond_resched(); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5488 | continue; |
| 5489 | } |
| 5490 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5491 | prepare_to_wait(&ctx->sqo_wait, &wait, |
| 5492 | TASK_INTERRUPTIBLE); |
| 5493 | |
Xiaoguang Wang | bdcd3ea | 2020-02-25 22:12:08 +0800 | [diff] [blame] | 5494 | /* |
| 5495 | * While doing polled IO, before going to sleep, we need
| 5496 | * to check if there are new reqs added to poll_list;
| 5497 | * reqs may have been punted to an io-wq worker and will
| 5498 | * be added to poll_list later, hence check the poll_list
| 5499 | * again.
| 5500 | */ |
| 5501 | if ((ctx->flags & IORING_SETUP_IOPOLL) && |
| 5502 | !list_empty_careful(&ctx->poll_list)) { |
| 5503 | finish_wait(&ctx->sqo_wait, &wait); |
| 5504 | continue; |
| 5505 | } |
| 5506 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5507 | /* Tell userspace we may need a wakeup call */ |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 5508 | ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP; |
Stefan Bühler | 0d7bae6 | 2019-04-19 11:57:45 +0200 | [diff] [blame] | 5509 | /* make sure to read SQ tail after writing flags */ |
| 5510 | smp_mb(); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5511 | |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 5512 | to_submit = io_sqring_entries(ctx); |
Jens Axboe | c1edbf5 | 2019-11-10 16:56:04 -0700 | [diff] [blame] | 5513 | if (!to_submit || ret == -EBUSY) { |
Roman Penyaev | 2bbcd6d | 2019-05-16 10:53:57 +0200 | [diff] [blame] | 5514 | if (kthread_should_park()) { |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5515 | finish_wait(&ctx->sqo_wait, &wait); |
| 5516 | break; |
| 5517 | } |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 5518 | if (current->task_works) { |
| 5519 | task_work_run(); |
| 5520 | continue; |
| 5521 | } |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5522 | if (signal_pending(current)) |
| 5523 | flush_signals(current); |
| 5524 | schedule(); |
| 5525 | finish_wait(&ctx->sqo_wait, &wait); |
| 5526 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 5527 | ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5528 | continue; |
| 5529 | } |
| 5530 | finish_wait(&ctx->sqo_wait, &wait); |
| 5531 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 5532 | ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5533 | } |
| 5534 | |
Jens Axboe | 8a4955f | 2019-12-09 14:52:35 -0700 | [diff] [blame] | 5535 | mutex_lock(&ctx->uring_lock); |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 5536 | ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true); |
Jens Axboe | 8a4955f | 2019-12-09 14:52:35 -0700 | [diff] [blame] | 5537 | mutex_unlock(&ctx->uring_lock); |
Xiaoguang Wang | bdcd3ea | 2020-02-25 22:12:08 +0800 | [diff] [blame] | 5538 | timeout = jiffies + ctx->sq_thread_idle; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5539 | } |
| 5540 | |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 5541 | if (current->task_works) |
| 5542 | task_work_run(); |
| 5543 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5544 | set_fs(old_fs); |
| 5545 | if (cur_mm) { |
| 5546 | unuse_mm(cur_mm); |
| 5547 | mmput(cur_mm); |
| 5548 | } |
Jens Axboe | 181e448 | 2019-11-25 08:52:30 -0700 | [diff] [blame] | 5549 | revert_creds(old_cred); |
Jens Axboe | 0605863 | 2019-04-13 09:26:03 -0600 | [diff] [blame] | 5550 | |
Roman Penyaev | 2bbcd6d | 2019-05-16 10:53:57 +0200 | [diff] [blame] | 5551 | kthread_parkme(); |
Jens Axboe | 0605863 | 2019-04-13 09:26:03 -0600 | [diff] [blame] | 5552 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5553 | return 0; |
| 5554 | } |
| 5555 | |
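/*
 * Waiter state for io_cqring_wait(): the wait queue entry plus the number
 * of events to wait for and the timeout count sampled when waiting began.
 */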
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 5556 | struct io_wait_queue { |
| 5557 | struct wait_queue_entry wq; |
| 5558 | struct io_ring_ctx *ctx; |
| 5559 | unsigned to_wait; |
| 5560 | unsigned nr_timeouts; |
| 5561 | }; |
| 5562 | |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 5563 | static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush) |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 5564 | { |
| 5565 | struct io_ring_ctx *ctx = iowq->ctx; |
| 5566 | |
| 5567 | /* |
Brian Gianforcaro | d195a66 | 2019-12-13 03:09:50 -0800 | [diff] [blame] | 5568 | * Wake up if we have enough events, or if a timeout occurred since we |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 5569 | * started waiting. For timeouts, we always want to return to userspace, |
| 5570 | * regardless of event count. |
| 5571 | */ |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 5572 | return io_cqring_events(ctx, noflush) >= iowq->to_wait || |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 5573 | atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; |
| 5574 | } |
| 5575 | |
| 5576 | static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, |
| 5577 | int wake_flags, void *key) |
| 5578 | { |
| 5579 | struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, |
| 5580 | wq); |
| 5581 | |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 5582 | /* use noflush == true, as we can't safely rely on locking context */ |
| 5583 | if (!io_should_wake(iowq, true)) |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 5584 | return -1; |
| 5585 | |
| 5586 | return autoremove_wake_function(curr, mode, wake_flags, key); |
| 5587 | } |
| 5588 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5589 | /* |
| 5590 | * Wait until events become available, if we don't already have some. The |
| 5591 | * application must reap them itself, as they reside on the shared cq ring. |
| 5592 | */ |
| 5593 | static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, |
| 5594 | const sigset_t __user *sig, size_t sigsz) |
| 5595 | { |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 5596 | struct io_wait_queue iowq = { |
| 5597 | .wq = { |
| 5598 | .private = current, |
| 5599 | .func = io_wake_function, |
| 5600 | .entry = LIST_HEAD_INIT(iowq.wq.entry), |
| 5601 | }, |
| 5602 | .ctx = ctx, |
| 5603 | .to_wait = min_events, |
| 5604 | }; |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 5605 | struct io_rings *rings = ctx->rings; |
Jackie Liu | e9ffa5c | 2019-10-29 11:16:42 +0800 | [diff] [blame] | 5606 | int ret = 0; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5607 | |
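	/*
	 * Run any pending task_work first; it may post completions that
	 * already satisfy min_events, letting us return without sleeping.
	 */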
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 5608 | do { |
| 5609 | if (io_cqring_events(ctx, false) >= min_events) |
| 5610 | return 0; |
| 5611 | if (!current->task_works) |
| 5612 | break; |
| 5613 | task_work_run(); |
| 5614 | } while (1); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5615 | |
| 5616 | if (sig) { |
Arnd Bergmann | 9e75ad5 | 2019-03-25 15:34:53 +0100 | [diff] [blame] | 5617 | #ifdef CONFIG_COMPAT |
| 5618 | if (in_compat_syscall()) |
| 5619 | ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig, |
Oleg Nesterov | b772434 | 2019-07-16 16:29:53 -0700 | [diff] [blame] | 5620 | sigsz); |
Arnd Bergmann | 9e75ad5 | 2019-03-25 15:34:53 +0100 | [diff] [blame] | 5621 | else |
| 5622 | #endif |
Oleg Nesterov | b772434 | 2019-07-16 16:29:53 -0700 | [diff] [blame] | 5623 | ret = set_user_sigmask(sig, sigsz); |
Arnd Bergmann | 9e75ad5 | 2019-03-25 15:34:53 +0100 | [diff] [blame] | 5624 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5625 | if (ret) |
| 5626 | return ret; |
| 5627 | } |
| 5628 | |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 5629 | iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); |
Dmitrii Dolgov | c826bd7 | 2019-10-15 19:02:01 +0200 | [diff] [blame] | 5630 | trace_io_uring_cqring_wait(ctx, min_events); |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 5631 | do { |
| 5632 | prepare_to_wait_exclusive(&ctx->wait, &iowq.wq, |
| 5633 | TASK_INTERRUPTIBLE); |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 5634 | if (current->task_works) |
| 5635 | task_work_run(); |
Jens Axboe | 1d7bb1d | 2019-11-06 11:31:17 -0700 | [diff] [blame] | 5636 | if (io_should_wake(&iowq, false)) |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 5637 | break; |
| 5638 | schedule(); |
| 5639 | if (signal_pending(current)) { |
Jackie Liu | e9ffa5c | 2019-10-29 11:16:42 +0800 | [diff] [blame] | 5640 | ret = -EINTR; |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 5641 | break; |
| 5642 | } |
| 5643 | } while (1); |
| 5644 | finish_wait(&ctx->wait, &iowq.wq); |
| 5645 | |
Jackie Liu | e9ffa5c | 2019-10-29 11:16:42 +0800 | [diff] [blame] | 5646 | restore_saved_sigmask_unless(ret == -EINTR); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5647 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 5648 | return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 5649 | } |
| 5650 | |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5651 | static void __io_sqe_files_unregister(struct io_ring_ctx *ctx) |
| 5652 | { |
| 5653 | #if defined(CONFIG_UNIX) |
| 5654 | if (ctx->ring_sock) { |
| 5655 | struct sock *sock = ctx->ring_sock->sk; |
| 5656 | struct sk_buff *skb; |
| 5657 | |
| 5658 | while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL) |
| 5659 | kfree_skb(skb); |
| 5660 | } |
| 5661 | #else |
| 5662 | int i; |
| 5663 | |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 5664 | for (i = 0; i < ctx->nr_user_files; i++) { |
| 5665 | struct file *file; |
| 5666 | |
| 5667 | file = io_file_from_index(ctx, i); |
| 5668 | if (file) |
| 5669 | fput(file); |
| 5670 | } |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5671 | #endif |
| 5672 | } |
| 5673 | |
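/*
 * Kill-confirm callback for the fixed file data's percpu ref; wakes
 * io_sqe_files_unregister(), which waits on data->done.
 */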
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5674 | static void io_file_ref_kill(struct percpu_ref *ref) |
| 5675 | { |
| 5676 | struct fixed_file_data *data; |
| 5677 | |
| 5678 | data = container_of(ref, struct fixed_file_data, refs); |
| 5679 | complete(&data->done); |
| 5680 | } |
| 5681 | |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5682 | static int io_sqe_files_unregister(struct io_ring_ctx *ctx) |
| 5683 | { |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5684 | struct fixed_file_data *data = ctx->file_data; |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 5685 | unsigned nr_tables, i; |
| 5686 | |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5687 | if (!data) |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5688 | return -ENXIO; |
| 5689 | |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5690 | percpu_ref_kill_and_confirm(&data->refs, io_file_ref_kill); |
Jens Axboe | e46a795 | 2020-01-17 11:15:34 -0700 | [diff] [blame] | 5691 | flush_work(&data->ref_work); |
Jens Axboe | 2faf852 | 2020-02-04 19:54:55 -0700 | [diff] [blame] | 5692 | wait_for_completion(&data->done); |
| 5693 | io_ring_file_ref_flush(data); |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5694 | percpu_ref_exit(&data->refs); |
| 5695 | |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5696 | __io_sqe_files_unregister(ctx); |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 5697 | nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE); |
| 5698 | for (i = 0; i < nr_tables; i++) |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5699 | kfree(data->table[i].files); |
| 5700 | kfree(data->table); |
| 5701 | kfree(data); |
| 5702 | ctx->file_data = NULL; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5703 | ctx->nr_user_files = 0; |
| 5704 | return 0; |
| 5705 | } |
| 5706 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5707 | static void io_sq_thread_stop(struct io_ring_ctx *ctx) |
| 5708 | { |
| 5709 | if (ctx->sqo_thread) { |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 5710 | wait_for_completion(&ctx->completions[1]); |
Roman Penyaev | 2bbcd6d | 2019-05-16 10:53:57 +0200 | [diff] [blame] | 5711 | /* |
| 5712 | * The park is a bit of a work-around, without it we get |
| 5713 | * warning spews on shutdown with SQPOLL set and affinity |
| 5714 | * set to a single CPU. |
| 5715 | */ |
Jens Axboe | 0605863 | 2019-04-13 09:26:03 -0600 | [diff] [blame] | 5716 | kthread_park(ctx->sqo_thread); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5717 | kthread_stop(ctx->sqo_thread); |
| 5718 | ctx->sqo_thread = NULL; |
| 5719 | } |
| 5720 | } |
| 5721 | |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5722 | static void io_finish_async(struct io_ring_ctx *ctx) |
| 5723 | { |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 5724 | io_sq_thread_stop(ctx); |
| 5725 | |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 5726 | if (ctx->io_wq) { |
| 5727 | io_wq_destroy(ctx->io_wq); |
| 5728 | ctx->io_wq = NULL; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5729 | } |
| 5730 | } |
| 5731 | |
| 5732 | #if defined(CONFIG_UNIX) |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5733 | /* |
| 5734 | * Ensure the UNIX gc is aware of our file set, so we are certain that |
| 5735 | * the io_uring can be safely unregistered on process exit, even if we have |
| 5736 | * loops in the file referencing. |
| 5737 | */ |
| 5738 | static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) |
| 5739 | { |
| 5740 | struct sock *sk = ctx->ring_sock->sk; |
| 5741 | struct scm_fp_list *fpl; |
| 5742 | struct sk_buff *skb; |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame] | 5743 | int i, nr_files; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5744 | |
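	/*
	 * Unprivileged users may not pin more files via SCM_RIGHTS than
	 * their RLIMIT_NOFILE allows.
	 */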
| 5745 | if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { |
| 5746 | unsigned long inflight = ctx->user->unix_inflight + nr; |
| 5747 | |
| 5748 | if (inflight > task_rlimit(current, RLIMIT_NOFILE)) |
| 5749 | return -EMFILE; |
| 5750 | } |
| 5751 | |
| 5752 | fpl = kzalloc(sizeof(*fpl), GFP_KERNEL); |
| 5753 | if (!fpl) |
| 5754 | return -ENOMEM; |
| 5755 | |
| 5756 | skb = alloc_skb(0, GFP_KERNEL); |
| 5757 | if (!skb) { |
| 5758 | kfree(fpl); |
| 5759 | return -ENOMEM; |
| 5760 | } |
| 5761 | |
| 5762 | skb->sk = sk; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5763 | |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame] | 5764 | nr_files = 0; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5765 | fpl->user = get_uid(ctx->user); |
| 5766 | for (i = 0; i < nr; i++) { |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 5767 | struct file *file = io_file_from_index(ctx, i + offset); |
| 5768 | |
| 5769 | if (!file) |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame] | 5770 | continue; |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 5771 | fpl->fp[nr_files] = get_file(file); |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame] | 5772 | unix_inflight(fpl->user, fpl->fp[nr_files]); |
| 5773 | nr_files++; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5774 | } |
| 5775 | |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame] | 5776 | if (nr_files) { |
| 5777 | fpl->max = SCM_MAX_FD; |
| 5778 | fpl->count = nr_files; |
| 5779 | UNIXCB(skb).fp = fpl; |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5780 | skb->destructor = unix_destruct_scm; |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame] | 5781 | refcount_add(skb->truesize, &sk->sk_wmem_alloc); |
| 5782 | skb_queue_head(&sk->sk_receive_queue, skb); |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5783 | |
Jens Axboe | 08a4517 | 2019-10-03 08:11:03 -0600 | [diff] [blame] | 5784 | for (i = 0; i < nr_files; i++) |
| 5785 | fput(fpl->fp[i]); |
| 5786 | } else { |
| 5787 | kfree_skb(skb); |
| 5788 | kfree(fpl); |
| 5789 | } |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5790 | |
| 5791 | return 0; |
| 5792 | } |
| 5793 | |
| 5794 | /* |
| 5795 | * If UNIX sockets are enabled, fd passing can cause a reference cycle which |
| 5796 | * causes regular reference counting to break down. We rely on the UNIX |
| 5797 | * garbage collection to take care of this problem for us. |
| 5798 | */ |
| 5799 | static int io_sqe_files_scm(struct io_ring_ctx *ctx) |
| 5800 | { |
| 5801 | unsigned left, total; |
| 5802 | int ret = 0; |
| 5803 | |
| 5804 | total = 0; |
| 5805 | left = ctx->nr_user_files; |
| 5806 | while (left) { |
| 5807 | unsigned this_files = min_t(unsigned, left, SCM_MAX_FD); |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5808 | |
| 5809 | ret = __io_sqe_files_scm(ctx, this_files, total); |
| 5810 | if (ret) |
| 5811 | break; |
| 5812 | left -= this_files; |
| 5813 | total += this_files; |
| 5814 | } |
| 5815 | |
| 5816 | if (!ret) |
| 5817 | return 0; |
| 5818 | |
| 5819 | while (total < ctx->nr_user_files) { |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 5820 | struct file *file = io_file_from_index(ctx, total); |
| 5821 | |
| 5822 | if (file) |
| 5823 | fput(file); |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 5824 | total++; |
| 5825 | } |
| 5826 | |
| 5827 | return ret; |
| 5828 | } |
| 5829 | #else |
| 5830 | static int io_sqe_files_scm(struct io_ring_ctx *ctx) |
| 5831 | { |
| 5832 | return 0; |
| 5833 | } |
| 5834 | #endif |
| 5835 | |
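/*
 * Allocate the fixed file tables in chunks of up to IORING_MAX_FILES_TABLE
 * entries each. Returns 0 on success; on allocation failure, any tables
 * already allocated are freed and 1 is returned.
 */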
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 5836 | static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables, |
| 5837 | unsigned nr_files) |
| 5838 | { |
| 5839 | int i; |
| 5840 | |
| 5841 | for (i = 0; i < nr_tables; i++) { |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5842 | struct fixed_file_table *table = &ctx->file_data->table[i]; |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 5843 | unsigned this_files; |
| 5844 | |
| 5845 | this_files = min(nr_files, IORING_MAX_FILES_TABLE); |
| 5846 | table->files = kcalloc(this_files, sizeof(struct file *), |
| 5847 | GFP_KERNEL); |
| 5848 | if (!table->files) |
| 5849 | break; |
| 5850 | nr_files -= this_files; |
| 5851 | } |
| 5852 | |
| 5853 | if (i == nr_tables) |
| 5854 | return 0; |
| 5855 | |
| 5856 | for (i = 0; i < nr_tables; i++) { |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5857 | struct fixed_file_table *table = &ctx->file_data->table[i]; |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 5858 | kfree(table->files); |
| 5859 | } |
| 5860 | return 1; |
| 5861 | } |
| 5862 | |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5863 | static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file) |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 5864 | { |
| 5865 | #if defined(CONFIG_UNIX) |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 5866 | struct sock *sock = ctx->ring_sock->sk; |
| 5867 | struct sk_buff_head list, *head = &sock->sk_receive_queue; |
| 5868 | struct sk_buff *skb; |
| 5869 | int i; |
| 5870 | |
| 5871 | __skb_queue_head_init(&list); |
| 5872 | |
| 5873 | /* |
| 5874 | * Find the skb that holds this file in its SCM_RIGHTS. When found, |
| 5875 | * remove this entry and rearrange the file array. |
| 5876 | */ |
| 5877 | skb = skb_dequeue(head); |
| 5878 | while (skb) { |
| 5879 | struct scm_fp_list *fp; |
| 5880 | |
| 5881 | fp = UNIXCB(skb).fp; |
| 5882 | for (i = 0; i < fp->count; i++) { |
| 5883 | int left; |
| 5884 | |
| 5885 | if (fp->fp[i] != file) |
| 5886 | continue; |
| 5887 | |
| 5888 | unix_notinflight(fp->user, fp->fp[i]); |
| 5889 | left = fp->count - 1 - i; |
| 5890 | if (left) { |
| 5891 | memmove(&fp->fp[i], &fp->fp[i + 1], |
| 5892 | left * sizeof(struct file *)); |
| 5893 | } |
| 5894 | fp->count--; |
| 5895 | if (!fp->count) { |
| 5896 | kfree_skb(skb); |
| 5897 | skb = NULL; |
| 5898 | } else { |
| 5899 | __skb_queue_tail(&list, skb); |
| 5900 | } |
| 5901 | fput(file); |
| 5902 | file = NULL; |
| 5903 | break; |
| 5904 | } |
| 5905 | |
| 5906 | if (!file) |
| 5907 | break; |
| 5908 | |
| 5909 | __skb_queue_tail(&list, skb); |
| 5910 | |
| 5911 | skb = skb_dequeue(head); |
| 5912 | } |
| 5913 | |
| 5914 | if (skb_peek(&list)) { |
| 5915 | spin_lock_irq(&head->lock); |
| 5916 | while ((skb = __skb_dequeue(&list)) != NULL) |
| 5917 | __skb_queue_tail(head, skb); |
| 5918 | spin_unlock_irq(&head->lock); |
| 5919 | } |
| 5920 | #else |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5921 | fput(file); |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 5922 | #endif |
| 5923 | } |
| 5924 | |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5925 | struct io_file_put { |
| 5926 | struct llist_node llist; |
| 5927 | struct file *file; |
| 5928 | struct completion *done; |
| 5929 | }; |
| 5930 | |
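| | /* Drain the lock-free list of file puts queued by io_queue_file_removal() */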
Jens Axboe | 2faf852 | 2020-02-04 19:54:55 -0700 | [diff] [blame] | 5931 | static void io_ring_file_ref_flush(struct fixed_file_data *data) |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5932 | { |
| 5933 | struct io_file_put *pfile, *tmp; |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5934 | struct llist_node *node; |
| 5935 | |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5936 | while ((node = llist_del_all(&data->put_llist)) != NULL) { |
| 5937 | llist_for_each_entry_safe(pfile, tmp, node, llist) { |
| 5938 | io_ring_file_put(data->ctx, pfile->file); |
| 5939 | if (pfile->done) |
| 5940 | complete(pfile->done); |
| 5941 | else |
| 5942 | kfree(pfile); |
| 5943 | } |
| 5944 | } |
Jens Axboe | 2faf852 | 2020-02-04 19:54:55 -0700 | [diff] [blame] | 5945 | } |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5946 | |
Jens Axboe | 2faf852 | 2020-02-04 19:54:55 -0700 | [diff] [blame] | 5947 | static void io_ring_file_ref_switch(struct work_struct *work) |
| 5948 | { |
| 5949 | struct fixed_file_data *data; |
| 5950 | |
| 5951 | data = container_of(work, struct fixed_file_data, ref_work); |
| 5952 | io_ring_file_ref_flush(data); |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5953 | percpu_ref_switch_to_percpu(&data->refs); |
| 5954 | } |
| 5955 | |
| 5956 | static void io_file_data_ref_zero(struct percpu_ref *ref) |
| 5957 | { |
| 5958 | struct fixed_file_data *data; |
| 5959 | |
| 5960 | data = container_of(ref, struct fixed_file_data, refs); |
| 5961 | |
Jens Axboe | 2faf852 | 2020-02-04 19:54:55 -0700 | [diff] [blame] | 5962 | /* |
| 5963 | * We can't safely switch from inside this context, punt to wq. If |
| 5964 | * the table ref is going away, the table is being unregistered. |
| 5965 | * Don't queue up the async work for that case, the caller will |
| 5966 | * handle it. |
| 5967 | */ |
| 5968 | if (!percpu_ref_is_dying(&data->refs)) |
| 5969 | queue_work(system_wq, &data->ref_work); |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 5970 | } |
| 5971 | |
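| | /*
| |  * Register an array of fds as fixed files (IORING_REGISTER_FILES). An fd of
| |  * -1 leaves a sparse slot. Userspace typically reaches this through
| |  * io_uring_register(ring_fd, IORING_REGISTER_FILES, fds, nr_fds), e.g. via
| |  * liburing's io_uring_register_files() helper.
| |  */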
| 5972 | static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, |
| 5973 | unsigned nr_args) |
| 5974 | { |
| 5975 | __s32 __user *fds = (__s32 __user *) arg; |
| 5976 | unsigned nr_tables; |
| 5977 | struct file *file; |
| 5978 | int fd, ret = 0; |
| 5979 | unsigned i; |
| 5980 | |
| 5981 | if (ctx->file_data) |
| 5982 | return -EBUSY; |
| 5983 | if (!nr_args) |
| 5984 | return -EINVAL; |
| 5985 | if (nr_args > IORING_MAX_FIXED_FILES) |
| 5986 | return -EMFILE; |
| 5987 | |
| 5988 | ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL); |
| 5989 | if (!ctx->file_data) |
| 5990 | return -ENOMEM; |
| 5991 | ctx->file_data->ctx = ctx; |
| 5992 | init_completion(&ctx->file_data->done); |
| 5993 | |
| 5994 | nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE); |
| 5995 | ctx->file_data->table = kcalloc(nr_tables, |
| 5996 | sizeof(struct fixed_file_table), |
| 5997 | GFP_KERNEL); |
| 5998 | if (!ctx->file_data->table) { |
| 5999 | kfree(ctx->file_data); |
| 6000 | ctx->file_data = NULL; |
| 6001 | return -ENOMEM; |
| 6002 | } |
| 6003 | |
| 6004 | if (percpu_ref_init(&ctx->file_data->refs, io_file_data_ref_zero, |
| 6005 | PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) { |
| 6006 | kfree(ctx->file_data->table); |
| 6007 | kfree(ctx->file_data); |
| 6008 | ctx->file_data = NULL; |
| 6009 | return -ENOMEM; |
| 6010 | } |
| 6011 | ctx->file_data->put_llist.first = NULL; |
| 6012 | INIT_WORK(&ctx->file_data->ref_work, io_ring_file_ref_switch); |
| 6013 | |
| 6014 | if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) { |
| 6015 | percpu_ref_exit(&ctx->file_data->refs); |
| 6016 | kfree(ctx->file_data->table); |
| 6017 | kfree(ctx->file_data); |
| 6018 | ctx->file_data = NULL; |
| 6019 | return -ENOMEM; |
| 6020 | } |
| 6021 | |
| 6022 | for (i = 0; i < nr_args; i++, ctx->nr_user_files++) { |
| 6023 | struct fixed_file_table *table; |
| 6024 | unsigned index; |
| 6025 | |
| 6026 | ret = -EFAULT; |
| 6027 | if (copy_from_user(&fd, &fds[i], sizeof(fd))) |
| 6028 | break; |
| 6029 | /* allow sparse sets */ |
| 6030 | if (fd == -1) { |
| 6031 | ret = 0; |
| 6032 | continue; |
| 6033 | } |
| 6034 | |
| 6035 | table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT]; |
| 6036 | index = i & IORING_FILE_TABLE_MASK; |
| 6037 | file = fget(fd); |
| 6038 | |
| 6039 | ret = -EBADF; |
| 6040 | if (!file) |
| 6041 | break; |
| 6042 | |
| 6043 | /* |
| 6044 | * Don't allow io_uring instances to be registered. If UNIX |
| 6045 | * isn't enabled, then this causes a reference cycle and this |
| 6046 | * instance can never get freed. If UNIX is enabled we'll |
| 6047 | * handle it just fine, but there's still no point in allowing |
| 6048 | * a ring fd as it doesn't support regular read/write anyway. |
| 6049 | */ |
| 6050 | if (file->f_op == &io_uring_fops) { |
| 6051 | fput(file); |
| 6052 | break; |
| 6053 | } |
| 6054 | ret = 0; |
| 6055 | table->files[index] = file; |
| 6056 | } |
| 6057 | |
| 6058 | if (ret) { |
| 6059 | for (i = 0; i < ctx->nr_user_files; i++) { |
| 6060 | file = io_file_from_index(ctx, i); |
| 6061 | if (file) |
| 6062 | fput(file); |
| 6063 | } |
| 6064 | for (i = 0; i < nr_tables; i++) |
| 6065 | kfree(ctx->file_data->table[i].files); |
| 6066 | |
| 6067 | kfree(ctx->file_data->table); |
| 6068 | kfree(ctx->file_data); |
| 6069 | ctx->file_data = NULL; |
| 6070 | ctx->nr_user_files = 0; |
| 6071 | return ret; |
| 6072 | } |
| 6073 | |
| 6074 | ret = io_sqe_files_scm(ctx); |
| 6075 | if (ret) |
| 6076 | io_sqe_files_unregister(ctx); |
| 6077 | |
| 6078 | return ret; |
| 6079 | } |
| 6080 | |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 6081 | static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file, |
| 6082 | int index) |
| 6083 | { |
| 6084 | #if defined(CONFIG_UNIX) |
| 6085 | struct sock *sock = ctx->ring_sock->sk; |
| 6086 | struct sk_buff_head *head = &sock->sk_receive_queue; |
| 6087 | struct sk_buff *skb; |
| 6088 | |
| 6089 | /* |
| 6090 | * See if we can merge this file into an existing skb SCM_RIGHTS |
| 6091 | * file set. If there's no room, fall back to allocating a new skb |
| 6092 | * and filling it in. |
| 6093 | */ |
| 6094 | spin_lock_irq(&head->lock); |
| 6095 | skb = skb_peek(head); |
| 6096 | if (skb) { |
| 6097 | struct scm_fp_list *fpl = UNIXCB(skb).fp; |
| 6098 | |
| 6099 | if (fpl->count < SCM_MAX_FD) { |
| 6100 | __skb_unlink(skb, head); |
| 6101 | spin_unlock_irq(&head->lock); |
| 6102 | fpl->fp[fpl->count] = get_file(file); |
| 6103 | unix_inflight(fpl->user, fpl->fp[fpl->count]); |
| 6104 | fpl->count++; |
| 6105 | spin_lock_irq(&head->lock); |
| 6106 | __skb_queue_head(head, skb); |
| 6107 | } else { |
| 6108 | skb = NULL; |
| 6109 | } |
| 6110 | } |
| 6111 | spin_unlock_irq(&head->lock); |
| 6112 | |
| 6113 | if (skb) { |
| 6114 | fput(file); |
| 6115 | return 0; |
| 6116 | } |
| 6117 | |
| 6118 | return __io_sqe_files_scm(ctx, 1, index); |
| 6119 | #else |
| 6120 | return 0; |
| 6121 | #endif |
| 6122 | } |
| 6123 | |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 6124 | static void io_atomic_switch(struct percpu_ref *ref) |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 6125 | { |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 6126 | struct fixed_file_data *data; |
| 6127 | |
Jens Axboe | dd3db2a | 2020-02-26 10:23:43 -0700 | [diff] [blame] | 6128 | /* |
| 6129 | * Juggle reference to ensure we hit zero, if needed, so we can |
| 6130 | * switch back to percpu mode |
| 6131 | */ |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 6132 | data = container_of(ref, struct fixed_file_data, refs); |
Jens Axboe | dd3db2a | 2020-02-26 10:23:43 -0700 | [diff] [blame] | 6133 | percpu_ref_put(&data->refs); |
| 6134 | percpu_ref_get(&data->refs); |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 6135 | } |
| 6136 | |
| 6137 | static bool io_queue_file_removal(struct fixed_file_data *data, |
| 6138 | struct file *file) |
| 6139 | { |
| 6140 | struct io_file_put *pfile, pfile_stack; |
| 6141 | DECLARE_COMPLETION_ONSTACK(done); |
| 6142 | |
| 6143 | /* |
| 6144 | * If we fail allocating the struct we need for doing async removal
| 6145 | * of this file, just punt to sync and wait for it. |
| 6146 | */ |
| 6147 | pfile = kzalloc(sizeof(*pfile), GFP_KERNEL); |
| 6148 | if (!pfile) { |
| 6149 | pfile = &pfile_stack; |
| 6150 | pfile->done = &done; |
| 6151 | } |
| 6152 | |
| 6153 | pfile->file = file; |
| 6154 | llist_add(&pfile->llist, &data->put_llist); |
| 6155 | |
| 6156 | if (pfile == &pfile_stack) { |
Jens Axboe | dd3db2a | 2020-02-26 10:23:43 -0700 | [diff] [blame] | 6157 | percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch); |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 6158 | wait_for_completion(&done); |
| 6159 | flush_work(&data->ref_work); |
| 6160 | return false; |
| 6161 | } |
| 6162 | |
| 6163 | return true; |
| 6164 | } |
| 6165 | |
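| | /*
| |  * Apply an IORING_REGISTER_FILES_UPDATE request: for each slot starting at
| |  * up->offset, queue removal of any existing fixed file and install the new
| |  * fd, or just clear the slot if the new fd is -1. Returns the number of
| |  * slots processed, or a negative error if nothing was updated.
| |  */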
| 6166 | static int __io_sqe_files_update(struct io_ring_ctx *ctx, |
| 6167 | struct io_uring_files_update *up, |
| 6168 | unsigned nr_args) |
| 6169 | { |
| 6170 | struct fixed_file_data *data = ctx->file_data; |
| 6171 | bool ref_switch = false; |
| 6172 | struct file *file; |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 6173 | __s32 __user *fds; |
| 6174 | int fd, i, err; |
| 6175 | __u32 done; |
| 6176 | |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 6177 | if (check_add_overflow(up->offset, nr_args, &done)) |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 6178 | return -EOVERFLOW; |
| 6179 | if (done > ctx->nr_user_files) |
| 6180 | return -EINVAL; |
| 6181 | |
| 6182 | done = 0; |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 6183 | fds = u64_to_user_ptr(up->fds); |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 6184 | while (nr_args) { |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 6185 | struct fixed_file_table *table; |
| 6186 | unsigned index; |
| 6187 | |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 6188 | err = 0; |
| 6189 | if (copy_from_user(&fd, &fds[done], sizeof(fd))) { |
| 6190 | err = -EFAULT; |
| 6191 | break; |
| 6192 | } |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 6193 | i = array_index_nospec(up->offset, ctx->nr_user_files); |
| 6194 | table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT]; |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 6195 | index = i & IORING_FILE_TABLE_MASK; |
| 6196 | if (table->files[index]) { |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 6197 | file = io_file_from_index(ctx, index); |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 6198 | table->files[index] = NULL; |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 6199 | if (io_queue_file_removal(data, file)) |
| 6200 | ref_switch = true; |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 6201 | } |
| 6202 | if (fd != -1) { |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 6203 | file = fget(fd); |
| 6204 | if (!file) { |
| 6205 | err = -EBADF; |
| 6206 | break; |
| 6207 | } |
| 6208 | /* |
| 6209 | * Don't allow io_uring instances to be registered. If |
| 6210 | * UNIX isn't enabled, then this causes a reference |
| 6211 | * cycle and this instance can never get freed. If UNIX |
| 6212 | * is enabled we'll handle it just fine, but there's |
| 6213 | * still no point in allowing a ring fd as it doesn't |
| 6214 | * support regular read/write anyway. |
| 6215 | */ |
| 6216 | if (file->f_op == &io_uring_fops) { |
| 6217 | fput(file); |
| 6218 | err = -EBADF; |
| 6219 | break; |
| 6220 | } |
Jens Axboe | 65e19f5 | 2019-10-26 07:20:21 -0600 | [diff] [blame] | 6221 | table->files[index] = file; |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 6222 | err = io_sqe_file_register(ctx, file, i); |
| 6223 | if (err) |
| 6224 | break; |
| 6225 | } |
| 6226 | nr_args--; |
| 6227 | done++; |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 6228 | up->offset++; |
| 6229 | } |
| 6230 | |
Jens Axboe | dd3db2a | 2020-02-26 10:23:43 -0700 | [diff] [blame] | 6231 | if (ref_switch) |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 6232 | percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch); |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 6233 | |
| 6234 | return done ? done : err; |
| 6235 | } |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 6236 | static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg, |
| 6237 | unsigned nr_args) |
| 6238 | { |
| 6239 | struct io_uring_files_update up; |
| 6240 | |
| 6241 | if (!ctx->file_data) |
| 6242 | return -ENXIO; |
| 6243 | if (!nr_args) |
| 6244 | return -EINVAL; |
| 6245 | if (copy_from_user(&up, arg, sizeof(up))) |
| 6246 | return -EFAULT; |
| 6247 | if (up.resv) |
| 6248 | return -EINVAL; |
| 6249 | |
| 6250 | return __io_sqe_files_update(ctx, &up, nr_args); |
| 6251 | } |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 6252 | |
Pavel Begunkov | e9fd939 | 2020-03-04 16:14:12 +0300 | [diff] [blame] | 6253 | static void io_free_work(struct io_wq_work *work) |
Jens Axboe | 7d72306 | 2019-11-12 22:31:31 -0700 | [diff] [blame] | 6254 | { |
| 6255 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
| 6256 | |
Pavel Begunkov | e9fd939 | 2020-03-04 16:14:12 +0300 | [diff] [blame] | 6257 | /* Consider that io_steal_work() relies on this ref */ |
Jens Axboe | 7d72306 | 2019-11-12 22:31:31 -0700 | [diff] [blame] | 6258 | io_put_req(req); |
| 6259 | } |
| 6260 | |
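| | /*
| |  * Set up the async io-wq. Without IORING_SETUP_ATTACH_WQ a new workqueue is
| |  * created, sized to min(SQ entries, 4 * online CPUs); with the flag set we
| |  * attach to the io-wq of the existing ring identified by p->wq_fd instead.
| |  */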
Pavel Begunkov | 24369c2 | 2020-01-28 03:15:48 +0300 | [diff] [blame] | 6261 | static int io_init_wq_offload(struct io_ring_ctx *ctx, |
| 6262 | struct io_uring_params *p) |
| 6263 | { |
| 6264 | struct io_wq_data data; |
| 6265 | struct fd f; |
| 6266 | struct io_ring_ctx *ctx_attach; |
| 6267 | unsigned int concurrency; |
| 6268 | int ret = 0; |
| 6269 | |
| 6270 | data.user = ctx->user; |
Pavel Begunkov | e9fd939 | 2020-03-04 16:14:12 +0300 | [diff] [blame] | 6271 | data.free_work = io_free_work; |
Pavel Begunkov | 24369c2 | 2020-01-28 03:15:48 +0300 | [diff] [blame] | 6272 | |
| 6273 | if (!(p->flags & IORING_SETUP_ATTACH_WQ)) { |
| 6274 | /* Do QD, or 4 * CPUS, whichever is smaller */
| 6275 | concurrency = min(ctx->sq_entries, 4 * num_online_cpus()); |
| 6276 | |
| 6277 | ctx->io_wq = io_wq_create(concurrency, &data); |
| 6278 | if (IS_ERR(ctx->io_wq)) { |
| 6279 | ret = PTR_ERR(ctx->io_wq); |
| 6280 | ctx->io_wq = NULL; |
| 6281 | } |
| 6282 | return ret; |
| 6283 | } |
| 6284 | |
| 6285 | f = fdget(p->wq_fd); |
| 6286 | if (!f.file) |
| 6287 | return -EBADF; |
| 6288 | |
| 6289 | if (f.file->f_op != &io_uring_fops) { |
| 6290 | ret = -EINVAL; |
| 6291 | goto out_fput; |
| 6292 | } |
| 6293 | |
| 6294 | ctx_attach = f.file->private_data; |
| 6295 | /* @io_wq is protected by holding the fd */ |
| 6296 | if (!io_wq_get(ctx_attach->io_wq, &data)) { |
| 6297 | ret = -EINVAL; |
| 6298 | goto out_fput; |
| 6299 | } |
| 6300 | |
| 6301 | ctx->io_wq = ctx_attach->io_wq; |
| 6302 | out_fput: |
| 6303 | fdput(f); |
| 6304 | return ret; |
| 6305 | } |
| 6306 | |
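| | /*
| |  * Start request offload: pin the submitter's mm, spawn the SQPOLL kernel
| |  * thread if requested (CAP_SYS_ADMIN only, optionally bound to a CPU), and
| |  * create or attach the io-wq used for punted work.
| |  */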
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 6307 | static int io_sq_offload_start(struct io_ring_ctx *ctx, |
| 6308 | struct io_uring_params *p) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6309 | { |
| 6310 | int ret; |
| 6311 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 6312 | init_waitqueue_head(&ctx->sqo_wait); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6313 | mmgrab(current->mm); |
| 6314 | ctx->sqo_mm = current->mm; |
| 6315 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 6316 | if (ctx->flags & IORING_SETUP_SQPOLL) { |
Jens Axboe | 3ec482d | 2019-04-08 10:51:01 -0600 | [diff] [blame] | 6317 | ret = -EPERM; |
| 6318 | if (!capable(CAP_SYS_ADMIN)) |
| 6319 | goto err; |
| 6320 | |
Jens Axboe | 917257d | 2019-04-13 09:28:55 -0600 | [diff] [blame] | 6321 | ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle); |
| 6322 | if (!ctx->sq_thread_idle) |
| 6323 | ctx->sq_thread_idle = HZ; |
| 6324 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 6325 | if (p->flags & IORING_SETUP_SQ_AFF) { |
Jens Axboe | 44a9bd1 | 2019-05-14 20:00:30 -0600 | [diff] [blame] | 6326 | int cpu = p->sq_thread_cpu; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 6327 | |
Jens Axboe | 917257d | 2019-04-13 09:28:55 -0600 | [diff] [blame] | 6328 | ret = -EINVAL; |
Jens Axboe | 44a9bd1 | 2019-05-14 20:00:30 -0600 | [diff] [blame] | 6329 | if (cpu >= nr_cpu_ids) |
| 6330 | goto err; |
Shenghui Wang | 7889f44 | 2019-05-07 16:03:19 +0800 | [diff] [blame] | 6331 | if (!cpu_online(cpu)) |
Jens Axboe | 917257d | 2019-04-13 09:28:55 -0600 | [diff] [blame] | 6332 | goto err; |
| 6333 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 6334 | ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread, |
| 6335 | ctx, cpu, |
| 6336 | "io_uring-sq"); |
| 6337 | } else { |
| 6338 | ctx->sqo_thread = kthread_create(io_sq_thread, ctx, |
| 6339 | "io_uring-sq"); |
| 6340 | } |
| 6341 | if (IS_ERR(ctx->sqo_thread)) { |
| 6342 | ret = PTR_ERR(ctx->sqo_thread); |
| 6343 | ctx->sqo_thread = NULL; |
| 6344 | goto err; |
| 6345 | } |
| 6346 | wake_up_process(ctx->sqo_thread); |
| 6347 | } else if (p->flags & IORING_SETUP_SQ_AFF) { |
| 6348 | /* Can't have SQ_AFF without SQPOLL */ |
| 6349 | ret = -EINVAL; |
| 6350 | goto err; |
| 6351 | } |
| 6352 | |
Pavel Begunkov | 24369c2 | 2020-01-28 03:15:48 +0300 | [diff] [blame] | 6353 | ret = io_init_wq_offload(ctx, p); |
| 6354 | if (ret) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6355 | goto err; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6356 | |
| 6357 | return 0; |
| 6358 | err: |
Jens Axboe | 54a91f3 | 2019-09-10 09:15:04 -0600 | [diff] [blame] | 6359 | io_finish_async(ctx); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6360 | mmdrop(ctx->sqo_mm); |
| 6361 | ctx->sqo_mm = NULL; |
| 6362 | return ret; |
| 6363 | } |
| 6364 | |
| 6365 | static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages) |
| 6366 | { |
| 6367 | atomic_long_sub(nr_pages, &user->locked_vm); |
| 6368 | } |
| 6369 | |
| 6370 | static int io_account_mem(struct user_struct *user, unsigned long nr_pages) |
| 6371 | { |
| 6372 | unsigned long page_limit, cur_pages, new_pages; |
| 6373 | |
| 6374 | /* Don't allow more pages than we can safely lock */ |
| 6375 | page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; |
| 6376 | |
| 6377 | do { |
| 6378 | cur_pages = atomic_long_read(&user->locked_vm); |
| 6379 | new_pages = cur_pages + nr_pages; |
| 6380 | if (new_pages > page_limit) |
| 6381 | return -ENOMEM; |
| 6382 | } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages, |
| 6383 | new_pages) != cur_pages); |
| 6384 | |
| 6385 | return 0; |
| 6386 | } |
| 6387 | |
| 6388 | static void io_mem_free(void *ptr) |
| 6389 | { |
Mark Rutland | 52e04ef | 2019-04-30 17:30:21 +0100 | [diff] [blame] | 6390 | struct page *page; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6391 | |
Mark Rutland | 52e04ef | 2019-04-30 17:30:21 +0100 | [diff] [blame] | 6392 | if (!ptr) |
| 6393 | return; |
| 6394 | |
| 6395 | page = virt_to_head_page(ptr); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6396 | if (put_page_testzero(page)) |
| 6397 | free_compound_page(page); |
| 6398 | } |
| 6399 | |
| 6400 | static void *io_mem_alloc(size_t size) |
| 6401 | { |
| 6402 | gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP | |
| 6403 | __GFP_NORETRY; |
| 6404 | |
| 6405 | return (void *) __get_free_pages(gfp_flags, get_order(size)); |
| 6406 | } |
| 6407 | |
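| | /*
| |  * Size of the rings area: the io_rings struct plus the CQE array, padded to
| |  * a cacheline boundary on SMP, followed by the SQ index array whose offset
| |  * is returned in *sq_offset. SIZE_MAX signals arithmetic overflow.
| |  */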
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 6408 | static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries, |
| 6409 | size_t *sq_offset) |
| 6410 | { |
| 6411 | struct io_rings *rings; |
| 6412 | size_t off, sq_array_size; |
| 6413 | |
| 6414 | off = struct_size(rings, cqes, cq_entries); |
| 6415 | if (off == SIZE_MAX) |
| 6416 | return SIZE_MAX; |
| 6417 | |
| 6418 | #ifdef CONFIG_SMP |
| 6419 | off = ALIGN(off, SMP_CACHE_BYTES); |
| 6420 | if (off == 0) |
| 6421 | return SIZE_MAX; |
| 6422 | #endif |
| 6423 | |
| 6424 | sq_array_size = array_size(sizeof(u32), sq_entries); |
| 6425 | if (sq_array_size == SIZE_MAX) |
| 6426 | return SIZE_MAX; |
| 6427 | |
| 6428 | if (check_add_overflow(off, sq_array_size, &off)) |
| 6429 | return SIZE_MAX; |
| 6430 | |
| 6431 | if (sq_offset) |
| 6432 | *sq_offset = off; |
| 6433 | |
| 6434 | return off; |
| 6435 | } |
| 6436 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6437 | static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries) |
| 6438 | { |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 6439 | size_t pages; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6440 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 6441 | pages = (size_t)1 << get_order( |
| 6442 | rings_size(sq_entries, cq_entries, NULL)); |
| 6443 | pages += (size_t)1 << get_order( |
| 6444 | array_size(sizeof(struct io_uring_sqe), sq_entries)); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6445 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 6446 | return pages; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6447 | } |
| 6448 | |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6449 | static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx) |
| 6450 | { |
| 6451 | int i, j; |
| 6452 | |
| 6453 | if (!ctx->user_bufs) |
| 6454 | return -ENXIO; |
| 6455 | |
| 6456 | for (i = 0; i < ctx->nr_user_bufs; i++) { |
| 6457 | struct io_mapped_ubuf *imu = &ctx->user_bufs[i]; |
| 6458 | |
| 6459 | for (j = 0; j < imu->nr_bvecs; j++) |
John Hubbard | f1f6a7d | 2020-01-30 22:13:35 -0800 | [diff] [blame] | 6460 | unpin_user_page(imu->bvec[j].bv_page); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6461 | |
| 6462 | if (ctx->account_mem) |
| 6463 | io_unaccount_mem(ctx->user, imu->nr_bvecs); |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 6464 | kvfree(imu->bvec); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6465 | imu->nr_bvecs = 0; |
| 6466 | } |
| 6467 | |
| 6468 | kfree(ctx->user_bufs); |
| 6469 | ctx->user_bufs = NULL; |
| 6470 | ctx->nr_user_bufs = 0; |
| 6471 | return 0; |
| 6472 | } |
| 6473 | |
| 6474 | static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst, |
| 6475 | void __user *arg, unsigned index) |
| 6476 | { |
| 6477 | struct iovec __user *src; |
| 6478 | |
| 6479 | #ifdef CONFIG_COMPAT |
| 6480 | if (ctx->compat) { |
| 6481 | struct compat_iovec __user *ciovs; |
| 6482 | struct compat_iovec ciov; |
| 6483 | |
| 6484 | ciovs = (struct compat_iovec __user *) arg; |
| 6485 | if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov))) |
| 6486 | return -EFAULT; |
| 6487 | |
Jens Axboe | d55e5f5 | 2019-12-11 16:12:15 -0700 | [diff] [blame] | 6488 | dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6489 | dst->iov_len = ciov.iov_len; |
| 6490 | return 0; |
| 6491 | } |
| 6492 | #endif |
| 6493 | src = (struct iovec __user *) arg; |
| 6494 | if (copy_from_user(dst, &src[index], sizeof(*dst))) |
| 6495 | return -EFAULT; |
| 6496 | return 0; |
| 6497 | } |
| 6498 | |
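| | /*
| |  * Register fixed buffers (IORING_REGISTER_BUFFERS): each iovec (at most 1GB)
| |  * is charged against RLIMIT_MEMLOCK if needed, pinned with FOLL_LONGTERM,
| |  * and described as a bio_vec array for later fixed read/write requests.
| |  */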
| 6499 | static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg, |
| 6500 | unsigned nr_args) |
| 6501 | { |
| 6502 | struct vm_area_struct **vmas = NULL; |
| 6503 | struct page **pages = NULL; |
| 6504 | int i, j, got_pages = 0; |
| 6505 | int ret = -EINVAL; |
| 6506 | |
| 6507 | if (ctx->user_bufs) |
| 6508 | return -EBUSY; |
| 6509 | if (!nr_args || nr_args > UIO_MAXIOV) |
| 6510 | return -EINVAL; |
| 6511 | |
| 6512 | ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf), |
| 6513 | GFP_KERNEL); |
| 6514 | if (!ctx->user_bufs) |
| 6515 | return -ENOMEM; |
| 6516 | |
| 6517 | for (i = 0; i < nr_args; i++) { |
| 6518 | struct io_mapped_ubuf *imu = &ctx->user_bufs[i]; |
| 6519 | unsigned long off, start, end, ubuf; |
| 6520 | int pret, nr_pages; |
| 6521 | struct iovec iov; |
| 6522 | size_t size; |
| 6523 | |
| 6524 | ret = io_copy_iov(ctx, &iov, arg, i); |
| 6525 | if (ret) |
Pavel Begunkov | a278682 | 2019-05-26 12:35:47 +0300 | [diff] [blame] | 6526 | goto err; |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6527 | |
| 6528 | /* |
| 6529 | * Don't impose further limits on the size and buffer |
| 6530 | * constraints here, we'll -EINVAL later when IO is |
| 6531 | * submitted if they are wrong. |
| 6532 | */ |
| 6533 | ret = -EFAULT; |
| 6534 | if (!iov.iov_base || !iov.iov_len) |
| 6535 | goto err; |
| 6536 | |
| 6537 | /* arbitrary limit, but we need something */ |
| 6538 | if (iov.iov_len > SZ_1G) |
| 6539 | goto err; |
| 6540 | |
| 6541 | ubuf = (unsigned long) iov.iov_base; |
| 6542 | end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 6543 | start = ubuf >> PAGE_SHIFT; |
| 6544 | nr_pages = end - start; |
| 6545 | |
| 6546 | if (ctx->account_mem) { |
| 6547 | ret = io_account_mem(ctx->user, nr_pages); |
| 6548 | if (ret) |
| 6549 | goto err; |
| 6550 | } |
| 6551 | |
| 6552 | ret = 0; |
| 6553 | if (!pages || nr_pages > got_pages) { |
| 6554 | kfree(vmas); |
| 6555 | kfree(pages); |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 6556 | pages = kvmalloc_array(nr_pages, sizeof(struct page *), |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6557 | GFP_KERNEL); |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 6558 | vmas = kvmalloc_array(nr_pages, |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6559 | sizeof(struct vm_area_struct *), |
| 6560 | GFP_KERNEL); |
| 6561 | if (!pages || !vmas) { |
| 6562 | ret = -ENOMEM; |
| 6563 | if (ctx->account_mem) |
| 6564 | io_unaccount_mem(ctx->user, nr_pages); |
| 6565 | goto err; |
| 6566 | } |
| 6567 | got_pages = nr_pages; |
| 6568 | } |
| 6569 | |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 6570 | imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec), |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6571 | GFP_KERNEL); |
| 6572 | ret = -ENOMEM; |
| 6573 | if (!imu->bvec) { |
| 6574 | if (ctx->account_mem) |
| 6575 | io_unaccount_mem(ctx->user, nr_pages); |
| 6576 | goto err; |
| 6577 | } |
| 6578 | |
| 6579 | ret = 0; |
| 6580 | down_read(¤t->mm->mmap_sem); |
John Hubbard | 2113b05 | 2020-01-30 22:13:13 -0800 | [diff] [blame] | 6581 | pret = pin_user_pages(ubuf, nr_pages, |
Ira Weiny | 932f4a6 | 2019-05-13 17:17:03 -0700 | [diff] [blame] | 6582 | FOLL_WRITE | FOLL_LONGTERM, |
| 6583 | pages, vmas); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6584 | if (pret == nr_pages) { |
| 6585 | /* don't support file backed memory */ |
| 6586 | for (j = 0; j < nr_pages; j++) { |
| 6587 | struct vm_area_struct *vma = vmas[j]; |
| 6588 | |
| 6589 | if (vma->vm_file && |
| 6590 | !is_file_hugepages(vma->vm_file)) { |
| 6591 | ret = -EOPNOTSUPP; |
| 6592 | break; |
| 6593 | } |
| 6594 | } |
| 6595 | } else { |
| 6596 | ret = pret < 0 ? pret : -EFAULT; |
| 6597 | } |
| 6598 | up_read(¤t->mm->mmap_sem); |
| 6599 | if (ret) { |
| 6600 | /* |
| 6601 | * if we did partial map, or found file backed vmas, |
| 6602 | * release any pages we did get |
| 6603 | */ |
John Hubbard | 27c4d3a | 2019-08-04 19:32:06 -0700 | [diff] [blame] | 6604 | if (pret > 0) |
John Hubbard | f1f6a7d | 2020-01-30 22:13:35 -0800 | [diff] [blame] | 6605 | unpin_user_pages(pages, pret); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6606 | if (ctx->account_mem) |
| 6607 | io_unaccount_mem(ctx->user, nr_pages); |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 6608 | kvfree(imu->bvec); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6609 | goto err; |
| 6610 | } |
| 6611 | |
| 6612 | off = ubuf & ~PAGE_MASK; |
| 6613 | size = iov.iov_len; |
| 6614 | for (j = 0; j < nr_pages; j++) { |
| 6615 | size_t vec_len; |
| 6616 | |
| 6617 | vec_len = min_t(size_t, size, PAGE_SIZE - off); |
| 6618 | imu->bvec[j].bv_page = pages[j]; |
| 6619 | imu->bvec[j].bv_len = vec_len; |
| 6620 | imu->bvec[j].bv_offset = off; |
| 6621 | off = 0; |
| 6622 | size -= vec_len; |
| 6623 | } |
| 6624 | /* store original address for later verification */ |
| 6625 | imu->ubuf = ubuf; |
| 6626 | imu->len = iov.iov_len; |
| 6627 | imu->nr_bvecs = nr_pages; |
| 6628 | |
| 6629 | ctx->nr_user_bufs++; |
| 6630 | } |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 6631 | kvfree(pages); |
| 6632 | kvfree(vmas); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6633 | return 0; |
| 6634 | err: |
Mark Rutland | d4ef647 | 2019-05-01 16:59:16 +0100 | [diff] [blame] | 6635 | kvfree(pages); |
| 6636 | kvfree(vmas); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6637 | io_sqe_buffer_unregister(ctx); |
| 6638 | return ret; |
| 6639 | } |
| 6640 | |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 6641 | static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg) |
| 6642 | { |
| 6643 | __s32 __user *fds = arg; |
| 6644 | int fd; |
| 6645 | |
| 6646 | if (ctx->cq_ev_fd) |
| 6647 | return -EBUSY; |
| 6648 | |
| 6649 | if (copy_from_user(&fd, fds, sizeof(*fds))) |
| 6650 | return -EFAULT; |
| 6651 | |
| 6652 | ctx->cq_ev_fd = eventfd_ctx_fdget(fd); |
| 6653 | if (IS_ERR(ctx->cq_ev_fd)) { |
| 6654 | int ret = PTR_ERR(ctx->cq_ev_fd); |
| 6655 | ctx->cq_ev_fd = NULL; |
| 6656 | return ret; |
| 6657 | } |
| 6658 | |
| 6659 | return 0; |
| 6660 | } |
| 6661 | |
| 6662 | static int io_eventfd_unregister(struct io_ring_ctx *ctx) |
| 6663 | { |
| 6664 | if (ctx->cq_ev_fd) { |
| 6665 | eventfd_ctx_put(ctx->cq_ev_fd); |
| 6666 | ctx->cq_ev_fd = NULL; |
| 6667 | return 0; |
| 6668 | } |
| 6669 | |
| 6670 | return -ENXIO; |
| 6671 | } |
| 6672 | |
Jens Axboe | 5a2e745 | 2020-02-23 16:23:11 -0700 | [diff] [blame] | 6673 | static int __io_destroy_buffers(int id, void *p, void *data) |
| 6674 | { |
| 6675 | struct io_ring_ctx *ctx = data; |
| 6676 | struct io_buffer *buf = p; |
| 6677 | |
| 6678 | /* the head kbuf is the list itself */ |
| 6679 | while (!list_empty(&buf->list)) { |
| 6680 | struct io_buffer *nxt; |
| 6681 | |
| 6682 | nxt = list_first_entry(&buf->list, struct io_buffer, list); |
| 6683 | list_del(&nxt->list); |
| 6684 | kfree(nxt); |
| 6685 | } |
| 6686 | kfree(buf); |
| 6687 | idr_remove(&ctx->io_buffer_idr, id); |
| 6688 | return 0; |
| 6689 | } |
| 6690 | |
| 6691 | static void io_destroy_buffers(struct io_ring_ctx *ctx) |
| 6692 | { |
| 6693 | idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx); |
| 6694 | idr_destroy(&ctx->io_buffer_idr); |
| 6695 | } |
| 6696 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6697 | static void io_ring_ctx_free(struct io_ring_ctx *ctx) |
| 6698 | { |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 6699 | io_finish_async(ctx); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6700 | if (ctx->sqo_mm) |
| 6701 | mmdrop(ctx->sqo_mm); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 6702 | |
| 6703 | io_iopoll_reap_events(ctx); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 6704 | io_sqe_buffer_unregister(ctx); |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 6705 | io_sqe_files_unregister(ctx); |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 6706 | io_eventfd_unregister(ctx); |
Jens Axboe | 5a2e745 | 2020-02-23 16:23:11 -0700 | [diff] [blame] | 6707 | io_destroy_buffers(ctx); |
Jens Axboe | 41726c9 | 2020-02-23 13:11:42 -0700 | [diff] [blame] | 6708 | idr_destroy(&ctx->personality_idr); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 6709 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6710 | #if defined(CONFIG_UNIX) |
Eric Biggers | 355e8d2 | 2019-06-12 14:58:43 -0700 | [diff] [blame] | 6711 | if (ctx->ring_sock) { |
| 6712 | ctx->ring_sock->file = NULL; /* so that iput() is called */ |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6713 | sock_release(ctx->ring_sock); |
Eric Biggers | 355e8d2 | 2019-06-12 14:58:43 -0700 | [diff] [blame] | 6714 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6715 | #endif |
| 6716 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 6717 | io_mem_free(ctx->rings); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6718 | io_mem_free(ctx->sq_sqes); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6719 | |
| 6720 | percpu_ref_exit(&ctx->refs); |
| 6721 | if (ctx->account_mem) |
| 6722 | io_unaccount_mem(ctx->user, |
| 6723 | ring_pages(ctx->sq_entries, ctx->cq_entries)); |
| 6724 | free_uid(ctx->user); |
Jens Axboe | 181e448 | 2019-11-25 08:52:30 -0700 | [diff] [blame] | 6725 | put_cred(ctx->creds); |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 6726 | kfree(ctx->completions); |
Jens Axboe | 78076bb | 2019-12-04 19:56:40 -0700 | [diff] [blame] | 6727 | kfree(ctx->cancel_hash); |
Jens Axboe | 0ddf92e | 2019-11-08 08:52:53 -0700 | [diff] [blame] | 6728 | kmem_cache_free(req_cachep, ctx->fallback_req); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6729 | kfree(ctx); |
| 6730 | } |
| 6731 | |
| 6732 | static __poll_t io_uring_poll(struct file *file, poll_table *wait) |
| 6733 | { |
| 6734 | struct io_ring_ctx *ctx = file->private_data; |
| 6735 | __poll_t mask = 0; |
| 6736 | |
| 6737 | poll_wait(file, &ctx->cq_wait, wait); |
Stefan Bühler | 4f7067c | 2019-04-24 23:54:17 +0200 | [diff] [blame] | 6738 | /* |
| 6739 | * synchronizes with barrier from wq_has_sleeper call in |
| 6740 | * io_commit_cqring |
| 6741 | */ |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6742 | smp_rmb(); |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 6743 | if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head != |
| 6744 | ctx->rings->sq_ring_entries) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6745 | mask |= EPOLLOUT | EPOLLWRNORM; |
Stefano Garzarella | 63e5d81 | 2020-02-07 13:18:28 +0100 | [diff] [blame] | 6746 | if (io_cqring_events(ctx, false)) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6747 | mask |= EPOLLIN | EPOLLRDNORM; |
| 6748 | |
| 6749 | return mask; |
| 6750 | } |
| 6751 | |
| 6752 | static int io_uring_fasync(int fd, struct file *file, int on) |
| 6753 | { |
| 6754 | struct io_ring_ctx *ctx = file->private_data; |
| 6755 | |
| 6756 | return fasync_helper(fd, file, on, &ctx->cq_fasync); |
| 6757 | } |
| 6758 | |
Jens Axboe | 071698e | 2020-01-28 10:04:42 -0700 | [diff] [blame] | 6759 | static int io_remove_personalities(int id, void *p, void *data) |
| 6760 | { |
| 6761 | struct io_ring_ctx *ctx = data; |
| 6762 | const struct cred *cred; |
| 6763 | |
| 6764 | cred = idr_remove(&ctx->personality_idr, id); |
| 6765 | if (cred) |
| 6766 | put_cred(cred); |
| 6767 | return 0; |
| 6768 | } |
| 6769 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6770 | static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) |
| 6771 | { |
| 6772 | mutex_lock(&ctx->uring_lock); |
| 6773 | percpu_ref_kill(&ctx->refs); |
| 6774 | mutex_unlock(&ctx->uring_lock); |
| 6775 | |
Jens Axboe | df069d8 | 2020-02-04 16:48:34 -0700 | [diff] [blame] | 6776 | /* |
| 6777 | * Wait for sq thread to idle, if we have one. It won't spin on new |
| 6778 | * work after we've killed the ctx ref above. This is important to do |
| 6779 | * before we cancel existing commands, as the thread could otherwise |
| 6780 | * be queueing new work post that. If that's work we need to cancel, |
| 6781 | * it could cause shutdown to hang. |
| 6782 | */ |
| 6783 | while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait)) |
| 6784 | cpu_relax(); |
| 6785 | |
Jens Axboe | 5262f56 | 2019-09-17 12:26:57 -0600 | [diff] [blame] | 6786 | io_kill_timeouts(ctx); |
Jens Axboe | 221c5eb | 2019-01-17 09:41:58 -0700 | [diff] [blame] | 6787 | io_poll_remove_all(ctx); |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 6788 | |
| 6789 | if (ctx->io_wq) |
| 6790 | io_wq_cancel_all(ctx->io_wq); |
| 6791 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 6792 | io_iopoll_reap_events(ctx); |
Jens Axboe | 15dff28 | 2019-11-13 09:09:23 -0700 | [diff] [blame] | 6793 | /* if we failed setting up the ctx, we might not have any rings */ |
| 6794 | if (ctx->rings) |
| 6795 | io_cqring_overflow_flush(ctx, true); |
Jens Axboe | 071698e | 2020-01-28 10:04:42 -0700 | [diff] [blame] | 6796 | idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx); |
Jens Axboe | 206aefd | 2019-11-07 18:27:42 -0700 | [diff] [blame] | 6797 | wait_for_completion(&ctx->completions[0]); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6798 | io_ring_ctx_free(ctx); |
| 6799 | } |
| 6800 | |
| 6801 | static int io_uring_release(struct inode *inode, struct file *file) |
| 6802 | { |
| 6803 | struct io_ring_ctx *ctx = file->private_data; |
| 6804 | |
| 6805 | file->private_data = NULL; |
| 6806 | io_ring_ctx_wait_and_kill(ctx); |
| 6807 | return 0; |
| 6808 | } |
| 6809 | |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 6810 | static void io_uring_cancel_files(struct io_ring_ctx *ctx, |
| 6811 | struct files_struct *files) |
| 6812 | { |
| 6813 | struct io_kiocb *req; |
| 6814 | DEFINE_WAIT(wait); |
| 6815 | |
| 6816 | while (!list_empty_careful(&ctx->inflight_list)) { |
Jens Axboe | 768134d | 2019-11-10 20:30:53 -0700 | [diff] [blame] | 6817 | struct io_kiocb *cancel_req = NULL; |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 6818 | |
| 6819 | spin_lock_irq(&ctx->inflight_lock); |
| 6820 | list_for_each_entry(req, &ctx->inflight_list, inflight_entry) { |
Jens Axboe | 768134d | 2019-11-10 20:30:53 -0700 | [diff] [blame] | 6821 | if (req->work.files != files) |
| 6822 | continue; |
| 6823 | /* req is being completed, ignore */ |
| 6824 | if (!refcount_inc_not_zero(&req->refs)) |
| 6825 | continue; |
| 6826 | cancel_req = req; |
| 6827 | break; |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 6828 | } |
Jens Axboe | 768134d | 2019-11-10 20:30:53 -0700 | [diff] [blame] | 6829 | if (cancel_req) |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 6830 | prepare_to_wait(&ctx->inflight_wait, &wait, |
Jens Axboe | 768134d | 2019-11-10 20:30:53 -0700 | [diff] [blame] | 6831 | TASK_UNINTERRUPTIBLE); |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 6832 | spin_unlock_irq(&ctx->inflight_lock); |
| 6833 | |
Jens Axboe | 768134d | 2019-11-10 20:30:53 -0700 | [diff] [blame] | 6834 | /* We need to keep going until we don't find a matching req */ |
| 6835 | if (!cancel_req) |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 6836 | break; |
Bob Liu | 2f6d9b9 | 2019-11-13 18:06:24 +0800 | [diff] [blame] | 6837 | |
Jens Axboe | 2ca1025 | 2020-02-13 17:17:35 -0700 | [diff] [blame] | 6838 | if (cancel_req->flags & REQ_F_OVERFLOW) { |
| 6839 | spin_lock_irq(&ctx->completion_lock); |
| 6840 | list_del(&cancel_req->list); |
| 6841 | cancel_req->flags &= ~REQ_F_OVERFLOW; |
| 6842 | if (list_empty(&ctx->cq_overflow_list)) { |
| 6843 | clear_bit(0, &ctx->sq_check_overflow); |
| 6844 | clear_bit(0, &ctx->cq_check_overflow); |
| 6845 | } |
| 6846 | spin_unlock_irq(&ctx->completion_lock); |
| 6847 | |
| 6848 | WRITE_ONCE(ctx->rings->cq_overflow, |
| 6849 | atomic_inc_return(&ctx->cached_cq_overflow)); |
| 6850 | |
| 6851 | /* |
| 6852 | * Put inflight ref and overflow ref. If that's |
| 6853 | * all we had, then we're done with this request. |
| 6854 | */ |
| 6855 | if (refcount_sub_and_test(2, &cancel_req->refs)) { |
| 6856 | io_put_req(cancel_req); |
| 6857 | continue; |
| 6858 | } |
| 6859 | } |
| 6860 | |
Bob Liu | 2f6d9b9 | 2019-11-13 18:06:24 +0800 | [diff] [blame] | 6861 | io_wq_cancel_work(ctx->io_wq, &cancel_req->work); |
| 6862 | io_put_req(cancel_req); |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 6863 | schedule(); |
| 6864 | } |
Jens Axboe | 768134d | 2019-11-10 20:30:53 -0700 | [diff] [blame] | 6865 | finish_wait(&ctx->inflight_wait, &wait); |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 6866 | } |
| 6867 | |
| 6868 | static int io_uring_flush(struct file *file, void *data) |
| 6869 | { |
| 6870 | struct io_ring_ctx *ctx = file->private_data; |
| 6871 | |
| 6872 | io_uring_cancel_files(ctx, data); |
Jens Axboe | 6ab2314 | 2020-02-08 20:23:59 -0700 | [diff] [blame] | 6873 | |
| 6874 | /* |
| 6875 | * If the task is going away, cancel work it may have pending |
| 6876 | */ |
| 6877 | if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) |
| 6878 | io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current)); |
| 6879 | |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 6880 | return 0; |
| 6881 | } |
| 6882 | |
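| | /*
| |  * Translate an mmap offset (IORING_OFF_SQ_RING, IORING_OFF_CQ_RING or
| |  * IORING_OFF_SQES) into the kernel address that backs it, rejecting
| |  * requests larger than the backing allocation.
| |  */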
Roman Penyaev | 6c5c240 | 2019-11-28 12:53:22 +0100 | [diff] [blame] | 6883 | static void *io_uring_validate_mmap_request(struct file *file, |
| 6884 | loff_t pgoff, size_t sz) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6885 | { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6886 | struct io_ring_ctx *ctx = file->private_data; |
Roman Penyaev | 6c5c240 | 2019-11-28 12:53:22 +0100 | [diff] [blame] | 6887 | loff_t offset = pgoff << PAGE_SHIFT; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6888 | struct page *page; |
| 6889 | void *ptr; |
| 6890 | |
| 6891 | switch (offset) { |
| 6892 | case IORING_OFF_SQ_RING: |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 6893 | case IORING_OFF_CQ_RING: |
| 6894 | ptr = ctx->rings; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6895 | break; |
| 6896 | case IORING_OFF_SQES: |
| 6897 | ptr = ctx->sq_sqes; |
| 6898 | break; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6899 | default: |
Roman Penyaev | 6c5c240 | 2019-11-28 12:53:22 +0100 | [diff] [blame] | 6900 | return ERR_PTR(-EINVAL); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6901 | } |
| 6902 | |
| 6903 | page = virt_to_head_page(ptr); |
Matthew Wilcox (Oracle) | a50b854 | 2019-09-23 15:34:25 -0700 | [diff] [blame] | 6904 | if (sz > page_size(page)) |
Roman Penyaev | 6c5c240 | 2019-11-28 12:53:22 +0100 | [diff] [blame] | 6905 | return ERR_PTR(-EINVAL); |
| 6906 | |
| 6907 | return ptr; |
| 6908 | } |
| 6909 | |
| 6910 | #ifdef CONFIG_MMU |
| 6911 | |
| 6912 | static int io_uring_mmap(struct file *file, struct vm_area_struct *vma) |
| 6913 | { |
| 6914 | size_t sz = vma->vm_end - vma->vm_start; |
| 6915 | unsigned long pfn; |
| 6916 | void *ptr; |
| 6917 | |
| 6918 | ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz); |
| 6919 | if (IS_ERR(ptr)) |
| 6920 | return PTR_ERR(ptr); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6921 | |
| 6922 | pfn = virt_to_phys(ptr) >> PAGE_SHIFT; |
| 6923 | return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); |
| 6924 | } |
| 6925 | |
Roman Penyaev | 6c5c240 | 2019-11-28 12:53:22 +0100 | [diff] [blame] | 6926 | #else /* !CONFIG_MMU */ |
| 6927 | |
| 6928 | static int io_uring_mmap(struct file *file, struct vm_area_struct *vma) |
| 6929 | { |
| 6930 | return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL; |
| 6931 | } |
| 6932 | |
| 6933 | static unsigned int io_uring_nommu_mmap_capabilities(struct file *file) |
| 6934 | { |
| 6935 | return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE; |
| 6936 | } |
| 6937 | |
| 6938 | static unsigned long io_uring_nommu_get_unmapped_area(struct file *file, |
| 6939 | unsigned long addr, unsigned long len, |
| 6940 | unsigned long pgoff, unsigned long flags) |
| 6941 | { |
| 6942 | void *ptr; |
| 6943 | |
| 6944 | ptr = io_uring_validate_mmap_request(file, pgoff, len); |
| 6945 | if (IS_ERR(ptr)) |
| 6946 | return PTR_ERR(ptr); |
| 6947 | |
| 6948 | return (unsigned long) ptr; |
| 6949 | } |
| 6950 | |
| 6951 | #endif /* !CONFIG_MMU */ |
| 6952 | |
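| | /*
| |  * io_uring_enter(2): submit up to @to_submit SQEs and/or wait for at least
| |  * @min_complete completions when IORING_ENTER_GETEVENTS is set. With SQPOLL
| |  * the kernel thread does the submission and this call only wakes it up if
| |  * IORING_ENTER_SQ_WAKEUP is given. Returns the number of SQEs consumed, or
| |  * an error if nothing was submitted.
| |  */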
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6953 | SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, |
| 6954 | u32, min_complete, u32, flags, const sigset_t __user *, sig, |
| 6955 | size_t, sigsz) |
| 6956 | { |
| 6957 | struct io_ring_ctx *ctx; |
| 6958 | long ret = -EBADF; |
| 6959 | int submitted = 0; |
| 6960 | struct fd f; |
| 6961 | |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 6962 | if (current->task_works) |
| 6963 | task_work_run(); |
| 6964 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 6965 | if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP)) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6966 | return -EINVAL; |
| 6967 | |
| 6968 | f = fdget(fd); |
| 6969 | if (!f.file) |
| 6970 | return -EBADF; |
| 6971 | |
| 6972 | ret = -EOPNOTSUPP; |
| 6973 | if (f.file->f_op != &io_uring_fops) |
| 6974 | goto out_fput; |
| 6975 | |
| 6976 | ret = -ENXIO; |
| 6977 | ctx = f.file->private_data; |
| 6978 | if (!percpu_ref_tryget(&ctx->refs)) |
| 6979 | goto out_fput; |
| 6980 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 6981 | /* |
| 6982 | * For SQ polling, the thread will do all submissions and completions. |
| 6983 | * Just return the requested submit count, and wake the thread if |
| 6984 | * we were asked to. |
| 6985 | */ |
Jens Axboe | b2a9ead | 2019-09-12 14:19:16 -0600 | [diff] [blame] | 6986 | ret = 0; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 6987 | if (ctx->flags & IORING_SETUP_SQPOLL) { |
Jens Axboe | c1edbf5 | 2019-11-10 16:56:04 -0700 | [diff] [blame] | 6988 | if (!list_empty_careful(&ctx->cq_overflow_list)) |
| 6989 | io_cqring_overflow_flush(ctx, false); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 6990 | if (flags & IORING_ENTER_SQ_WAKEUP) |
| 6991 | wake_up(&ctx->sqo_wait); |
| 6992 | submitted = to_submit; |
Jens Axboe | b2a9ead | 2019-09-12 14:19:16 -0600 | [diff] [blame] | 6993 | } else if (to_submit) { |
Pavel Begunkov | ae9428c | 2019-11-06 00:22:14 +0300 | [diff] [blame] | 6994 | struct mm_struct *cur_mm; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 6995 | |
| 6996 | mutex_lock(&ctx->uring_lock); |
Pavel Begunkov | ae9428c | 2019-11-06 00:22:14 +0300 | [diff] [blame] | 6997 | /* already have mm, so io_submit_sqes() won't try to grab it */ |
| 6998 | cur_mm = ctx->sqo_mm; |
| 6999 | submitted = io_submit_sqes(ctx, to_submit, f.file, fd, |
| 7000 | &cur_mm, false); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7001 | mutex_unlock(&ctx->uring_lock); |
Pavel Begunkov | 7c504e65 | 2019-12-18 19:53:45 +0300 | [diff] [blame] | 7002 | |
| 7003 | if (submitted != to_submit) |
| 7004 | goto out; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7005 | } |
| 7006 | if (flags & IORING_ENTER_GETEVENTS) { |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 7007 | unsigned nr_events = 0; |
| 7008 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7009 | min_complete = min(min_complete, ctx->cq_entries); |
| 7010 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 7011 | if (ctx->flags & IORING_SETUP_IOPOLL) { |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 7012 | ret = io_iopoll_check(ctx, &nr_events, min_complete); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 7013 | } else { |
| 7014 | ret = io_cqring_wait(ctx, min_complete, sig, sigsz); |
| 7015 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7016 | } |
| 7017 | |
Pavel Begunkov | 7c504e65 | 2019-12-18 19:53:45 +0300 | [diff] [blame] | 7018 | out: |
Pavel Begunkov | 6805b32 | 2019-10-08 02:18:42 +0300 | [diff] [blame] | 7019 | percpu_ref_put(&ctx->refs); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7020 | out_fput: |
| 7021 | fdput(f); |
| 7022 | return submitted ? submitted : ret; |
| 7023 | } |
| 7024 | |
Tobias Klauser | bebdb65 | 2020-02-26 18:38:32 +0100 | [diff] [blame] | 7025 | #ifdef CONFIG_PROC_FS |
Jens Axboe | 87ce955 | 2020-01-30 08:25:34 -0700 | [diff] [blame] | 7026 | static int io_uring_show_cred(int id, void *p, void *data) |
| 7027 | { |
| 7028 | const struct cred *cred = p; |
| 7029 | struct seq_file *m = data; |
| 7030 | struct user_namespace *uns = seq_user_ns(m); |
| 7031 | struct group_info *gi; |
| 7032 | kernel_cap_t cap; |
| 7033 | unsigned __capi; |
| 7034 | int g; |
| 7035 | |
| 7036 | seq_printf(m, "%5d\n", id); |
| 7037 | seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid)); |
| 7038 | seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid)); |
| 7039 | seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid)); |
| 7040 | seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid)); |
| 7041 | seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid)); |
| 7042 | seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid)); |
| 7043 | seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid)); |
| 7044 | seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid)); |
| 7045 | seq_puts(m, "\n\tGroups:\t"); |
| 7046 | gi = cred->group_info; |
| 7047 | for (g = 0; g < gi->ngroups; g++) { |
| 7048 | seq_put_decimal_ull(m, g ? " " : "", |
| 7049 | from_kgid_munged(uns, gi->gid[g])); |
| 7050 | } |
| 7051 | seq_puts(m, "\n\tCapEff:\t"); |
| 7052 | cap = cred->cap_effective; |
| 7053 | CAP_FOR_EACH_U32(__capi) |
| 7054 | seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8); |
| 7055 | seq_putc(m, '\n'); |
| 7056 | return 0; |
| 7057 | } |
| 7058 | |
| 7059 | static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) |
| 7060 | { |
| 7061 | int i; |
| 7062 | |
| 7063 | mutex_lock(&ctx->uring_lock); |
| 7064 | seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files); |
| 7065 | for (i = 0; i < ctx->nr_user_files; i++) { |
| 7066 | struct fixed_file_table *table; |
| 7067 | struct file *f; |
| 7068 | |
| 7069 | table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT]; |
| 7070 | f = table->files[i & IORING_FILE_TABLE_MASK]; |
| 7071 | if (f) |
| 7072 | seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname); |
| 7073 | else |
| 7074 | seq_printf(m, "%5u: <none>\n", i); |
| 7075 | } |
| 7076 | seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs); |
| 7077 | for (i = 0; i < ctx->nr_user_bufs; i++) { |
| 7078 | struct io_mapped_ubuf *buf = &ctx->user_bufs[i]; |
| 7079 | |
| 7080 | seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, |
| 7081 | (unsigned int) buf->len); |
| 7082 | } |
| 7083 | if (!idr_is_empty(&ctx->personality_idr)) { |
| 7084 | seq_printf(m, "Personalities:\n"); |
| 7085 | idr_for_each(&ctx->personality_idr, io_uring_show_cred, m); |
| 7086 | } |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 7087 | seq_printf(m, "PollList:\n"); |
| 7088 | spin_lock_irq(&ctx->completion_lock); |
| 7089 | for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { |
| 7090 | struct hlist_head *list = &ctx->cancel_hash[i]; |
| 7091 | struct io_kiocb *req; |
| 7092 | |
| 7093 | hlist_for_each_entry(req, list, hash_node) |
| 7094 | seq_printf(m, " op=%d, task_works=%d\n", req->opcode, |
| 7095 | req->task->task_works != NULL); |
| 7096 | } |
| 7097 | spin_unlock_irq(&ctx->completion_lock); |
Jens Axboe | 87ce955 | 2020-01-30 08:25:34 -0700 | [diff] [blame] | 7098 | mutex_unlock(&ctx->uring_lock); |
| 7099 | } |
| 7100 | |
| 7101 | static void io_uring_show_fdinfo(struct seq_file *m, struct file *f) |
| 7102 | { |
| 7103 | struct io_ring_ctx *ctx = f->private_data; |
| 7104 | |
| 7105 | if (percpu_ref_tryget(&ctx->refs)) { |
| 7106 | __io_uring_show_fdinfo(ctx, m); |
| 7107 | percpu_ref_put(&ctx->refs); |
| 7108 | } |
| 7109 | } |
Tobias Klauser | bebdb65 | 2020-02-26 18:38:32 +0100 | [diff] [blame] | 7110 | #endif |
Jens Axboe | 87ce955 | 2020-01-30 08:25:34 -0700 | [diff] [blame] | 7111 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7112 | static const struct file_operations io_uring_fops = { |
| 7113 | .release = io_uring_release, |
Jens Axboe | fcb323c | 2019-10-24 12:39:47 -0600 | [diff] [blame] | 7114 | .flush = io_uring_flush, |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7115 | .mmap = io_uring_mmap, |
Roman Penyaev | 6c5c240 | 2019-11-28 12:53:22 +0100 | [diff] [blame] | 7116 | #ifndef CONFIG_MMU |
| 7117 | .get_unmapped_area = io_uring_nommu_get_unmapped_area, |
| 7118 | .mmap_capabilities = io_uring_nommu_mmap_capabilities, |
| 7119 | #endif |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7120 | .poll = io_uring_poll, |
| 7121 | .fasync = io_uring_fasync, |
Tobias Klauser | bebdb65 | 2020-02-26 18:38:32 +0100 | [diff] [blame] | 7122 | #ifdef CONFIG_PROC_FS |
Jens Axboe | 87ce955 | 2020-01-30 08:25:34 -0700 | [diff] [blame] | 7123 | .show_fdinfo = io_uring_show_fdinfo, |
Tobias Klauser | bebdb65 | 2020-02-26 18:38:32 +0100 | [diff] [blame] | 7124 | #endif |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7125 | }; |
| 7126 | |
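| | /* Allocate the SQ/CQ rings and the SQE array that userspace later mmaps */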
| 7127 | static int io_allocate_scq_urings(struct io_ring_ctx *ctx, |
| 7128 | struct io_uring_params *p) |
| 7129 | { |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 7130 | struct io_rings *rings; |
| 7131 | size_t size, sq_array_offset; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7132 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 7133 | size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset); |
| 7134 | if (size == SIZE_MAX) |
| 7135 | return -EOVERFLOW; |
| 7136 | |
| 7137 | rings = io_mem_alloc(size); |
| 7138 | if (!rings) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7139 | return -ENOMEM; |
| 7140 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 7141 | ctx->rings = rings; |
| 7142 | ctx->sq_array = (u32 *)((char *)rings + sq_array_offset); |
| 7143 | rings->sq_ring_mask = p->sq_entries - 1; |
| 7144 | rings->cq_ring_mask = p->cq_entries - 1; |
| 7145 | rings->sq_ring_entries = p->sq_entries; |
| 7146 | rings->cq_ring_entries = p->cq_entries; |
| 7147 | ctx->sq_mask = rings->sq_ring_mask; |
| 7148 | ctx->cq_mask = rings->cq_ring_mask; |
| 7149 | ctx->sq_entries = rings->sq_ring_entries; |
| 7150 | ctx->cq_entries = rings->cq_ring_entries; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7151 | |
| 7152 | size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); |
Jens Axboe | eb065d3 | 2019-11-20 09:26:29 -0700 | [diff] [blame] | 7153 | if (size == SIZE_MAX) { |
| 7154 | io_mem_free(ctx->rings); |
| 7155 | ctx->rings = NULL; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7156 | return -EOVERFLOW; |
Jens Axboe | eb065d3 | 2019-11-20 09:26:29 -0700 | [diff] [blame] | 7157 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7158 | |
| 7159 | ctx->sq_sqes = io_mem_alloc(size); |
Jens Axboe | eb065d3 | 2019-11-20 09:26:29 -0700 | [diff] [blame] | 7160 | if (!ctx->sq_sqes) { |
| 7161 | io_mem_free(ctx->rings); |
| 7162 | ctx->rings = NULL; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7163 | return -ENOMEM; |
Jens Axboe | eb065d3 | 2019-11-20 09:26:29 -0700 | [diff] [blame] | 7164 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7165 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7166 | return 0; |
| 7167 | } |
| 7168 | |
| 7169 | /* |
| 7170 | * Allocate an anonymous fd; this is the application-visible backing |
| 7171 | * of an io_uring instance. The application mmaps this |
| 7172 | * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled, |
| 7173 | * we have to tie this fd to a socket for file garbage collection purposes. |
| 7174 | */ |
| 7175 | static int io_uring_get_fd(struct io_ring_ctx *ctx) |
| 7176 | { |
| 7177 | struct file *file; |
| 7178 | int ret; |
| 7179 | |
| 7180 | #if defined(CONFIG_UNIX) |
| 7181 | ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP, |
| 7182 | &ctx->ring_sock); |
| 7183 | if (ret) |
| 7184 | return ret; |
| 7185 | #endif |
| 7186 | |
| 7187 | ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC); |
| 7188 | if (ret < 0) |
| 7189 | goto err; |
| 7190 | |
| 7191 | file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx, |
| 7192 | O_RDWR | O_CLOEXEC); |
| 7193 | if (IS_ERR(file)) { |
| 7194 | put_unused_fd(ret); |
| 7195 | ret = PTR_ERR(file); |
| 7196 | goto err; |
| 7197 | } |
| 7198 | |
| 7199 | #if defined(CONFIG_UNIX) |
| 7200 | ctx->ring_sock->file = file; |
| 7201 | #endif |
| 7202 | fd_install(ret, file); |
| 7203 | return ret; |
| 7204 | err: |
| 7205 | #if defined(CONFIG_UNIX) |
| 7206 | sock_release(ctx->ring_sock); |
| 7207 | ctx->ring_sock = NULL; |
| 7208 | #endif |
| 7209 | return ret; |
| 7210 | } |
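/*
 * The fd returned here is what the application mmaps to reach the rings, as
 * the comment above notes. A sketch of the SQ ring mapping, assuming p was
 * filled in by io_uring_setup(2); the size and offset follow the uapi
 * definitions (IORING_OFF_CQ_RING and IORING_OFF_SQES cover the other two
 * mappings), and map_sq_ring() itself is a hypothetical helper:
 */
#include <linux/io_uring.h>
#include <sys/mman.h>

static void *map_sq_ring(int ring_fd, const struct io_uring_params *p,
			 size_t *sz)
{
	/* the SQ ring ends with the array of sqe indices */
	*sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);

	return mmap(NULL, *sz, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
}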
| 7211 | |
| 7212 | static int io_uring_create(unsigned entries, struct io_uring_params *p) |
| 7213 | { |
| 7214 | struct user_struct *user = NULL; |
| 7215 | struct io_ring_ctx *ctx; |
| 7216 | bool account_mem; |
| 7217 | int ret; |
| 7218 | |
Jens Axboe | 8110c1a | 2019-12-28 15:39:54 -0700 | [diff] [blame] | 7219 | if (!entries) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7220 | return -EINVAL; |
Jens Axboe | 8110c1a | 2019-12-28 15:39:54 -0700 | [diff] [blame] | 7221 | if (entries > IORING_MAX_ENTRIES) { |
| 7222 | if (!(p->flags & IORING_SETUP_CLAMP)) |
| 7223 | return -EINVAL; |
| 7224 | entries = IORING_MAX_ENTRIES; |
| 7225 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7226 | |
| 7227 | /* |
| 7228 | * Use twice as many entries for the CQ ring. It's possible for the |
| 7229 | * application to drive a higher depth than the size of the SQ ring, |
| 7230 | * since the sqes are only used at submission time. This allows for |
Jens Axboe | 33a107f | 2019-10-04 12:10:03 -0600 | [diff] [blame] | 7231 | * some flexibility in overcommitting a bit. If the application has |
| 7232 | * set IORING_SETUP_CQSIZE, it will have passed in the desired number |
| 7233 | * of CQ ring entries manually. |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7234 | */ |
| 7235 | p->sq_entries = roundup_pow_of_two(entries); |
Jens Axboe | 33a107f | 2019-10-04 12:10:03 -0600 | [diff] [blame] | 7236 | if (p->flags & IORING_SETUP_CQSIZE) { |
| 7237 | /* |
| 7238 | * If IORING_SETUP_CQSIZE is set, we do the same roundup |
| 7239 | * to a power-of-two, if it isn't already. We do NOT impose |
| 7240 | * any cq vs sq ring sizing. |
| 7241 | */ |
Jens Axboe | 8110c1a | 2019-12-28 15:39:54 -0700 | [diff] [blame] | 7242 | if (p->cq_entries < p->sq_entries) |
Jens Axboe | 33a107f | 2019-10-04 12:10:03 -0600 | [diff] [blame] | 7243 | return -EINVAL; |
Jens Axboe | 8110c1a | 2019-12-28 15:39:54 -0700 | [diff] [blame] | 7244 | if (p->cq_entries > IORING_MAX_CQ_ENTRIES) { |
| 7245 | if (!(p->flags & IORING_SETUP_CLAMP)) |
| 7246 | return -EINVAL; |
| 7247 | p->cq_entries = IORING_MAX_CQ_ENTRIES; |
| 7248 | } |
Jens Axboe | 33a107f | 2019-10-04 12:10:03 -0600 | [diff] [blame] | 7249 | p->cq_entries = roundup_pow_of_two(p->cq_entries); |
| 7250 | } else { |
| 7251 | p->cq_entries = 2 * p->sq_entries; |
| 7252 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7253 | |
| 7254 | user = get_uid(current_user()); |
| 7255 | account_mem = !capable(CAP_IPC_LOCK); |
| 7256 | |
| 7257 | if (account_mem) { |
| 7258 | ret = io_account_mem(user, |
| 7259 | ring_pages(p->sq_entries, p->cq_entries)); |
| 7260 | if (ret) { |
| 7261 | free_uid(user); |
| 7262 | return ret; |
| 7263 | } |
| 7264 | } |
| 7265 | |
| 7266 | ctx = io_ring_ctx_alloc(p); |
| 7267 | if (!ctx) { |
| 7268 | if (account_mem) |
| 7269 | io_unaccount_mem(user, ring_pages(p->sq_entries, |
| 7270 | p->cq_entries)); |
| 7271 | free_uid(user); |
| 7272 | return -ENOMEM; |
| 7273 | } |
| 7274 | ctx->compat = in_compat_syscall(); |
| 7275 | ctx->account_mem = account_mem; |
| 7276 | ctx->user = user; |
Jens Axboe | 0b8c0ec | 2019-12-02 08:50:00 -0700 | [diff] [blame] | 7277 | ctx->creds = get_current_cred(); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7278 | |
| 7279 | ret = io_allocate_scq_urings(ctx, p); |
| 7280 | if (ret) |
| 7281 | goto err; |
| 7282 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 7283 | ret = io_sq_offload_start(ctx, p); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7284 | if (ret) |
| 7285 | goto err; |
| 7286 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7287 | memset(&p->sq_off, 0, sizeof(p->sq_off)); |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 7288 | p->sq_off.head = offsetof(struct io_rings, sq.head); |
| 7289 | p->sq_off.tail = offsetof(struct io_rings, sq.tail); |
| 7290 | p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask); |
| 7291 | p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries); |
| 7292 | p->sq_off.flags = offsetof(struct io_rings, sq_flags); |
| 7293 | p->sq_off.dropped = offsetof(struct io_rings, sq_dropped); |
| 7294 | p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7295 | |
| 7296 | memset(&p->cq_off, 0, sizeof(p->cq_off)); |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 7297 | p->cq_off.head = offsetof(struct io_rings, cq.head); |
| 7298 | p->cq_off.tail = offsetof(struct io_rings, cq.tail); |
| 7299 | p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask); |
| 7300 | p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries); |
| 7301 | p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); |
| 7302 | p->cq_off.cqes = offsetof(struct io_rings, cqes); |
Jens Axboe | ac90f24 | 2019-09-06 10:26:21 -0600 | [diff] [blame] | 7303 | |
Jens Axboe | 044c1ab | 2019-10-28 09:15:33 -0600 | [diff] [blame] | 7304 | /* |
| 7305 | * Install the ring fd as the very last thing, so we don't risk someone |
| 7306 | * having closed it before we finish setup. |
| 7307 | */ |
| 7308 | ret = io_uring_get_fd(ctx); |
| 7309 | if (ret < 0) |
| 7310 | goto err; |
| 7311 | |
Jens Axboe | da8c969 | 2019-12-02 18:51:26 -0700 | [diff] [blame] | 7312 | p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | |
Jens Axboe | cccf0ee | 2020-01-27 16:34:48 -0700 | [diff] [blame] | 7313 | IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | |
Jens Axboe | d7718a9 | 2020-02-14 22:23:12 -0700 | [diff] [blame] | 7314 | IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL; |
Dmitrii Dolgov | c826bd7 | 2019-10-15 19:02:01 +0200 | [diff] [blame] | 7315 | trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7316 | return ret; |
| 7317 | err: |
| 7318 | io_ring_ctx_wait_and_kill(ctx); |
| 7319 | return ret; |
| 7320 | } |
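/*
 * The sq_off/cq_off blocks filled in above are how userspace finds the shared
 * ring fields without knowing the layout of struct io_rings. A sketch of
 * wiring up pointers, assuming sq_ptr and cq_ptr are the mappings made at
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING (the same mapping when
 * IORING_FEAT_SINGLE_MMAP is reported); struct app_rings and wire_up_rings()
 * are illustrative names:
 */
#include <linux/io_uring.h>

struct app_rings {
	unsigned *sq_head, *sq_tail, *sq_mask, *sq_array;
	unsigned *cq_head, *cq_tail, *cq_mask;
	struct io_uring_cqe *cqes;
};

static void wire_up_rings(struct app_rings *r, char *sq_ptr, char *cq_ptr,
			  const struct io_uring_params *p)
{
	r->sq_head  = (unsigned *)(sq_ptr + p->sq_off.head);
	r->sq_tail  = (unsigned *)(sq_ptr + p->sq_off.tail);
	r->sq_mask  = (unsigned *)(sq_ptr + p->sq_off.ring_mask);
	r->sq_array = (unsigned *)(sq_ptr + p->sq_off.array);

	r->cq_head  = (unsigned *)(cq_ptr + p->cq_off.head);
	r->cq_tail  = (unsigned *)(cq_ptr + p->cq_off.tail);
	r->cq_mask  = (unsigned *)(cq_ptr + p->cq_off.ring_mask);
	r->cqes     = (struct io_uring_cqe *)(cq_ptr + p->cq_off.cqes);
}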
| 7321 | |
| 7322 | /* |
| 7323 | * Sets up an io_uring context and returns the fd. The application asks for a |
| 7324 | * ring size; we return the actual sq/cq ring sizes (among other things) in the |
| 7325 | * params structure passed in. |
| 7326 | */ |
| 7327 | static long io_uring_setup(u32 entries, struct io_uring_params __user *params) |
| 7328 | { |
| 7329 | struct io_uring_params p; |
| 7330 | long ret; |
| 7331 | int i; |
| 7332 | |
| 7333 | if (copy_from_user(&p, params, sizeof(p))) |
| 7334 | return -EFAULT; |
| 7335 | for (i = 0; i < ARRAY_SIZE(p.resv); i++) { |
| 7336 | if (p.resv[i]) |
| 7337 | return -EINVAL; |
| 7338 | } |
| 7339 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 7340 | if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL | |
Jens Axboe | 8110c1a | 2019-12-28 15:39:54 -0700 | [diff] [blame] | 7341 | IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE | |
Pavel Begunkov | 24369c2 | 2020-01-28 03:15:48 +0300 | [diff] [blame] | 7342 | IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ)) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7343 | return -EINVAL; |
| 7344 | |
| 7345 | ret = io_uring_create(entries, &p); |
| 7346 | if (ret < 0) |
| 7347 | return ret; |
| 7348 | |
| 7349 | if (copy_to_user(params, &p, sizeof(p))) |
| 7350 | return -EFAULT; |
| 7351 | |
| 7352 | return ret; |
| 7353 | } |
| 7354 | |
| 7355 | SYSCALL_DEFINE2(io_uring_setup, u32, entries, |
| 7356 | struct io_uring_params __user *, params) |
| 7357 | { |
| 7358 | return io_uring_setup(entries, params); |
| 7359 | } |
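/*
 * A sketch of calling this entry point without liburing, exercising the
 * IORING_SETUP_CQSIZE path handled in io_uring_create() above. The raw
 * syscall(2) form is used since older libcs ship no wrapper;
 * __NR_io_uring_setup is assumed to come from the system headers, and
 * setup_ring() is just an illustrative helper:
 */
#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int setup_ring(unsigned entries, unsigned cq_entries,
		      struct io_uring_params *p)
{
	memset(p, 0, sizeof(*p));	/* resv[] must be zero or we get -EINVAL */
	p->flags = IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP;
	p->cq_entries = cq_entries;	/* must not be below the (rounded) SQ size */

	/* on success, p->sq_entries/p->cq_entries hold the final ring sizes */
	return syscall(__NR_io_uring_setup, entries, p);
}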
| 7360 | |
Jens Axboe | 66f4af9 | 2020-01-16 15:36:52 -0700 | [diff] [blame] | 7361 | static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args) |
| 7362 | { |
| 7363 | struct io_uring_probe *p; |
| 7364 | size_t size; |
| 7365 | int i, ret; |
| 7366 | |
| 7367 | size = struct_size(p, ops, nr_args); |
| 7368 | if (size == SIZE_MAX) |
| 7369 | return -EOVERFLOW; |
| 7370 | p = kzalloc(size, GFP_KERNEL); |
| 7371 | if (!p) |
| 7372 | return -ENOMEM; |
| 7373 | |
| 7374 | ret = -EFAULT; |
| 7375 | if (copy_from_user(p, arg, size)) |
| 7376 | goto out; |
| 7377 | ret = -EINVAL; |
| 7378 | if (memchr_inv(p, 0, size)) |
| 7379 | goto out; |
| 7380 | |
| 7381 | p->last_op = IORING_OP_LAST - 1; |
| 7382 | if (nr_args > IORING_OP_LAST) |
| 7383 | nr_args = IORING_OP_LAST; |
| 7384 | |
| 7385 | for (i = 0; i < nr_args; i++) { |
| 7386 | p->ops[i].op = i; |
| 7387 | if (!io_op_defs[i].not_supported) |
| 7388 | p->ops[i].flags = IO_URING_OP_SUPPORTED; |
| 7389 | } |
| 7390 | p->ops_len = i; |
| 7391 | |
| 7392 | ret = 0; |
| 7393 | if (copy_to_user(arg, p, size)) |
| 7394 | ret = -EFAULT; |
| 7395 | out: |
| 7396 | kfree(p); |
| 7397 | return ret; |
| 7398 | } |
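/*
 * io_probe() is reached from userspace via io_uring_register(2) with
 * IORING_REGISTER_PROBE, and is how applications discover which opcodes this
 * kernel supports. A sketch, assuming ring_fd came from io_uring_setup(2);
 * op_is_supported() is an illustrative helper modelled on what liburing does:
 */
#include <linux/io_uring.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

static int op_is_supported(int ring_fd, unsigned op)
{
	struct io_uring_probe *probe;
	size_t len = sizeof(*probe) +
		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
	int ret = 0;

	probe = calloc(1, len);		/* must be zero-filled, see memchr_inv() above */
	if (!probe)
		return 0;
	if (!syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
		     probe, IORING_OP_LAST) && op <= probe->last_op)
		ret = !!(probe->ops[op].flags & IO_URING_OP_SUPPORTED);
	free(probe);
	return ret;
}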
| 7399 | |
Jens Axboe | 071698e | 2020-01-28 10:04:42 -0700 | [diff] [blame] | 7400 | static int io_register_personality(struct io_ring_ctx *ctx) |
| 7401 | { |
| 7402 | const struct cred *creds = get_current_cred(); |
| 7403 | int id; |
| 7404 | |
| 7405 | id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1, |
| 7406 | USHRT_MAX, GFP_KERNEL); |
| 7407 | if (id < 0) |
| 7408 | put_cred(creds); |
| 7409 | return id; |
| 7410 | } |
| 7411 | |
| 7412 | static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id) |
| 7413 | { |
| 7414 | const struct cred *old_creds; |
| 7415 | |
| 7416 | old_creds = idr_remove(&ctx->personality_idr, id); |
| 7417 | if (old_creds) { |
| 7418 | put_cred(old_creds); |
| 7419 | return 0; |
| 7420 | } |
| 7421 | |
| 7422 | return -EINVAL; |
| 7423 | } |
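/*
 * The id returned by io_register_personality() is what an sqe can later carry
 * in its personality field (offset 42, see the BUILD_BUG_SQE_ELEM checks
 * below) so the request is issued with the registered creds. Registration
 * takes no argument, while unregistration passes the id back in nr_args,
 * matching the checks in __io_uring_register(). A sketch; the helper names
 * are illustrative:
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* snapshot the caller's current credentials; returns the personality id */
static int register_current_creds(int ring_fd)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_PERSONALITY, NULL, 0);
}

static void use_personality(struct io_uring_sqe *sqe, int id)
{
	sqe->personality = id;		/* 0 means no registered personality is used */
}

static int drop_personality(int ring_fd, int id)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_UNREGISTER_PERSONALITY, NULL, id);
}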
| 7424 | |
| 7425 | static bool io_register_op_must_quiesce(int op) |
| 7426 | { |
| 7427 | switch (op) { |
| 7428 | case IORING_UNREGISTER_FILES: |
| 7429 | case IORING_REGISTER_FILES_UPDATE: |
| 7430 | case IORING_REGISTER_PROBE: |
| 7431 | case IORING_REGISTER_PERSONALITY: |
| 7432 | case IORING_UNREGISTER_PERSONALITY: |
| 7433 | return false; |
| 7434 | default: |
| 7435 | return true; |
| 7436 | } |
| 7437 | } |
| 7438 | |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 7439 | static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, |
| 7440 | void __user *arg, unsigned nr_args) |
Jens Axboe | b19062a | 2019-04-15 10:49:38 -0600 | [diff] [blame] | 7441 | __releases(ctx->uring_lock) |
| 7442 | __acquires(ctx->uring_lock) |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 7443 | { |
| 7444 | int ret; |
| 7445 | |
Jens Axboe | 35fa71a | 2019-04-22 10:23:23 -0600 | [diff] [blame] | 7446 | /* |
| 7447 | * We're inside the ring mutex; if the ref is already dying, then |
| 7448 | * someone else killed the ctx or is already going through |
| 7449 | * io_uring_register(). |
| 7450 | */ |
| 7451 | if (percpu_ref_is_dying(&ctx->refs)) |
| 7452 | return -ENXIO; |
| 7453 | |
Jens Axboe | 071698e | 2020-01-28 10:04:42 -0700 | [diff] [blame] | 7454 | if (io_register_op_must_quiesce(opcode)) { |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 7455 | percpu_ref_kill(&ctx->refs); |
Jens Axboe | b19062a | 2019-04-15 10:49:38 -0600 | [diff] [blame] | 7456 | |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 7457 | /* |
| 7458 | * Drop uring mutex before waiting for references to exit. If |
| 7459 | * another thread is currently inside io_uring_enter() it might |
| 7460 | * need to grab the uring_lock to make progress. If we hold it |
| 7461 | * here across the drain wait, then we can deadlock. It's safe |
| 7462 | * to drop the mutex here, since no new references will come in |
| 7463 | * after we've killed the percpu ref. |
| 7464 | */ |
| 7465 | mutex_unlock(&ctx->uring_lock); |
Jens Axboe | c150368 | 2020-01-08 08:26:07 -0700 | [diff] [blame] | 7466 | ret = wait_for_completion_interruptible(&ctx->completions[0]); |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 7467 | mutex_lock(&ctx->uring_lock); |
Jens Axboe | c150368 | 2020-01-08 08:26:07 -0700 | [diff] [blame] | 7468 | if (ret) { |
| 7469 | percpu_ref_resurrect(&ctx->refs); |
| 7470 | ret = -EINTR; |
| 7471 | goto out; |
| 7472 | } |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 7473 | } |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 7474 | |
| 7475 | switch (opcode) { |
| 7476 | case IORING_REGISTER_BUFFERS: |
| 7477 | ret = io_sqe_buffer_register(ctx, arg, nr_args); |
| 7478 | break; |
| 7479 | case IORING_UNREGISTER_BUFFERS: |
| 7480 | ret = -EINVAL; |
| 7481 | if (arg || nr_args) |
| 7482 | break; |
| 7483 | ret = io_sqe_buffer_unregister(ctx); |
| 7484 | break; |
Jens Axboe | 6b06314 | 2019-01-10 22:13:58 -0700 | [diff] [blame] | 7485 | case IORING_REGISTER_FILES: |
| 7486 | ret = io_sqe_files_register(ctx, arg, nr_args); |
| 7487 | break; |
| 7488 | case IORING_UNREGISTER_FILES: |
| 7489 | ret = -EINVAL; |
| 7490 | if (arg || nr_args) |
| 7491 | break; |
| 7492 | ret = io_sqe_files_unregister(ctx); |
| 7493 | break; |
Jens Axboe | c3a31e6 | 2019-10-03 13:59:56 -0600 | [diff] [blame] | 7494 | case IORING_REGISTER_FILES_UPDATE: |
| 7495 | ret = io_sqe_files_update(ctx, arg, nr_args); |
| 7496 | break; |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 7497 | case IORING_REGISTER_EVENTFD: |
Jens Axboe | f2842ab | 2020-01-08 11:04:00 -0700 | [diff] [blame] | 7498 | case IORING_REGISTER_EVENTFD_ASYNC: |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 7499 | ret = -EINVAL; |
| 7500 | if (nr_args != 1) |
| 7501 | break; |
| 7502 | ret = io_eventfd_register(ctx, arg); |
Jens Axboe | f2842ab | 2020-01-08 11:04:00 -0700 | [diff] [blame] | 7503 | if (ret) |
| 7504 | break; |
| 7505 | if (opcode == IORING_REGISTER_EVENTFD_ASYNC) |
| 7506 | ctx->eventfd_async = 1; |
| 7507 | else |
| 7508 | ctx->eventfd_async = 0; |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 7509 | break; |
| 7510 | case IORING_UNREGISTER_EVENTFD: |
| 7511 | ret = -EINVAL; |
| 7512 | if (arg || nr_args) |
| 7513 | break; |
| 7514 | ret = io_eventfd_unregister(ctx); |
| 7515 | break; |
Jens Axboe | 66f4af9 | 2020-01-16 15:36:52 -0700 | [diff] [blame] | 7516 | case IORING_REGISTER_PROBE: |
| 7517 | ret = -EINVAL; |
| 7518 | if (!arg || nr_args > 256) |
| 7519 | break; |
| 7520 | ret = io_probe(ctx, arg, nr_args); |
| 7521 | break; |
Jens Axboe | 071698e | 2020-01-28 10:04:42 -0700 | [diff] [blame] | 7522 | case IORING_REGISTER_PERSONALITY: |
| 7523 | ret = -EINVAL; |
| 7524 | if (arg || nr_args) |
| 7525 | break; |
| 7526 | ret = io_register_personality(ctx); |
| 7527 | break; |
| 7528 | case IORING_UNREGISTER_PERSONALITY: |
| 7529 | ret = -EINVAL; |
| 7530 | if (arg) |
| 7531 | break; |
| 7532 | ret = io_unregister_personality(ctx, nr_args); |
| 7533 | break; |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 7534 | default: |
| 7535 | ret = -EINVAL; |
| 7536 | break; |
| 7537 | } |
| 7538 | |
Jens Axboe | 071698e | 2020-01-28 10:04:42 -0700 | [diff] [blame] | 7539 | if (io_register_op_must_quiesce(opcode)) { |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 7540 | /* bring the ctx back to life */ |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 7541 | percpu_ref_reinit(&ctx->refs); |
Jens Axboe | c150368 | 2020-01-08 08:26:07 -0700 | [diff] [blame] | 7542 | out: |
| 7543 | reinit_completion(&ctx->completions[0]); |
Jens Axboe | 05f3fb3 | 2019-12-09 11:22:50 -0700 | [diff] [blame] | 7544 | } |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 7545 | return ret; |
| 7546 | } |
| 7547 | |
| 7548 | SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, |
| 7549 | void __user *, arg, unsigned int, nr_args) |
| 7550 | { |
| 7551 | struct io_ring_ctx *ctx; |
| 7552 | long ret = -EBADF; |
| 7553 | struct fd f; |
| 7554 | |
| 7555 | f = fdget(fd); |
| 7556 | if (!f.file) |
| 7557 | return -EBADF; |
| 7558 | |
| 7559 | ret = -EOPNOTSUPP; |
| 7560 | if (f.file->f_op != &io_uring_fops) |
| 7561 | goto out_fput; |
| 7562 | |
| 7563 | ctx = f.file->private_data; |
| 7564 | |
| 7565 | mutex_lock(&ctx->uring_lock); |
| 7566 | ret = __io_uring_register(ctx, opcode, arg, nr_args); |
| 7567 | mutex_unlock(&ctx->uring_lock); |
Dmitrii Dolgov | c826bd7 | 2019-10-15 19:02:01 +0200 | [diff] [blame] | 7568 | trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, |
| 7569 | ctx->cq_ev_fd != NULL, ret); |
Jens Axboe | edafcce | 2019-01-09 09:16:05 -0700 | [diff] [blame] | 7570 | out_fput: |
| 7571 | fdput(f); |
| 7572 | return ret; |
| 7573 | } |
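/*
 * A common use of this syscall is IORING_REGISTER_BUFFERS, which pins the
 * given iovecs up front so that IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED
 * requests can reference them by index instead of mapping user pages per I/O.
 * A sketch, assuming ring_fd came from io_uring_setup(2) and buf is a
 * caller-owned allocation; register_one_buffer() is an illustrative helper:
 */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

static int register_one_buffer(int ring_fd, void *buf, size_t len)
{
	struct iovec iov = {
		.iov_base = buf,
		.iov_len  = len,
	};

	/* stays registered until IORING_UNREGISTER_BUFFERS or ring teardown */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_BUFFERS, &iov, 1);
}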
| 7574 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7575 | static int __init io_uring_init(void) |
| 7576 | { |
Stefan Metzmacher | d7f62e8 | 2020-01-29 14:39:41 +0100 | [diff] [blame] | 7577 | #define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \ |
| 7578 | BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \ |
| 7579 | BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \ |
| 7580 | } while (0) |
| 7581 | |
| 7582 | #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \ |
| 7583 | __BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename) |
| 7584 | BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64); |
| 7585 | BUILD_BUG_SQE_ELEM(0, __u8, opcode); |
| 7586 | BUILD_BUG_SQE_ELEM(1, __u8, flags); |
| 7587 | BUILD_BUG_SQE_ELEM(2, __u16, ioprio); |
| 7588 | BUILD_BUG_SQE_ELEM(4, __s32, fd); |
| 7589 | BUILD_BUG_SQE_ELEM(8, __u64, off); |
| 7590 | BUILD_BUG_SQE_ELEM(8, __u64, addr2); |
| 7591 | BUILD_BUG_SQE_ELEM(16, __u64, addr); |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 7592 | BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in); |
Stefan Metzmacher | d7f62e8 | 2020-01-29 14:39:41 +0100 | [diff] [blame] | 7593 | BUILD_BUG_SQE_ELEM(24, __u32, len); |
| 7594 | BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags); |
| 7595 | BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags); |
| 7596 | BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags); |
| 7597 | BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags); |
| 7598 | BUILD_BUG_SQE_ELEM(28, __u16, poll_events); |
| 7599 | BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags); |
| 7600 | BUILD_BUG_SQE_ELEM(28, __u32, msg_flags); |
| 7601 | BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags); |
| 7602 | BUILD_BUG_SQE_ELEM(28, __u32, accept_flags); |
| 7603 | BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags); |
| 7604 | BUILD_BUG_SQE_ELEM(28, __u32, open_flags); |
| 7605 | BUILD_BUG_SQE_ELEM(28, __u32, statx_flags); |
| 7606 | BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice); |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 7607 | BUILD_BUG_SQE_ELEM(28, __u32, splice_flags); |
Stefan Metzmacher | d7f62e8 | 2020-01-29 14:39:41 +0100 | [diff] [blame] | 7608 | BUILD_BUG_SQE_ELEM(32, __u64, user_data); |
| 7609 | BUILD_BUG_SQE_ELEM(40, __u16, buf_index); |
| 7610 | BUILD_BUG_SQE_ELEM(42, __u16, personality); |
Pavel Begunkov | 7d67af2 | 2020-02-24 11:32:45 +0300 | [diff] [blame] | 7611 | BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in); |
Stefan Metzmacher | d7f62e8 | 2020-01-29 14:39:41 +0100 | [diff] [blame] | 7612 | |
Jens Axboe | d365634 | 2019-12-18 09:50:26 -0700 | [diff] [blame] | 7613 | BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 7614 | req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC); |
| 7615 | return 0; |
| 7616 | } |
| 7617 | __initcall(io_uring_init); |