/*
 *	Definitions for the 'struct ptr_ring' datastructure.
 *
 *	Author:
 *		Michael S. Tsirkin <mst@redhat.com>
 *
 *	Copyright (C) 2016 Red Hat, Inc.
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; either version 2 of the License, or (at your
 *	option) any later version.
 *
 *	This is a limited-size FIFO maintaining pointers in FIFO order, with
 *	one CPU producing entries and another consuming entries from a FIFO.
 *
 *	This implementation tries to minimize cache-contention when there is a
 *	single producer and a single consumer CPU.
 */

#ifndef _LINUX_PTR_RING_H
#define _LINUX_PTR_RING_H 1

#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/errno.h>
#endif

struct ptr_ring {
	int producer ____cacheline_aligned_in_smp;
	spinlock_t producer_lock;
	int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */
	int consumer_tail; /* next entry to invalidate */
	spinlock_t consumer_lock;
	/* Shared consumer/producer data */
	/* Read-only by both the producer and the consumer */
	int size ____cacheline_aligned_in_smp; /* max entries in queue */
	int batch; /* number of entries to consume in a batch */
	void **queue;
};
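
/* Example usage (illustrative sketch only, not part of this header): one
 * thread produces and another consumes, each taking only its own lock.
 * The names example_ring, example_setup, example_send and example_recv are
 * hypothetical. ptr_ring_produce() returns -ENOSPC when the ring is full and
 * ptr_ring_consume() returns NULL when it is empty.
 *
 *	static struct ptr_ring example_ring;
 *
 *	static int example_setup(void)
 *	{
 *		return ptr_ring_init(&example_ring, 256, GFP_KERNEL);
 *	}
 *
 *	static int example_send(void *item)
 *	{
 *		return ptr_ring_produce(&example_ring, item);
 *	}
 *
 *	static void *example_recv(void)
 *	{
 *		return ptr_ring_consume(&example_ring);
 *	}
 */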

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). If the ring is ever resized, callers must hold
 * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold
 * producer_lock, the next call to __ptr_ring_produce may fail.
 */
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
	return r->queue[r->producer];
}

static inline bool ptr_ring_full(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline bool ptr_ring_full_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_full(r);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline bool ptr_ring_full_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_full(r);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must hold producer_lock.
 * Callers are responsible for making sure the pointer that is being queued
 * points to valid data.
 */
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	if (unlikely(!r->size) || r->queue[r->producer])
		return -ENOSPC;

	/* Make sure the pointer we are storing points to valid data. */
	/* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
	smp_wmb();

	r->queue[r->producer++] = ptr;
	if (unlikely(r->producer >= r->size))
		r->producer = 0;
	return 0;
}

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * consume in interrupt or BH context, you must disable interrupts/BH when
 * calling this.
 */
static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_irq(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irq(&r->producer_lock);

	return ret;
}

static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&r->producer_lock, flags);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_irqrestore(&r->producer_lock, flags);

	return ret;
}

static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
{
	int ret;

	spin_lock_bh(&r->producer_lock);
	ret = __ptr_ring_produce(r, ptr);
	spin_unlock_bh(&r->producer_lock);

	return ret;
}
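
/* Choosing a variant (illustrative sketch only; the example_xmit_* helpers
 * are hypothetical). The plain, _irq, _bh and _any forms differ only in how
 * producer_lock is taken. If the ring is also produced to from softirq (BH)
 * context, a process-context producer should use the _bh variant so the lock
 * cannot be taken recursively from BH on the same CPU:
 *
 *	static int example_xmit_task(struct ptr_ring *r, void *pkt)
 *	{
 *		return ptr_ring_produce_bh(r, pkt);
 *	}
 *
 *	static int example_xmit_softirq(struct ptr_ring *r, void *pkt)
 *	{
 *		return ptr_ring_produce(r, pkt);
 *	}
 */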

/* Note: callers invoking this in a loop must use a compiler barrier,
 * for example cpu_relax(). Callers must take consumer_lock
 * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
 * If the ring is never resized, and if the pointer is merely
 * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
 * However, if called outside the lock, and if some other CPU
 * consumes ring entries at the same time, the value returned
 * is not guaranteed to be correct.
 * In this case - to avoid incorrectly detecting the ring
 * as empty - the CPU consuming the ring entries is responsible
 * for either consuming all ring entries until the ring is empty,
 * or synchronizing with some other CPU and causing it to
 * execute __ptr_ring_peek and/or consume the ring entries
 * after the synchronization point.
 */
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
	if (likely(r->size))
		return r->queue[r->consumer_head];
	return NULL;
}

/* See __ptr_ring_peek above for locking rules. */
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
	return !__ptr_ring_peek(r);
}

static inline bool ptr_ring_empty(struct ptr_ring *r)
{
	bool ret;

	spin_lock(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
{
	bool ret;

	spin_lock_irq(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_irq(&r->consumer_lock);

	return ret;
}

static inline bool ptr_ring_empty_any(struct ptr_ring *r)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ret = __ptr_ring_empty(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ret;
}

static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
{
	bool ret;

	spin_lock_bh(&r->consumer_lock);
	ret = __ptr_ring_empty(r);
	spin_unlock_bh(&r->consumer_lock);

	return ret;
}

/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
	/* Fundamentally, what we want to do is update consumer
	 * index and zero out the entry so producer can reuse it.
	 * Doing it naively at each consume would be as simple as:
	 *       consumer = r->consumer;
	 *       r->queue[consumer++] = NULL;
	 *       if (unlikely(consumer >= r->size))
	 *               consumer = 0;
	 *       r->consumer = consumer;
	 * but that is suboptimal when the ring is full as producer is writing
	 * out new entries in the same cache line. Defer these updates until a
	 * batch of entries has been consumed.
	 */
	/* Note: we must keep consumer_head valid at all times for __ptr_ring_empty
	 * to work correctly.
	 */
	int consumer_head = r->consumer_head;
	int head = consumer_head++;

	/* Once we have processed enough entries, invalidate them in
	 * the ring all at once so the producer can reuse their space in the ring.
	 * We also do this when we reach the end of the ring - not mandatory
	 * but it helps keep the implementation simple.
	 */
	if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
		     consumer_head >= r->size)) {
		/* Zero out entries in reverse order: this way we touch last the
		 * cache line that the producer might currently be reading;
		 * the producer won't make progress and touch other cache lines
		 * besides the first one until we write out all entries.
		 */
		while (likely(head >= r->consumer_tail))
			r->queue[head--] = NULL;
		r->consumer_tail = consumer_head;
	}
	if (unlikely(consumer_head >= r->size)) {
		consumer_head = 0;
		r->consumer_tail = 0;
	}
	r->consumer_head = consumer_head;
}
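
/* Worked example of the deferred invalidation above (illustrative only, the
 * numbers are hypothetical): with r->size = 16 and r->batch = 4, starting
 * from consumer_head = consumer_tail = 0,
 *
 *	consume #1..#3:	consumer_head becomes 1, 2, 3; consumer_tail stays 0
 *			and queue[0..2] still hold their consumed pointers
 *	consume #4:	consumer_head - consumer_tail reaches batch, so
 *			queue[3], queue[2], queue[1], queue[0] are NULLed in
 *			reverse order and consumer_tail becomes 4
 *
 * The producer thus sees a batch of slots freed at once instead of sharing
 * the cache line being invalidated with the consumer on every single consume.
 */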

static inline void *__ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	ptr = __ptr_ring_peek(r);
	if (ptr)
		__ptr_ring_discard_one(r);

	/* Make sure anyone accessing data through the pointer is up to date. */
	/* Pairs with smp_wmb in __ptr_ring_produce. */
	smp_read_barrier_depends();
	return ptr;
}

static inline int __ptr_ring_consume_batched(struct ptr_ring *r,
					     void **array, int n)
{
	void *ptr;
	int i;

	for (i = 0; i < n; i++) {
		ptr = __ptr_ring_consume(r);
		if (!ptr)
			break;
		array[i] = ptr;
	}

	return i;
}

/*
 * Note: resize (below) nests producer lock within consumer lock, so if you
 * call this in interrupt or BH context, you must disable interrupts/BH when
 * producing.
 */
static inline void *ptr_ring_consume(struct ptr_ring *r)
{
	void *ptr;

	spin_lock(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_irq(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irq(&r->consumer_lock);

	return ptr;
}

static inline void *ptr_ring_consume_any(struct ptr_ring *r)
{
	unsigned long flags;
	void *ptr;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ptr = __ptr_ring_consume(r);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ptr;
}

static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
{
	void *ptr;

	spin_lock_bh(&r->consumer_lock);
	ptr = __ptr_ring_consume(r);
	spin_unlock_bh(&r->consumer_lock);

	return ptr;
}

static inline int ptr_ring_consume_batched(struct ptr_ring *r,
					    void **array, int n)
{
	int ret;

	spin_lock(&r->consumer_lock);
	ret = __ptr_ring_consume_batched(r, array, n);
	spin_unlock(&r->consumer_lock);

	return ret;
}

static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r,
					       void **array, int n)
{
	int ret;

	spin_lock_irq(&r->consumer_lock);
	ret = __ptr_ring_consume_batched(r, array, n);
	spin_unlock_irq(&r->consumer_lock);

	return ret;
}

static inline int ptr_ring_consume_batched_any(struct ptr_ring *r,
					       void **array, int n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&r->consumer_lock, flags);
	ret = __ptr_ring_consume_batched(r, array, n);
	spin_unlock_irqrestore(&r->consumer_lock, flags);

	return ret;
}

static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
					      void **array, int n)
{
	int ret;

	spin_lock_bh(&r->consumer_lock);
	ret = __ptr_ring_consume_batched(r, array, n);
	spin_unlock_bh(&r->consumer_lock);

	return ret;
}
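
/* Example of batched consumption (illustrative sketch only; example_drain and
 * the handle() callback are hypothetical). Draining several entries per lock
 * acquisition amortizes the locking cost:
 *
 *	static void example_drain(struct ptr_ring *r, void (*handle)(void *))
 *	{
 *		void *batch[16];
 *		int n, i;
 *
 *		do {
 *			n = ptr_ring_consume_batched(r, batch, 16);
 *			for (i = 0; i < n; i++)
 *				handle(batch[i]);
 *		} while (n == 16);
 *	}
 */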

/* Cast to structure type and call a function without discarding from FIFO.
 * Function must return a value.
 * Callers must take consumer_lock.
 */
#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))

#define PTR_RING_PEEK_CALL(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irq(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_BH(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	\
	spin_lock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_bh(&(r)->consumer_lock); \
	__PTR_RING_PEEK_CALL_v; \
})

#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
	unsigned long __PTR_RING_PEEK_CALL_f; \
	\
	spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
	spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
	__PTR_RING_PEEK_CALL_v; \
})
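
/* Example use of the peek helpers (illustrative sketch only; struct
 * example_item, example_item_len and example_ring are hypothetical). The
 * callback receives the queued pointer, still inside the ring, and must
 * tolerate NULL when the ring is empty:
 *
 *	struct example_item {
 *		int len;
 *	};
 *
 *	static int example_item_len(struct example_item *item)
 *	{
 *		return item ? item->len : 0;
 *	}
 *
 *	int len = PTR_RING_PEEK_CALL(&example_ring, example_item_len);
 */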

static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
	/* Allocate an extra dummy element at the end of the ring to avoid
	 * consumer head or producer head access past the end of the array.
	 * This is possible when producer/consumer operations and
	 * __ptr_ring_peek operations run in parallel.
	 */
	return kcalloc(size + 1, sizeof(void *), gfp);
}

static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
{
	r->size = size;
	r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue));
	/* We need to set batch to at least 1 to make the logic
	 * in __ptr_ring_discard_one work correctly.
	 * Batching too much (because the ring is small) would cause a lot of
	 * burstiness. Needs tuning; for now, disable batching in these cases.
	 */
	if (r->batch > r->size / 2 || !r->batch)
		r->batch = 1;
}

static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
	r->queue = __ptr_ring_init_queue_alloc(size, gfp);
	if (!r->queue)
		return -ENOMEM;

	__ptr_ring_set_size(r, size);
	r->producer = r->consumer_head = r->consumer_tail = 0;
	spin_lock_init(&r->producer_lock);
	spin_lock_init(&r->consumer_lock);

	return 0;
}

/*
 * Return entries into the ring. Destroy entries that don't fit.
 *
 * Note: this is expected to be a rare slow path operation.
 *
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume the ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
				      void (*destroy)(void *))
{
	unsigned long flags;
	int head;

	spin_lock_irqsave(&r->consumer_lock, flags);
	spin_lock(&r->producer_lock);

	if (!r->size)
		goto done;

	/*
	 * Clean out buffered entries (for simplicity). This way the following
	 * code can test entries for NULL and, if they are not, assume they
	 * are valid.
	 */
	head = r->consumer_head - 1;
	while (likely(head >= r->consumer_tail))
		r->queue[head--] = NULL;
	r->consumer_tail = r->consumer_head;

	/*
	 * Go over the entries in the batch, moving the head back and copying
	 * entries. Stop when we run into previously unconsumed entries.
	 */
	while (n) {
		head = r->consumer_head - 1;
		if (head < 0)
			head = r->size - 1;
		if (r->queue[head]) {
			/* This batch entry will have to be destroyed. */
			goto done;
		}
		r->queue[head] = batch[--n];
		r->consumer_tail = r->consumer_head = head;
	}

done:
	/* Destroy all entries left in the batch. */
	while (n)
		destroy(batch[--n]);
	spin_unlock(&r->producer_lock);
	spin_unlock_irqrestore(&r->consumer_lock, flags);
}
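
/* Example of returning unprocessed entries (illustrative sketch only;
 * example_requeue, handle() and stop_now() are hypothetical). A consumer that
 * has to abort mid-batch can push the leftover pointers back into the ring in
 * their original order; anything that no longer fits is destroyed:
 *
 *	static void example_requeue(struct ptr_ring *r, void (*destroy)(void *))
 *	{
 *		void *batch[16];
 *		int n, i;
 *
 *		n = ptr_ring_consume_batched(r, batch, 16);
 *		for (i = 0; i < n; i++) {
 *			if (stop_now()) {
 *				ptr_ring_unconsume(r, batch + i, n - i, destroy);
 *				return;
 *			}
 *			handle(batch[i]);
 *		}
 *	}
 */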

static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
					   int size, gfp_t gfp,
					   void (*destroy)(void *))
{
	int producer = 0;
	void **old;
	void *ptr;

	while ((ptr = __ptr_ring_consume(r)))
		if (producer < size)
			queue[producer++] = ptr;
		else if (destroy)
			destroy(ptr);

	__ptr_ring_set_size(r, size);
	r->producer = producer;
	r->consumer_head = 0;
	r->consumer_tail = 0;
	old = r->queue;
	r->queue = queue;

	return old;
}

/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume the ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
				  void (*destroy)(void *))
{
	unsigned long flags;
	void **queue = __ptr_ring_init_queue_alloc(size, gfp);
	void **old;

	if (!queue)
		return -ENOMEM;

	spin_lock_irqsave(&(r)->consumer_lock, flags);
	spin_lock(&(r)->producer_lock);

	old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);

	spin_unlock(&(r)->producer_lock);
	spin_unlock_irqrestore(&(r)->consumer_lock, flags);

	kfree(old);

	return 0;
}
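
/* Example of resizing a live ring (illustrative sketch only; example_free and
 * example_grow are hypothetical). Entries that do not fit into the new queue
 * are handed to the destroy callback; both locks are taken internally, so the
 * producer and consumer may keep running concurrently:
 *
 *	static void example_free(void *ptr)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	static int example_grow(struct ptr_ring *r, int new_size)
 *	{
 *		return ptr_ring_resize(r, new_size, GFP_KERNEL, example_free);
 *	}
 */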

/*
 * Note: producer lock is nested within consumer lock, so if you
 * resize you must make sure all uses nest correctly.
 * In particular if you consume the ring in interrupt or BH context, you must
 * disable interrupts/BH when doing so.
 */
static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
					   unsigned int nrings,
					   int size,
					   gfp_t gfp, void (*destroy)(void *))
{
	unsigned long flags;
	void ***queues;
	int i;

	queues = kmalloc_array(nrings, sizeof(*queues), gfp);
	if (!queues)
		goto noqueues;

	for (i = 0; i < nrings; ++i) {
		queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
		if (!queues[i])
			goto nomem;
	}

	for (i = 0; i < nrings; ++i) {
		spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
		spin_lock(&(rings[i])->producer_lock);
		queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
						  size, gfp, destroy);
		spin_unlock(&(rings[i])->producer_lock);
		spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
	}

	for (i = 0; i < nrings; ++i)
		kfree(queues[i]);

	kfree(queues);

	return 0;

nomem:
	while (--i >= 0)
		kfree(queues[i]);

	kfree(queues);

noqueues:
	return -ENOMEM;
}
656
Michael S. Tsirkin5d49de52016-06-13 23:54:45 +0300657static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
658{
659 void *ptr;
660
661 if (destroy)
662 while ((ptr = ptr_ring_consume(r)))
663 destroy(ptr);
Michael S. Tsirkin2e0ab8c2016-06-13 23:54:31 +0300664 kfree(r->queue);
665}
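
/* Example teardown (illustrative sketch only; example_teardown is
 * hypothetical, and example_free is the sketch shown above ptr_ring_resize).
 * Any entries still queued are handed to the destroy callback before the
 * queue array itself is freed:
 *
 *	static void example_teardown(struct ptr_ring *r)
 *	{
 *		ptr_ring_cleanup(r, example_free);
 *	}
 */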

#endif /* _LINUX_PTR_RING_H */