
/*
    Copyright (c) 2007-2016 Contributors as noted in the AUTHORS file

    This file is part of libzmq, the ZeroMQ core engine in C++.

    libzmq is free software; you can redistribute it and/or modify it under
    the terms of the GNU Lesser General Public License (LGPL) as published
    by the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    As a special exception, the Contributors give you permission to link
    this library with independent modules to produce an executable,
    regardless of the license terms of these independent modules, and to
    copy and distribute the resulting executable under terms of your choice,
    provided that you also meet, for each linked independent module, the
    terms and conditions of the license of that module. An independent
    module is a module which is not derived from or based on this library.
    If you modify this library, you must extend this exception to your
    version of the library.

    libzmq is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
    License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ZMQ_YQUEUE_HPP_INCLUDED__
#define __ZMQ_YQUEUE_HPP_INCLUDED__

#include <stdlib.h>
#include <stddef.h>

#include "err.hpp"
#include "atomic_ptr.hpp"
#include "platform.hpp"

namespace zmq
{
// yqueue is an efficient queue implementation. The main goal is
// to minimise the number of allocations/deallocations needed. Thus yqueue
// allocates/deallocates elements in batches of N.
//
// yqueue allows one thread to use the push/back functions and another one
// to use the pop/front functions. However, the user must ensure that there
// is no pop on an empty queue and that both threads don't access the same
// element in an unsynchronised manner.
//
// T is the type of the object in the queue.
// N is the granularity of the queue (how many pushes have to be done till
// an actual memory allocation is required).
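//
// A minimal usage sketch (illustrative; the element type and values are
// arbitrary): the writer claims a slot with push () and fills it in via
// back (), while the reader inspects front () and releases it with pop ().
//
//     yqueue_t<int, 256> q;
//     q.push ();           // claim a slot at the back
//     q.back () = 42;      // fill it in
//     int v = q.front ();  // read the oldest element
//     q.pop ();            // release it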
#if defined HAVE_POSIX_MEMALIGN
// ALIGN is the memory alignment size to use in the case where we have
// posix_memalign available. The default value is 64; this alignment will
// prevent two queue chunks from occupying the same CPU cache line on
// architectures where cache lines are <= 64 bytes (e.g. most things
// except POWER). It is detected at build time to try to account for other
// platforms like POWER and s390x.
template <typename T, int N, size_t ALIGN = ZMQ_CACHELINE_SIZE> class yqueue_t
#else
template <typename T, int N> class yqueue_t
#endif
{
  public:
    // Create the queue.
    inline yqueue_t ()
    {
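        // The queue starts with a single, empty chunk. _back_chunk stays
        // NULL until the first push () claims a slot.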
        _begin_chunk = allocate_chunk ();
        alloc_assert (_begin_chunk);
        _begin_pos = 0;
        _back_chunk = NULL;
        _back_pos = 0;
        _end_chunk = _begin_chunk;
        _end_pos = 0;
    }

    // Destroy the queue.
    inline ~yqueue_t ()
    {
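        // Walk the chunk list from begin to end, freeing every chunk,
        // then release the spare chunk (if any) kept around for reuse.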
        while (true) {
            if (_begin_chunk == _end_chunk) {
                free (_begin_chunk);
                break;
            }
            chunk_t *o = _begin_chunk;
            _begin_chunk = _begin_chunk->next;
            free (o);
        }

        chunk_t *sc = _spare_chunk.xchg (NULL);
        free (sc);
    }

    // Returns reference to the front element of the queue.
    // If the queue is empty, behaviour is undefined.
    inline T &front () { return _begin_chunk->values[_begin_pos]; }

    // Returns reference to the back element of the queue.
    // If the queue is empty, behaviour is undefined.
    inline T &back () { return _back_chunk->values[_back_pos]; }

    // Adds an element to the back end of the queue.
    inline void push ()
    {
        _back_chunk = _end_chunk;
        _back_pos = _end_pos;

        if (++_end_pos != N)
            return;
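
        // The current chunk is full: link in a new one. Reuse the spare
        // chunk handed back by the reader if there is one, so a malloc is
        // avoided in the common case.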
        chunk_t *sc = _spare_chunk.xchg (NULL);
        if (sc) {
            _end_chunk->next = sc;
            sc->prev = _end_chunk;
        } else {
            _end_chunk->next = allocate_chunk ();
            alloc_assert (_end_chunk->next);
            _end_chunk->next->prev = _end_chunk;
        }
        _end_chunk = _end_chunk->next;
        _end_pos = 0;
    }

    // Removes an element from the back end of the queue. In other words,
    // it rolls back the last push to the queue. Take care: the caller is
    // responsible for destroying the object being unpushed.
    // The caller must also guarantee that the queue isn't empty when
    // unpush is called. It cannot be done automatically as the read
    // side of the queue can be managed by a different, completely
    // unsynchronised thread.
    inline void unpush ()
    {
        // First, move 'back' one position backwards.
        if (_back_pos)
            --_back_pos;
        else {
            _back_pos = N - 1;
            _back_chunk = _back_chunk->prev;
        }

        // Now, move the 'end' position backwards. Note that the obsolete
        // end chunk is not used as a spare chunk. The analysis shows that
        // doing so would require a free and an atomic operation per
        // deallocated chunk instead of a simple free.
        if (_end_pos)
            --_end_pos;
        else {
            _end_pos = N - 1;
            _end_chunk = _end_chunk->prev;
            free (_end_chunk->next);
            _end_chunk->next = NULL;
        }
    }

    // Removes an element from the front end of the queue.
    inline void pop ()
    {
        if (++_begin_pos == N) {
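            // The reader has consumed the whole front chunk; unlink it
            // from the list.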
            chunk_t *o = _begin_chunk;
            _begin_chunk = _begin_chunk->next;
            _begin_chunk->prev = NULL;
            _begin_pos = 0;

            // 'o' has been more recently used than _spare_chunk,
            // so for cache reasons we'll get rid of the spare and
            // use 'o' as the spare.
            chunk_t *cs = _spare_chunk.xchg (o);
            free (cs);
        }
    }

  private:
    // Individual memory chunk to hold N elements.
    struct chunk_t
    {
        T values[N];
        chunk_t *prev;
        chunk_t *next;
    };

    static inline chunk_t *allocate_chunk ()
    {
#if defined HAVE_POSIX_MEMALIGN
        void *pv;
        if (posix_memalign (&pv, ALIGN, sizeof (chunk_t)) == 0)
            return (chunk_t *) pv;
        return NULL;
#else
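        // posix_memalign is not available: fall back to a plain malloc,
        // accepting whatever alignment it provides.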
        return static_cast<chunk_t *> (malloc (sizeof (chunk_t)));
#endif
    }

    // Back position may point to invalid memory if the queue is empty,
    // while begin & end positions are always valid. The begin position is
    // accessed exclusively by the queue reader (front/pop), while the back
    // and end positions are accessed exclusively by the queue writer
    // (back/push).
    chunk_t *_begin_chunk;
    int _begin_pos;
    chunk_t *_back_chunk;
    int _back_pos;
    chunk_t *_end_chunk;
    int _end_pos;

    // People are likely to produce and consume at similar rates. In
    // this scenario holding onto the most recently freed chunk saves
    // us from having to call malloc/free.
    atomic_ptr_t<chunk_t> _spare_chunk;

    ZMQ_NON_COPYABLE_NOR_MOVABLE (yqueue_t)
};
}

#endif