1 | /* |
2 | * Copyright (c) Facebook, Inc. and its affiliates. |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | */ |
16 | |
17 | /* |
18 | * Nicholas Ormrod (njormrod) |
19 | * Andrei Alexandrescu (aalexandre) |
20 | * |
21 | * FBVector is Facebook's drop-in implementation of std::vector. It has special |
22 | * optimizations for use with relocatable types and jemalloc. |
23 | */ |
24 | |
25 | #pragma once |
26 | |
27 | //============================================================================= |
28 | // headers |
29 | |
30 | #include <algorithm> |
31 | #include <cassert> |
32 | #include <iterator> |
33 | #include <memory> |
34 | #include <stdexcept> |
35 | #include <type_traits> |
36 | #include <utility> |
37 | |
38 | #include <folly/FormatTraits.h> |
39 | #include <folly/Likely.h> |
40 | #include <folly/ScopeGuard.h> |
41 | #include <folly/Traits.h> |
42 | #include <folly/lang/Exception.h> |
43 | #include <folly/memory/Malloc.h> |
44 | |
45 | //============================================================================= |
46 | // forward declaration |
47 | |
48 | namespace folly { |
49 | template <class T, class Allocator = std::allocator<T>> |
50 | class fbvector; |
51 | } // namespace folly |
52 | |
53 | //============================================================================= |
54 | // unrolling |
55 | |
// Apply OP to every pointer in [first, last), manually unrolled four at a
// time; the second loop handles the remainder. `first` is advanced to `last`
// as a side effect. NOTE: the trailing semicolon after `while (0)` is
// intentional here -- the use site (S_destroy_range) invokes the macro
// without one.
#define FOLLY_FBV_UNROLL_PTR(first, last, OP) \
  do { \
    for (; (last) - (first) >= 4; (first) += 4) { \
      OP(((first) + 0)); \
      OP(((first) + 1)); \
      OP(((first) + 2)); \
      OP(((first) + 3)); \
    } \
    for (; (first) != (last); ++(first)) \
      OP((first)); \
  } while (0);
67 | |
68 | //============================================================================= |
69 | /////////////////////////////////////////////////////////////////////////////// |
70 | // // |
71 | // fbvector class // |
72 | // // |
73 | /////////////////////////////////////////////////////////////////////////////// |
74 | |
75 | namespace folly { |
76 | |
77 | template <class T, class Allocator> |
78 | class fbvector { |
79 | //=========================================================================== |
80 | //--------------------------------------------------------------------------- |
81 | // implementation |
82 | private: |
83 | typedef std::allocator_traits<Allocator> A; |
84 | |
  // Internal representation. Derives from Allocator to get the empty-base
  // optimization for stateless allocators, and owns the three pointers that
  // define the buffer:
  //   b_ -- start of storage (and of the live elements)
  //   e_ -- one past the last live element
  //   z_ -- one past the end of allocated storage (capacity boundary)
  struct Impl : public Allocator {
    // typedefs
    typedef typename A::pointer pointer;
    typedef typename A::size_type size_type;

    // data
    pointer b_, e_, z_;

    // constructors
    Impl() : Allocator(), b_(nullptr), e_(nullptr), z_(nullptr) {}
    /* implicit */ Impl(const Allocator& alloc)
        : Allocator(alloc), b_(nullptr), e_(nullptr), z_(nullptr) {}
    /* implicit */ Impl(Allocator&& alloc)
        : Allocator(std::move(alloc)), b_(nullptr), e_(nullptr), z_(nullptr) {}

    // Allocates capacity for at least n elements (rounded up by init) but
    // constructs none of them: e_ == b_ afterwards.
    /* implicit */ Impl(size_type n, const Allocator& alloc = Allocator())
        : Allocator(alloc) {
      init(n);
    }

    // Move constructor steals the buffer and leaves `other` empty.
    Impl(Impl&& other) noexcept
        : Allocator(std::move(other)),
          b_(other.b_),
          e_(other.e_),
          z_(other.z_) {
      other.b_ = other.e_ = other.z_ = nullptr;
    }

    // destructor
    ~Impl() {
      destroy();
    }

    // allocation
    // note that 'allocate' and 'deallocate' are inherited from Allocator
    // D_ = "dispatch": choose between the raw-malloc fast path (when the
    // allocator is std::allocator) and the user-provided allocator.
    T* D_allocate(size_type n) {
      if (usingStdAllocator) {
        return static_cast<T*>(checkedMalloc(n * sizeof(T)));
      } else {
        return std::allocator_traits<Allocator>::allocate(*this, n);
      }
    }

    void D_deallocate(T* p, size_type n) noexcept {
      if (usingStdAllocator) {
        free(p);
      } else {
        std::allocator_traits<Allocator>::deallocate(*this, p, n);
      }
    }

    // helpers
    // Exchanges only the buffer pointers; the allocator is left untouched.
    void swapData(Impl& other) {
      std::swap(b_, other.b_);
      std::swap(e_, other.e_);
      std::swap(z_, other.z_);
    }

    // data ops
    // Destroys all live elements and releases the buffer. Does NOT null the
    // pointers; callers that keep this Impl alive must reset them (see
    // reset()).
    inline void destroy() noexcept {
      if (b_) {
        // THIS DISPATCH CODE IS DUPLICATED IN fbvector::D_destroy_range_a.
        // It has been inlined here for speed. It calls the static fbvector
        // methods to perform the actual destruction.
        if (usingStdAllocator) {
          S_destroy_range(b_, e_);
        } else {
          S_destroy_range_a(*this, b_, e_);
        }

        D_deallocate(b_, size_type(z_ - b_));
      }
    }

    // Allocates a buffer for at least n elements; the capacity is rounded up
    // to the allocator's preferred allocation size via goodMallocSize.
    // n == 0 allocates nothing.
    void init(size_type n) {
      if (UNLIKELY(n == 0)) {
        b_ = e_ = z_ = nullptr;
      } else {
        size_type sz = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
        b_ = D_allocate(sz);
        e_ = b_;
        z_ = b_ + sz;
      }
    }

    // Adopts an externally allocated buffer.
    void set(pointer newB, size_type newSize, size_type newCap) {
      z_ = newB + newCap;
      e_ = newB + newSize;
      b_ = newB;
    }

    // Drops the current buffer and allocates a fresh empty one of newCap.
    // If the allocation throws, the rollback guard leaves the Impl in a
    // valid empty state instead of with dangling pointers.
    void reset(size_type newCap) {
      destroy();
      auto rollback = makeGuard([&] { init(0); });
      init(newCap);
      rollback.dismiss();
    }
    void reset() { // same as reset(0)
      destroy();
      b_ = e_ = z_ = nullptr;
    }
  } impl_;
187 | |
  // Swaps two Impls. The allocator subobjects are exchanged only for
  // stateful (non-std) allocators -- std::allocator is stateless, so
  // swapping it would be a no-op. The buffer pointers are always exchanged.
  static void swap(Impl& a, Impl& b) {
    using std::swap;
    if (!usingStdAllocator) {
      swap(static_cast<Allocator&>(a), static_cast<Allocator&>(b));
    }
    a.swapData(b);
  }
195 | |
196 | //=========================================================================== |
197 | //--------------------------------------------------------------------------- |
198 | // types and constants |
199 | public: |
  typedef T value_type;
  typedef value_type& reference;
  typedef const value_type& const_reference;
  typedef T* iterator;
  typedef const T* const_iterator;
  typedef size_t size_type;
  typedef typename std::make_signed<size_type>::type difference_type;
  typedef Allocator allocator_type;
  typedef typename A::pointer pointer;
  typedef typename A::const_pointer const_pointer;
  typedef std::reverse_iterator<iterator> reverse_iterator;
  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;

 private:
  // Small trivially-copyable types are cheaper to pass by value than by
  // reference (and passing by value also sidesteps aliasing with the
  // vector's own storage; see dataIsInternalAndNotVT).
  static constexpr bool should_pass_by_value =
      is_trivially_copyable<T>::value &&
      sizeof(T) <= 16; // don't force large structures to be passed by value
  // VT: parameter type for "copy in" arguments (T by value, or const T&).
  typedef typename std::conditional<should_pass_by_value, T, const T&>::type VT;
  // MT: parameter type for "move in" arguments (T by value, or T&&).
  typedef typename std::conditional<should_pass_by_value, T, T&&>::type MT;

  static constexpr bool usingStdAllocator =
      std::is_same<Allocator, std::allocator<T>>::value;
  // Move assignment can simply swap/steal buffers when the allocator is
  // interchangeable or propagates on move assignment; otherwise elements
  // must be moved one by one (see moveFrom).
  typedef bool_constant<
      usingStdAllocator || A::propagate_on_container_move_assignment::value>
      moveIsSwap;
225 | |
226 | //=========================================================================== |
227 | //--------------------------------------------------------------------------- |
228 | // allocator helpers |
229 | private: |
230 | //--------------------------------------------------------------------------- |
231 | // allocate |
232 | |
  // Allocates raw storage for n elements via the Impl's dispatcher.
  T* M_allocate(size_type n) {
    return impl_.D_allocate(n);
  }

  //---------------------------------------------------------------------------
  // deallocate

  // Releases storage previously obtained from M_allocate.
  void M_deallocate(T* p, size_type n) noexcept {
    impl_.D_deallocate(p, n);
  }
243 | |
244 | //--------------------------------------------------------------------------- |
245 | // construct |
246 | |
247 | // GCC is very sensitive to the exact way that construct is called. For |
248 | // that reason there are several different specializations of construct. |
249 | |
  // Generic construct: placement-new on the fast path, allocator_traits
  // otherwise. M_ = member (uses impl_'s allocator), S_ = static
  // (placement-new only), S_*_a = static with an explicit allocator.
  template <typename U, typename... Args>
  void M_construct(U* p, Args&&... args) {
    if (usingStdAllocator) {
      new (p) U(std::forward<Args>(args)...);
    } else {
      std::allocator_traits<Allocator>::construct(
          impl_, p, std::forward<Args>(args)...);
    }
  }

  template <typename U, typename... Args>
  static void S_construct(U* p, Args&&... args) {
    new (p) U(std::forward<Args>(args)...);
  }

  template <typename U, typename... Args>
  static void S_construct_a(Allocator& a, U* p, Args&&... args) {
    std::allocator_traits<Allocator>::construct(
        a, p, std::forward<Args>(args)...);
  }

  // scalar optimization
  // For scalars, plain assignment into the raw storage compiles to a store,
  // avoiding the placement-new ceremony.
  // TODO we can expand this optimization to: default copyable and assignable
  template <
      typename U,
      typename Enable = typename std::enable_if<std::is_scalar<U>::value>::type>
  void M_construct(U* p, U arg) {
    if (usingStdAllocator) {
      *p = arg;
    } else {
      std::allocator_traits<Allocator>::construct(impl_, p, arg);
    }
  }

  template <
      typename U,
      typename Enable = typename std::enable_if<std::is_scalar<U>::value>::type>
  static void S_construct(U* p, U arg) {
    *p = arg;
  }

  template <
      typename U,
      typename Enable = typename std::enable_if<std::is_scalar<U>::value>::type>
  static void S_construct_a(Allocator& a, U* p, U arg) {
    std::allocator_traits<Allocator>::construct(a, p, arg);
  }

  // const& optimization
  // Copy-construct non-scalars from a const reference without going through
  // perfect forwarding (GCC is sensitive to the exact call shape here).
  template <
      typename U,
      typename Enable =
          typename std::enable_if<!std::is_scalar<U>::value>::type>
  void M_construct(U* p, const U& value) {
    if (usingStdAllocator) {
      new (p) U(value);
    } else {
      std::allocator_traits<Allocator>::construct(impl_, p, value);
    }
  }

  template <
      typename U,
      typename Enable =
          typename std::enable_if<!std::is_scalar<U>::value>::type>
  static void S_construct(U* p, const U& value) {
    new (p) U(value);
  }

  template <
      typename U,
      typename Enable =
          typename std::enable_if<!std::is_scalar<U>::value>::type>
  static void S_construct_a(Allocator& a, U* p, const U& value) {
    std::allocator_traits<Allocator>::construct(a, p, value);
  }
326 | |
327 | //--------------------------------------------------------------------------- |
328 | // destroy |
329 | |
330 | void M_destroy(T* p) noexcept { |
331 | if (usingStdAllocator) { |
332 | if (!std::is_trivially_destructible<T>::value) { |
333 | p->~T(); |
334 | } |
335 | } else { |
336 | std::allocator_traits<Allocator>::destroy(impl_, p); |
337 | } |
338 | } |
339 | |
340 | //=========================================================================== |
341 | //--------------------------------------------------------------------------- |
342 | // algorithmic helpers |
343 | private: |
344 | //--------------------------------------------------------------------------- |
345 | // destroy_range |
346 | |
347 | // wrappers |
  // wrappers
  // Destroys [pos, e_) and retracts the end pointer to pos.
  void M_destroy_range_e(T* pos) noexcept {
    D_destroy_range_a(pos, impl_.e_);
    impl_.e_ = pos;
  }

  // dispatch
  // THIS DISPATCH CODE IS DUPLICATED IN IMPL. SEE IMPL FOR DETAILS.
  void D_destroy_range_a(T* first, T* last) noexcept {
    if (usingStdAllocator) {
      S_destroy_range(first, last);
    } else {
      S_destroy_range_a(impl_, first, last);
    }
  }
362 | |
363 | // allocator |
  // allocator
  // Destroys [first, last) through a user-provided allocator.
  static void S_destroy_range_a(Allocator& a, T* first, T* last) noexcept {
    for (; first != last; ++first) {
      std::allocator_traits<Allocator>::destroy(a, first);
    }
  }

  // optimized
  // Destroys [first, last) by direct destructor calls; a no-op for
  // trivially destructible types.
  static void S_destroy_range(T* first, T* last) noexcept {
    if (!std::is_trivially_destructible<T>::value) {
#define FOLLY_FBV_OP(p) (p)->~T()
      // EXPERIMENTAL DATA on fbvector<vector<int>> (where each vector<int> has
      // size 0), were vector<int> to be relocatable.
      // The unrolled version seems to work faster for small to medium sized
      // fbvectors. It gets a 10% speedup on fbvectors of size 1024, 64, and
      // 16.
      // The simple loop version seems to work faster for large fbvectors. The
      // unrolled version is about 6% slower on fbvectors on size 16384.
      // The two methods seem tied for very large fbvectors. The unrolled
      // version is about 0.5% slower on size 262144.

      // for (; first != last; ++first) first->~T();
      FOLLY_FBV_UNROLL_PTR(first, last, FOLLY_FBV_OP)
#undef FOLLY_FBV_OP
    }
  }
389 | |
390 | //--------------------------------------------------------------------------- |
391 | // uninitialized_fill_n |
392 | |
393 | // wrappers |
  // wrappers
  // Default-constructs sz elements at the end and advances e_.
  void M_uninitialized_fill_n_e(size_type sz) {
    D_uninitialized_fill_n_a(impl_.e_, sz);
    impl_.e_ += sz;
  }

  // Copy-constructs sz copies of value at the end and advances e_.
  void M_uninitialized_fill_n_e(size_type sz, VT value) {
    D_uninitialized_fill_n_a(impl_.e_, sz, value);
    impl_.e_ += sz;
  }

  // dispatch
  void D_uninitialized_fill_n_a(T* dest, size_type sz) {
    if (usingStdAllocator) {
      S_uninitialized_fill_n(dest, sz);
    } else {
      S_uninitialized_fill_n_a(impl_, dest, sz);
    }
  }

  void D_uninitialized_fill_n_a(T* dest, size_type sz, VT value) {
    if (usingStdAllocator) {
      S_uninitialized_fill_n(dest, sz, value);
    } else {
      S_uninitialized_fill_n_a(impl_, dest, sz, value);
    }
  }
420 | |
421 | // allocator |
  // allocator
  // Constructs sz elements at dest via the allocator. Exception-safe: if a
  // constructor throws, the already-constructed prefix [dest, b) is
  // destroyed by the rollback guard before the exception propagates.
  template <typename... Args>
  static void S_uninitialized_fill_n_a(
      Allocator& a,
      T* dest,
      size_type sz,
      Args&&... args) {
    auto b = dest;
    auto e = dest + sz;
    auto rollback = makeGuard([&] { S_destroy_range_a(a, dest, b); });
    for (; b != e; ++b) {
      std::allocator_traits<Allocator>::construct(
          a, b, std::forward<Args>(args)...);
    }
    rollback.dismiss();
  }
437 | |
438 | // optimized |
  // optimized
  // Default-constructs n elements at dest. Zero-initializable types are
  // memset to zero in one shot; otherwise elements are constructed one by
  // one with rollback on exception.
  static void S_uninitialized_fill_n(T* dest, size_type n) {
    if (folly::IsZeroInitializable<T>::value) {
      if (LIKELY(n != 0)) {
        std::memset((void*)dest, 0, sizeof(T) * n);
      }
    } else {
      auto b = dest;
      auto e = dest + n;
      // On exception, destroy the constructed prefix in reverse order.
      // b points at the element whose constructor threw, so start one back.
      auto rollback = makeGuard([&] {
        --b;
        for (; b >= dest; --b) {
          b->~T();
        }
      });
      for (; b != e; ++b) {
        S_construct(b);
      }
      rollback.dismiss();
    }
  }

  // Copy-constructs n copies of value at dest, with rollback on exception.
  static void S_uninitialized_fill_n(T* dest, size_type n, const T& value) {
    auto b = dest;
    auto e = dest + n;
    auto rollback = makeGuard([&] { S_destroy_range(dest, b); });
    for (; b != e; ++b) {
      S_construct(b, value);
    }
    rollback.dismiss();
  }
469 | |
470 | //--------------------------------------------------------------------------- |
471 | // uninitialized_copy |
472 | |
473 | // it is possible to add an optimization for the case where |
474 | // It = move(T*) and IsRelocatable<T> and Is0Initiailizable<T> |
475 | |
476 | // wrappers |
  // wrappers
  // Copy-constructs [first, last) at the end and advances e_.
  template <typename It>
  void M_uninitialized_copy_e(It first, It last) {
    D_uninitialized_copy_a(impl_.e_, first, last);
    impl_.e_ += std::distance(first, last);
  }

  // Move-constructs [first, last) at the end and advances e_.
  template <typename It>
  void M_uninitialized_move_e(It first, It last) {
    D_uninitialized_move_a(impl_.e_, first, last);
    impl_.e_ += std::distance(first, last);
  }

  // dispatch
  // With std::allocator and trivially copyable T, degrade to memcpy.
  template <typename It>
  void D_uninitialized_copy_a(T* dest, It first, It last) {
    if (usingStdAllocator) {
      if (folly::is_trivially_copyable<T>::value) {
        S_uninitialized_copy_bits(dest, first, last);
      } else {
        S_uninitialized_copy(dest, first, last);
      }
    } else {
      S_uninitialized_copy_a(impl_, dest, first, last);
    }
  }

  // Move = copy through move_iterators.
  template <typename It>
  void D_uninitialized_move_a(T* dest, It first, It last) {
    D_uninitialized_copy_a(
        dest, std::make_move_iterator(first), std::make_move_iterator(last));
  }
508 | |
509 | // allocator |
  // allocator
  // Copy-constructs [first, last) at dest via the allocator; destroys the
  // constructed prefix if a constructor throws.
  template <typename It>
  static void S_uninitialized_copy_a(Allocator& a, T* dest, It first, It last) {
    auto b = dest;
    auto rollback = makeGuard([&] { S_destroy_range_a(a, dest, b); });
    for (; first != last; ++first, ++b) {
      std::allocator_traits<Allocator>::construct(a, b, *first);
    }
    rollback.dismiss();
  }

  // optimized
  // Same as above but with direct placement-new (std::allocator path).
  template <typename It>
  static void S_uninitialized_copy(T* dest, It first, It last) {
    auto b = dest;
    auto rollback = makeGuard([&] { S_destroy_range(dest, b); });
    for (; first != last; ++first, ++b) {
      S_construct(b, *first);
    }
    rollback.dismiss();
  }
530 | |
531 | static void |
532 | S_uninitialized_copy_bits(T* dest, const T* first, const T* last) { |
533 | if (last != first) { |
534 | std::memcpy((void*)dest, (void*)first, (last - first) * sizeof(T)); |
535 | } |
536 | } |
537 | |
  // Bitwise copy from move_iterators over raw T*: for trivially copyable T,
  // moving is the same as copying, so memcpy the underlying pointers.
  static void S_uninitialized_copy_bits(
      T* dest,
      std::move_iterator<T*> first,
      std::move_iterator<T*> last) {
    T* bFirst = first.base();
    T* bLast = last.base();
    if (bLast != bFirst) {
      std::memcpy((void*)dest, (void*)bFirst, (bLast - bFirst) * sizeof(T));
    }
  }
548 | |
  // Fallback for iterators that do not expose a raw T* base: degrade to the
  // element-wise copy.
  template <typename It>
  static void S_uninitialized_copy_bits(T* dest, It first, It last) {
    S_uninitialized_copy(dest, first, last);
  }
553 | |
554 | //--------------------------------------------------------------------------- |
555 | // copy_n |
556 | |
557 | // This function is "unsafe": it assumes that the iterator can be advanced at |
558 | // least n times. However, as a private function, that unsafety is managed |
559 | // wholly by fbvector itself. |
560 | |
561 | template <typename It> |
562 | static It S_copy_n(T* dest, It first, size_type n) { |
563 | auto e = dest + n; |
564 | for (; dest != e; ++dest, ++first) { |
565 | *dest = *first; |
566 | } |
567 | return first; |
568 | } |
569 | |
570 | static const T* S_copy_n(T* dest, const T* first, size_type n) { |
571 | if (is_trivially_copyable<T>::value) { |
572 | std::memcpy((void*)dest, (void*)first, n * sizeof(T)); |
573 | return first + n; |
574 | } else { |
575 | return S_copy_n<const T*>(dest, first, n); |
576 | } |
577 | } |
578 | |
579 | static std::move_iterator<T*> |
580 | S_copy_n(T* dest, std::move_iterator<T*> mIt, size_type n) { |
581 | if (is_trivially_copyable<T>::value) { |
582 | T* first = mIt.base(); |
583 | std::memcpy((void*)dest, (void*)first, n * sizeof(T)); |
584 | return std::make_move_iterator(first + n); |
585 | } else { |
586 | return S_copy_n<std::move_iterator<T*>>(dest, mIt, n); |
587 | } |
588 | } |
589 | |
590 | //=========================================================================== |
591 | //--------------------------------------------------------------------------- |
592 | // relocation helpers |
593 | private: |
594 | // Relocation is divided into three parts: |
595 | // |
596 | // 1: relocate_move |
597 | // Performs the actual movement of data from point a to point b. |
598 | // |
599 | // 2: relocate_done |
600 | // Destroys the old data. |
601 | // |
602 | // 3: relocate_undo |
603 | // Destoys the new data and restores the old data. |
604 | // |
605 | // The three steps are used because there may be an exception after part 1 |
606 | // has completed. If that is the case, then relocate_undo can nullify the |
607 | // initial move. Otherwise, relocate_done performs the last bit of tidying |
608 | // up. |
609 | // |
610 | // The relocation trio may use either memcpy, move, or copy. It is decided |
611 | // by the following case statement: |
612 | // |
613 | // IsRelocatable && usingStdAllocator -> memcpy |
614 | // has_nothrow_move && usingStdAllocator -> move |
615 | // cannot copy -> move |
616 | // default -> copy |
617 | // |
618 | // If the class is non-copyable then it must be movable. However, if the |
619 | // move constructor is not noexcept, i.e. an error could be thrown, then |
620 | // relocate_undo will be unable to restore the old data, for fear of a |
621 | // second exception being thrown. This is a known and unavoidable |
622 | // deficiency. In lieu of a strong exception guarantee, relocate_undo does |
623 | // the next best thing: it provides a weak exception guarantee by |
624 | // destorying the new data, but leaving the old data in an indeterminate |
625 | // state. Note that that indeterminate state will be valid, since the |
626 | // old data has not been destroyed; it has merely been the source of a |
627 | // move, which is required to leave the source in a valid state. |
628 | |
629 | // wrappers |
  // Relocates all elements into the buffer at newB, then disposes of the
  // originals (step 1 + step 2 of the relocation trio; see comment above).
  void M_relocate(T* newB) {
    relocate_move(newB, impl_.b_, impl_.e_);
    relocate_done(newB, impl_.b_, impl_.e_);
  }

  // dispatch type trait
  // memcpy is legal only when T is relocatable and we own the raw-memory
  // fast path (std::allocator).
  typedef bool_constant<folly::IsRelocatable<T>::value && usingStdAllocator>
      relocate_use_memcpy;

  // Move when it cannot throw (with std::allocator), or when copying is
  // impossible; otherwise copy (see the case statement above).
  typedef bool_constant<
      (std::is_nothrow_move_constructible<T>::value && usingStdAllocator) ||
      !std::is_copy_constructible<T>::value>
      relocate_use_move;

  // move
  void relocate_move(T* dest, T* first, T* last) {
    relocate_move_or_memcpy(dest, first, last, relocate_use_memcpy());
  }

  // memcpy branch; first may be null for an empty vector, hence the check.
  void relocate_move_or_memcpy(T* dest, T* first, T* last, std::true_type) {
    if (first != nullptr) {
      std::memcpy((void*)dest, (void*)first, (last - first) * sizeof(T));
    }
  }

  void relocate_move_or_memcpy(T* dest, T* first, T* last, std::false_type) {
    relocate_move_or_copy(dest, first, last, relocate_use_move());
  }

  void relocate_move_or_copy(T* dest, T* first, T* last, std::true_type) {
    D_uninitialized_move_a(dest, first, last);
  }

  void relocate_move_or_copy(T* dest, T* first, T* last, std::false_type) {
    D_uninitialized_copy_a(dest, first, last);
  }
666 | |
667 | // done |
  // done
  // Disposes of the source range after a successful relocate_move.
  void relocate_done(T* /*dest*/, T* first, T* last) noexcept {
    if (folly::IsRelocatable<T>::value && usingStdAllocator) {
      // used memcpy; data has been relocated, do not call destructor
    } else {
      D_destroy_range_a(first, last);
    }
  }

  // undo
  // Reverts a relocate_move after a later step threw. The branches mirror
  // the dispatch in relocate_move; see the long comment above for why the
  // non-copyable case can only offer a weak guarantee.
  void relocate_undo(T* dest, T* first, T* last) noexcept {
    if (folly::IsRelocatable<T>::value && usingStdAllocator) {
      // used memcpy, old data is still valid, nothing to do
    } else if (
        std::is_nothrow_move_constructible<T>::value && usingStdAllocator) {
      // noexcept move everything back, aka relocate_move
      relocate_move(first, dest, dest + (last - first));
    } else if (!std::is_copy_constructible<T>::value) {
      // weak guarantee
      D_destroy_range_a(dest, dest + (last - first));
    } else {
      // used copy, old data is still valid
      D_destroy_range_a(dest, dest + (last - first));
    }
  }
692 | |
693 | //=========================================================================== |
694 | //--------------------------------------------------------------------------- |
695 | // construct/copy/destroy |
696 | public: |
  fbvector() = default;

  explicit fbvector(const Allocator& a) : impl_(a) {}

  // n default-constructed elements.
  explicit fbvector(size_type n, const Allocator& a = Allocator())
      : impl_(n, a) {
    M_uninitialized_fill_n_e(n);
  }

  // n copies of value.
  fbvector(size_type n, VT value, const Allocator& a = Allocator())
      : impl_(n, a) {
    M_uninitialized_fill_n_e(n, value);
  }

  // Iterator-pair constructor; dispatches on the iterator category so that
  // forward iterators get a single exact-size allocation.
  template <
      class It,
      class Category = typename std::iterator_traits<It>::iterator_category>
  fbvector(It first, It last, const Allocator& a = Allocator())
      : fbvector(first, last, a, Category()) {}

  // Copy constructor: allocates exactly other.size() (not other.capacity()).
  fbvector(const fbvector& other)
      : impl_(
            other.size(),
            A::select_on_container_copy_construction(other.impl_)) {
    M_uninitialized_copy_e(other.begin(), other.end());
  }

  // Move constructor steals other's buffer.
  fbvector(fbvector&& other) noexcept : impl_(std::move(other.impl_)) {}

  fbvector(const fbvector& other, const Allocator& a)
      : fbvector(other.begin(), other.end(), a) {}

  // Extended move constructor: can only steal the buffer when the two
  // allocators compare equal; otherwise elements are moved one by one
  // (which may throw -- hence not noexcept).
  /* may throw */ fbvector(fbvector&& other, const Allocator& a) : impl_(a) {
    if (impl_ == other.impl_) {
      impl_.swapData(other.impl_);
    } else {
      impl_.init(other.size());
      M_uninitialized_move_e(other.begin(), other.end());
    }
  }

  fbvector(std::initializer_list<T> il, const Allocator& a = Allocator())
      : fbvector(il.begin(), il.end(), a) {}

  ~fbvector() = default; // the cleanup occurs in impl_
742 | |
  // Copy assignment. When a stateful allocator propagates on copy
  // assignment and differs from ours, the old buffer must be released with
  // the old allocator before the allocator is replaced.
  fbvector& operator=(const fbvector& other) {
    if (UNLIKELY(this == &other)) {
      return *this;
    }

    if (!usingStdAllocator &&
        A::propagate_on_container_copy_assignment::value) {
      if (impl_ != other.impl_) {
        // can't use other's different allocator to clean up self
        impl_.reset();
      }
      (Allocator&)impl_ = (Allocator&)other.impl_;
    }

    assign(other.begin(), other.end());
    return *this;
  }
760 | |
761 | fbvector& operator=(fbvector&& other) { |
762 | if (UNLIKELY(this == &other)) { |
763 | return *this; |
764 | } |
765 | moveFrom(std::move(other), moveIsSwap()); |
766 | return *this; |
767 | } |
768 | |
  fbvector& operator=(std::initializer_list<T> il) {
    assign(il.begin(), il.end());
    return *this;
  }

  // Iterator-pair assign; dispatches on iterator category like the
  // corresponding constructor.
  template <
      class It,
      class Category = typename std::iterator_traits<It>::iterator_category>
  void assign(It first, It last) {
    assign(first, last, Category());
  }
780 | |
  // Replaces the contents with n copies of value. Three cases:
  //  - n > capacity(): reallocate (copying value first if it aliases our
  //    own storage, since reset destroys the old elements);
  //  - n <= size(): overwrite a prefix and destroy the tail;
  //  - otherwise: overwrite all existing elements, then fill the rest.
  void assign(size_type n, VT value) {
    if (n > capacity()) {
      // Not enough space. Do not reserve in place, since we will
      // discard the old values anyways.
      if (dataIsInternalAndNotVT(value)) {
        T copy(std::move(value));
        impl_.reset(n);
        M_uninitialized_fill_n_e(n, copy);
      } else {
        impl_.reset(n);
        M_uninitialized_fill_n_e(n, value);
      }
    } else if (n <= size()) {
      auto newE = impl_.b_ + n;
      std::fill(impl_.b_, newE, value);
      M_destroy_range_e(newE);
    } else {
      std::fill(impl_.b_, impl_.e_, value);
      M_uninitialized_fill_n_e(n - size(), value);
    }
  }
802 | |
  void assign(std::initializer_list<T> il) {
    assign(il.begin(), il.end());
  }

  // Returns a copy of the allocator (impl_ derives from Allocator).
  allocator_type get_allocator() const noexcept {
    return impl_;
  }
810 | |
811 | private: |
812 | // contract dispatch for iterator types fbvector(It first, It last) |
  // contract dispatch for iterator types fbvector(It first, It last)
  // Forward iterators: the distance is known up front, so allocate once.
  template <class ForwardIterator>
  fbvector(
      ForwardIterator first,
      ForwardIterator last,
      const Allocator& a,
      std::forward_iterator_tag)
      : impl_(size_type(std::distance(first, last)), a) {
    M_uninitialized_copy_e(first, last);
  }

  // Input iterators: single pass only, so grow incrementally.
  template <class InputIterator>
  fbvector(
      InputIterator first,
      InputIterator last,
      const Allocator& a,
      std::input_iterator_tag)
      : impl_(a) {
    for (; first != last; ++first) {
      emplace_back(*first);
    }
  }
834 | |
835 | // contract dispatch for allocator movement in operator=(fbvector&&) |
836 | void moveFrom(fbvector&& other, std::true_type) { |
837 | swap(impl_, other.impl_); |
838 | } |
839 | void moveFrom(fbvector&& other, std::false_type) { |
840 | if (impl_ == other.impl_) { |
841 | impl_.swapData(other.impl_); |
842 | } else { |
843 | impl_.reset(other.size()); |
844 | M_uninitialized_move_e(other.begin(), other.end()); |
845 | } |
846 | } |
847 | |
848 | // contract dispatch for iterator types in assign(It first, It last) |
  // contract dispatch for iterator types in assign(It first, It last)
  // Forward iterators: size known up front. Reallocate if too big;
  // otherwise assign over existing elements and construct/destroy the tail.
  template <class ForwardIterator>
  void assign(
      ForwardIterator first,
      ForwardIterator last,
      std::forward_iterator_tag) {
    const auto newSize = size_type(std::distance(first, last));
    if (newSize > capacity()) {
      impl_.reset(newSize);
      M_uninitialized_copy_e(first, last);
    } else if (newSize <= size()) {
      auto newEnd = std::copy(first, last, impl_.b_);
      M_destroy_range_e(newEnd);
    } else {
      auto mid = S_copy_n(impl_.b_, first, size());
      M_uninitialized_copy_e<decltype(last)>(mid, last);
    }
  }

  // Input iterators: overwrite existing elements in a single pass; then
  // either trim the unused tail or append whatever source remains.
  template <class InputIterator>
  void
  assign(InputIterator first, InputIterator last, std::input_iterator_tag) {
    auto p = impl_.b_;
    for (; first != last && p != impl_.e_; ++first, ++p) {
      *p = *first;
    }
    if (p != impl_.e_) {
      M_destroy_range_e(p);
    } else {
      for (; first != last; ++first) {
        emplace_back(*first);
      }
    }
  }
882 | |
883 | // contract dispatch for aliasing under VT optimization |
884 | bool dataIsInternalAndNotVT(const T& t) { |
885 | if (should_pass_by_value) { |
886 | return false; |
887 | } |
888 | return dataIsInternal(t); |
889 | } |
890 | bool dataIsInternal(const T& t) { |
891 | return UNLIKELY( |
892 | impl_.b_ <= std::addressof(t) && std::addressof(t) < impl_.e_); |
893 | } |
894 | |
895 | //=========================================================================== |
896 | //--------------------------------------------------------------------------- |
897 | // iterators |
898 | public: |
  // Iterators are raw pointers into the contiguous buffer.
  iterator begin() noexcept {
    return impl_.b_;
  }
  const_iterator begin() const noexcept {
    return impl_.b_;
  }
  iterator end() noexcept {
    return impl_.e_;
  }
  const_iterator end() const noexcept {
    return impl_.e_;
  }
  reverse_iterator rbegin() noexcept {
    return reverse_iterator(end());
  }
  const_reverse_iterator rbegin() const noexcept {
    return const_reverse_iterator(end());
  }
  reverse_iterator rend() noexcept {
    return reverse_iterator(begin());
  }
  const_reverse_iterator rend() const noexcept {
    return const_reverse_iterator(begin());
  }

  // Explicitly-const variants.
  const_iterator cbegin() const noexcept {
    return impl_.b_;
  }
  const_iterator cend() const noexcept {
    return impl_.e_;
  }
  const_reverse_iterator crbegin() const noexcept {
    return const_reverse_iterator(end());
  }
  const_reverse_iterator crend() const noexcept {
    return const_reverse_iterator(begin());
  }
936 | |
937 | //=========================================================================== |
938 | //--------------------------------------------------------------------------- |
939 | // capacity |
940 | public: |
941 | size_type size() const noexcept { |
942 | return size_type(impl_.e_ - impl_.b_); |
943 | } |
944 | |
945 | size_type max_size() const noexcept { |
946 | // good luck gettin' there |
947 | return ~size_type(0); |
948 | } |
949 | |
  // Resize to exactly n elements; new elements are value-initialized.
  void resize(size_type n) {
    if (n <= size()) {
      // Shrinking: destroy the tail beyond n; capacity is kept.
      M_destroy_range_e(impl_.b_ + n);
    } else {
      reserve(n);
      M_uninitialized_fill_n_e(n - size());
    }
  }

  // Resize to exactly n elements; new elements are copies of t.
  void resize(size_type n, VT t) {
    if (n <= size()) {
      M_destroy_range_e(impl_.b_ + n);
    } else if (dataIsInternalAndNotVT(t) && n > capacity()) {
      // t aliases our own storage and reserve() would relocate it out from
      // under us, so take a stable copy first.
      T copy(t);
      reserve(n);
      M_uninitialized_fill_n_e(n - size(), copy);
    } else {
      reserve(n);
      M_uninitialized_fill_n_e(n - size(), t);
    }
  }
971 | |
972 | size_type capacity() const noexcept { |
973 | return size_type(impl_.z_ - impl_.b_); |
974 | } |
975 | |
976 | bool empty() const noexcept { |
977 | return impl_.b_ == impl_.e_; |
978 | } |
979 | |
  // Ensure capacity for at least n elements; strong exception guarantee.
  void reserve(size_type n) {
    if (n <= capacity()) {
      return;
    }
    // First try to grow the existing jemalloc block without moving it.
    if (impl_.b_ && reserve_in_place(n)) {
      return;
    }

    // Otherwise allocate a fresh block (rounded up to a jemalloc size
    // class) and relocate the elements into it.
    auto newCap = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
    auto newB = M_allocate(newCap);
    {
      // If relocation throws, release the new block before rethrowing.
      auto rollback = makeGuard([&] { M_deallocate(newB, newCap); });
      M_relocate(newB);
      rollback.dismiss();
    }
    if (impl_.b_) {
      M_deallocate(impl_.b_, size_type(impl_.z_ - impl_.b_));
    }
    // Note: e_ must be recomputed before b_ is overwritten below.
    impl_.z_ = newB + newCap;
    impl_.e_ = newB + (impl_.e_ - impl_.b_);
    impl_.b_ = newB;
  }
1002 | |
  // Non-binding request to release unused capacity. Best-effort: any
  // failure (e.g. allocation of the smaller buffer throws) is silently
  // ignored and the vector is left unchanged.
  void shrink_to_fit() noexcept {
    if (empty()) {
      impl_.reset();
      return;
    }

    auto const newCapacityBytes = folly::goodMallocSize(size() * sizeof(T));
    auto const newCap = newCapacityBytes / sizeof(T);
    auto const oldCap = capacity();

    // Shrinking only helps if the new size class is strictly smaller.
    if (newCap >= oldCap) {
      return;
    }

    void* p = impl_.b_;
    // xallocx() will shrink to precisely newCapacityBytes (which was generated
    // by goodMallocSize()) if it successfully shrinks in place.
    if ((usingJEMalloc() && usingStdAllocator) &&
        newCapacityBytes >= folly::jemallocMinInPlaceExpandable &&
        xallocx(p, newCapacityBytes, 0, 0) == newCapacityBytes) {
      impl_.z_ += newCap - oldCap;
    } else {
      // Fall back to allocate-and-relocate; swallow any failure.
      T* newB; // intentionally uninitialized
      if (!catch_exception(
              [&] {
                newB = M_allocate(newCap);
                return true;
              },
              [&] { //
                return false;
              })) {
        return;
      }
      // Relocate; on failure free the new buffer and keep the old one.
      if (!catch_exception(
              [&] {
                M_relocate(newB);
                return true;
              },
              [&] {
                M_deallocate(newB, newCap);
                return false;
              })) {
        return;
      }
      if (impl_.b_) {
        M_deallocate(impl_.b_, size_type(impl_.z_ - impl_.b_));
      }
      // Note: e_ must be recomputed before b_ is overwritten below.
      impl_.z_ = newB + newCap;
      impl_.e_ = newB + (impl_.e_ - impl_.b_);
      impl_.b_ = newB;
    }
  }
1055 | |
1056 | private: |
  // Try to grow the current allocation in place to hold n elements.
  // Returns true (and updates z_) on success. Requires jemalloc's xallocx
  // with the standard allocator.
  bool reserve_in_place(size_type n) {
    if (!usingStdAllocator || !usingJEMalloc()) {
      return false;
    }

    // jemalloc can never grow in place blocks smaller than 4096 bytes.
    if ((impl_.z_ - impl_.b_) * sizeof(T) <
        folly::jemallocMinInPlaceExpandable) {
      return false;
    }

    auto const newCapacityBytes = folly::goodMallocSize(n * sizeof(T));
    void* p = impl_.b_;
    if (xallocx(p, newCapacityBytes, 0, 0) == newCapacityBytes) {
      impl_.z_ = impl_.b_ + newCapacityBytes / sizeof(T);
      return true;
    }
    return false;
  }
1076 | |
1077 | //=========================================================================== |
1078 | //--------------------------------------------------------------------------- |
1079 | // element access |
1080 | public: |
  // Unchecked element access; n must be < size() (asserted in debug builds).
  reference operator[](size_type n) {
    assert(n < size());
    return impl_.b_[n];
  }
  const_reference operator[](size_type n) const {
    assert(n < size());
    return impl_.b_[n];
  }
  // Checked access: throws std::out_of_range on a bad index.
  const_reference at(size_type n) const {
    if (UNLIKELY(n >= size())) {
      throw_exception<std::out_of_range>(
          "fbvector: index is greater than size." );
    }
    return (*this)[n];
  }
  reference at(size_type n) {
    // Reuse the const overload; casting away const is safe because *this
    // is mutable here.
    auto const& cThis = *this;
    return const_cast<reference>(cThis.at(n));
  }
  // front()/back() require a non-empty vector (asserted in debug builds).
  reference front() {
    assert(!empty());
    return *impl_.b_;
  }
  const_reference front() const {
    assert(!empty());
    return *impl_.b_;
  }
  reference back() {
    assert(!empty());
    return impl_.e_[-1];
  }
  const_reference back() const {
    assert(!empty());
    return impl_.e_[-1];
  }
1116 | |
1117 | //=========================================================================== |
1118 | //--------------------------------------------------------------------------- |
1119 | // data access |
1120 | public: |
  // Direct access to the underlying contiguous storage; may be nullptr
  // when the vector has never allocated.
  T* data() noexcept {
    return impl_.b_;
  }
  const T* data() const noexcept {
    return impl_.b_;
  }
1127 | |
1128 | //=========================================================================== |
1129 | //--------------------------------------------------------------------------- |
1130 | // modifiers (common) |
1131 | public: |
1132 | template <class... Args> |
1133 | reference emplace_back(Args&&... args) { |
1134 | if (impl_.e_ != impl_.z_) { |
1135 | M_construct(impl_.e_, std::forward<Args>(args)...); |
1136 | ++impl_.e_; |
1137 | } else { |
1138 | emplace_back_aux(std::forward<Args>(args)...); |
1139 | } |
1140 | return back(); |
1141 | } |
1142 | |
1143 | void push_back(const T& value) { |
1144 | if (impl_.e_ != impl_.z_) { |
1145 | M_construct(impl_.e_, value); |
1146 | ++impl_.e_; |
1147 | } else { |
1148 | emplace_back_aux(value); |
1149 | } |
1150 | } |
1151 | |
1152 | void push_back(T&& value) { |
1153 | if (impl_.e_ != impl_.z_) { |
1154 | M_construct(impl_.e_, std::move(value)); |
1155 | ++impl_.e_; |
1156 | } else { |
1157 | emplace_back_aux(std::move(value)); |
1158 | } |
1159 | } |
1160 | |
1161 | void pop_back() { |
1162 | assert(!empty()); |
1163 | --impl_.e_; |
1164 | M_destroy(impl_.e_); |
1165 | } |
1166 | |
  // Exchange contents with another fbvector in O(1).
  void swap(fbvector& other) noexcept {
    // With a propagating custom allocator, swap allocator and buffers
    // together; otherwise only the data pointers are exchanged.
    if (!usingStdAllocator && A::propagate_on_container_swap::value) {
      swap(impl_, other.impl_);
    } else {
      impl_.swapData(other.impl_);
    }
  }
1174 | |
  // Destroy all elements; capacity is retained.
  void clear() noexcept {
    M_destroy_range_e(impl_.b_);
  }
1178 | |
1179 | private: |
1180 | // std::vector implements a similar function with a different growth |
1181 | // strategy: empty() ? 1 : capacity() * 2. |
1182 | // |
1183 | // fbvector grows differently on two counts: |
1184 | // |
1185 | // (1) initial size |
1186 | // Instead of growing to size 1 from empty, fbvector allocates at least |
1187 | // 64 bytes. You may still use reserve to reserve a lesser amount of |
1188 | // memory. |
1189 | // (2) 1.5x |
1190 | // For medium-sized vectors, the growth strategy is 1.5x. See the docs |
1191 | // for details. |
1192 | // This does not apply to very small or very large fbvectors. This is a |
1193 | // heuristic. |
1194 | // A nice addition to fbvector would be the capability of having a user- |
1195 | // defined growth strategy, probably as part of the allocator. |
1196 | // |
1197 | |
1198 | size_type computePushBackCapacity() const { |
1199 | if (capacity() == 0) { |
1200 | return std::max(64 / sizeof(T), size_type(1)); |
1201 | } |
1202 | if (capacity() < folly::jemallocMinInPlaceExpandable / sizeof(T)) { |
1203 | return capacity() * 2; |
1204 | } |
1205 | if (capacity() > 4096 * 32 / sizeof(T)) { |
1206 | return capacity() * 2; |
1207 | } |
1208 | return (capacity() * 3 + 1) / 2; |
1209 | } |
1210 | |
1211 | template <class... Args> |
1212 | void emplace_back_aux(Args&&... args) { |
1213 | size_type byte_sz = |
1214 | folly::goodMallocSize(computePushBackCapacity() * sizeof(T)); |
1215 | if (usingStdAllocator && usingJEMalloc() && |
1216 | ((impl_.z_ - impl_.b_) * sizeof(T) >= |
1217 | folly::jemallocMinInPlaceExpandable)) { |
1218 | // Try to reserve in place. |
1219 | // Ask xallocx to allocate in place at least size()+1 and at most sz |
1220 | // space. |
1221 | // xallocx will allocate as much as possible within that range, which |
1222 | // is the best possible outcome: if sz space is available, take it all, |
1223 | // otherwise take as much as possible. If nothing is available, then |
1224 | // fail. |
1225 | // In this fashion, we never relocate if there is a possibility of |
1226 | // expanding in place, and we never reallocate by less than the desired |
1227 | // amount unless we cannot expand further. Hence we will not reallocate |
1228 | // sub-optimally twice in a row (modulo the blocking memory being freed). |
1229 | size_type lower = folly::goodMallocSize(sizeof(T) + size() * sizeof(T)); |
1230 | size_type upper = byte_sz; |
1231 | size_type = upper - lower; |
1232 | |
1233 | void* p = impl_.b_; |
1234 | size_t actual; |
1235 | |
1236 | if ((actual = xallocx(p, lower, extra, 0)) >= lower) { |
1237 | impl_.z_ = impl_.b_ + actual / sizeof(T); |
1238 | M_construct(impl_.e_, std::forward<Args>(args)...); |
1239 | ++impl_.e_; |
1240 | return; |
1241 | } |
1242 | } |
1243 | |
1244 | // Reallocation failed. Perform a manual relocation. |
1245 | size_type sz = byte_sz / sizeof(T); |
1246 | auto newB = M_allocate(sz); |
1247 | auto newE = newB + size(); |
1248 | { |
1249 | auto rollback1 = makeGuard([&] { M_deallocate(newB, sz); }); |
1250 | if (folly::IsRelocatable<T>::value && usingStdAllocator) { |
1251 | // For linear memory access, relocate before construction. |
1252 | // By the test condition, relocate is noexcept. |
1253 | // Note that there is no cleanup to do if M_construct throws - that's |
1254 | // one of the beauties of relocation. |
1255 | // Benchmarks for this code have high variance, and seem to be close. |
1256 | relocate_move(newB, impl_.b_, impl_.e_); |
1257 | M_construct(newE, std::forward<Args>(args)...); |
1258 | ++newE; |
1259 | } else { |
1260 | M_construct(newE, std::forward<Args>(args)...); |
1261 | ++newE; |
1262 | auto rollback2 = makeGuard([&] { M_destroy(newE - 1); }); |
1263 | M_relocate(newB); |
1264 | rollback2.dismiss(); |
1265 | } |
1266 | rollback1.dismiss(); |
1267 | } |
1268 | if (impl_.b_) { |
1269 | M_deallocate(impl_.b_, size()); |
1270 | } |
1271 | impl_.b_ = newB; |
1272 | impl_.e_ = newE; |
1273 | impl_.z_ = newB + sz; |
1274 | } |
1275 | |
1276 | //=========================================================================== |
1277 | //--------------------------------------------------------------------------- |
1278 | // modifiers (erase) |
1279 | public: |
1280 | iterator erase(const_iterator position) { |
1281 | return erase(position, position + 1); |
1282 | } |
1283 | |
  // Erase [first, last); returns an iterator to the element that now
  // occupies first's position.
  iterator erase(const_iterator first, const_iterator last) {
    assert(isValid(first) && isValid(last));
    assert(first <= last);
    if (first != last) {
      if (last == end()) {
        // Tail erase: just destroy [first, end).
        M_destroy_range_e((iterator)first);
      } else {
        if (folly::IsRelocatable<T>::value && usingStdAllocator) {
          // Relocatable: destroy the gap, then slide the tail down as raw
          // bytes. memcpy is legal only when the regions cannot overlap,
          // i.e. the gap is at least as large as the tail being moved.
          D_destroy_range_a((iterator)first, (iterator)last);
          if (last - first >= cend() - last) {
            std::memcpy((void*)first, (void*)last, (cend() - last) * sizeof(T));
          } else {
            std::memmove(
                (void*)first, (void*)last, (cend() - last) * sizeof(T));
          }
          impl_.e_ -= (last - first);
        } else {
          // General case: move-assign the tail down, then destroy the
          // leftover slots at the end.
          std::copy(
              std::make_move_iterator((iterator)last),
              std::make_move_iterator(end()),
              (iterator)first);
          auto newEnd = impl_.e_ - std::distance(first, last);
          M_destroy_range_e(newEnd);
        }
      }
    }
    return (iterator)first;
  }
1312 | |
1313 | //=========================================================================== |
1314 | //--------------------------------------------------------------------------- |
1315 | // modifiers (insert) |
1316 | private: // we have the private section first because it defines some macros |
1317 | bool isValid(const_iterator it) { |
1318 | return cbegin() <= it && it <= cend(); |
1319 | } |
1320 | |
1321 | size_type computeInsertCapacity(size_type n) { |
1322 | size_type nc = std::max(computePushBackCapacity(), size() + n); |
1323 | size_type ac = folly::goodMallocSize(nc * sizeof(T)) / sizeof(T); |
1324 | return ac; |
1325 | } |
1326 | |
1327 | //--------------------------------------------------------------------------- |
1328 | // |
1329 | // make_window takes an fbvector, and creates an uninitialized gap (a |
1330 | // window) at the given position, of the given size. The fbvector must |
1331 | // have enough capacity. |
1332 | // |
1333 | // Explanation by picture. |
1334 | // |
1335 | // 123456789______ |
1336 | // ^ |
1337 | // make_window here of size 3 |
1338 | // |
1339 | // 1234___56789___ |
1340 | // |
1341 | // If something goes wrong and the window must be destroyed, use |
1342 | // undo_window to provide a weak exception guarantee. It destroys |
1343 | // the right ledge. |
1344 | // |
1345 | // 1234___________ |
1346 | // |
1347 | //--------------------------------------------------------------------------- |
1348 | // |
1349 | // wrap_frame takes an inverse window and relocates an fbvector around it. |
1350 | // The fbvector must have at least as many elements as the left ledge. |
1351 | // |
1352 | // Explanation by picture. |
1353 | // |
1354 | // START |
1355 | // fbvector: inverse window: |
1356 | // 123456789______ _____abcde_______ |
1357 | // [idx][ n ] |
1358 | // |
1359 | // RESULT |
1360 | // _______________ 12345abcde6789___ |
1361 | // |
1362 | //--------------------------------------------------------------------------- |
1363 | // |
1364 | // insert_use_fresh_memory returns true iff the fbvector should use a fresh |
1365 | // block of memory for the insertion. If the fbvector does not have enough |
1366 | // spare capacity, then it must return true. Otherwise either true or false |
1367 | // may be returned. |
1368 | // |
1369 | //--------------------------------------------------------------------------- |
1370 | // |
1371 | // These three functions, make_window, wrap_frame, and |
1372 | // insert_use_fresh_memory, can be combined into a uniform interface. |
1373 | // Since that interface involves a lot of case-work, it is built into |
1374 | // some macros: FOLLY_FBVECTOR_INSERT_(PRE|START|TRY|END) |
1375 | // Macros are used in an attempt to let GCC perform better optimizations, |
1376 | // especially control flow optimization. |
1377 | // |
1378 | |
1379 | //--------------------------------------------------------------------------- |
1380 | // window |
1381 | |
  // Open an uninitialized gap of n slots at position; see the picture in
  // the comment block above. Capacity must already be sufficient.
  void make_window(iterator position, size_type n) {
    // The result is guaranteed to be non-negative, so use an unsigned type:
    size_type tail = size_type(std::distance(position, impl_.e_));

    if (tail <= n) {
      // The window reaches past the old end: relocate the whole tail into
      // uninitialized space beyond the window.
      relocate_move(position + n, position, impl_.e_);
      relocate_done(position + n, position, impl_.e_);
      impl_.e_ += n;
    } else {
      if (folly::IsRelocatable<T>::value && usingStdAllocator) {
        // Relocatable: shift the tail up as raw bytes; no objects are
        // left alive in the gap.
        std::memmove((void*)(position + n), (void*)position, tail * sizeof(T));
        impl_.e_ += n;
      } else {
        // General case: move-construct the last n elements into fresh
        // space, shift the rest up by move-assignment, then destroy the
        // window's slots so the caller sees uninitialized storage.
        D_uninitialized_move_a(impl_.e_, impl_.e_ - n, impl_.e_);
        {
          auto rollback = makeGuard([&] {
            D_destroy_range_a(impl_.e_ - n, impl_.e_ + n);
            impl_.e_ -= n;
          });
          std::copy_backward(
              std::make_move_iterator(position),
              std::make_move_iterator(impl_.e_ - n),
              impl_.e_);
          rollback.dismiss();
        }
        impl_.e_ += n;
        D_destroy_range_a(position, position + n);
      }
    }
  }
1412 | |
  // Roll back a make_window: destroy the right ledge (everything moved
  // past the window) and retract the end pointer. noexcept cleanup path,
  // providing the weak exception guarantee described above.
  void undo_window(iterator position, size_type n) noexcept {
    D_destroy_range_a(position + n, impl_.e_);
    impl_.e_ = position;
  }
1417 | |
1418 | //--------------------------------------------------------------------------- |
1419 | // frame |
1420 | |
  // Relocate the vector around an inverse window at ledge (see picture
  // above): [b_, b_+idx) goes before the window, [b_+idx, e_) after it.
  void wrap_frame(T* ledge, size_type idx, size_type n) {
    assert(size() >= idx);
    assert(n != 0);

    // Move the left ledge, then the right; undo the left if the right
    // throws. Sources are destroyed (done) only once both moves succeed.
    relocate_move(ledge, impl_.b_, impl_.b_ + idx);
    {
      auto rollback = makeGuard([&] { //
        relocate_undo(ledge, impl_.b_, impl_.b_ + idx);
      });
      relocate_move(ledge + idx + n, impl_.b_ + idx, impl_.e_);
      rollback.dismiss();
    }
    relocate_done(ledge, impl_.b_, impl_.b_ + idx);
    relocate_done(ledge + idx + n, impl_.b_ + idx, impl_.e_);
  }
1436 | |
1437 | //--------------------------------------------------------------------------- |
1438 | // use fresh? |
1439 | |
1440 | bool insert_use_fresh(bool at_end, size_type n) { |
1441 | if (at_end) { |
1442 | if (size() + n <= capacity()) { |
1443 | return false; |
1444 | } |
1445 | if (reserve_in_place(size() + n)) { |
1446 | return false; |
1447 | } |
1448 | return true; |
1449 | } |
1450 | |
1451 | if (size() + n > capacity()) { |
1452 | return true; |
1453 | } |
1454 | |
1455 | return false; |
1456 | } |
1457 | |
1458 | //--------------------------------------------------------------------------- |
1459 | // interface |
1460 | |
  // Common implementation behind every insert/emplace overload. The four
  // functors customize, respectively: detecting self-aliasing arguments,
  // re-dispatching when aliasing is detected, constructing the inserted
  // elements into the window, and destroying them on failure.
  template <
      typename IsInternalFunc,
      typename InsertInternalFunc,
      typename ConstructFunc,
      typename DestroyFunc>
  iterator do_real_insert(
      const_iterator cpos,
      size_type n,
      IsInternalFunc&& isInternalFunc,
      InsertInternalFunc&& insertInternalFunc,
      ConstructFunc&& constructFunc,
      DestroyFunc&& destroyFunc) {
    if (n == 0) {
      return iterator(cpos);
    }
    bool at_end = cpos == cend();
    bool fresh = insert_use_fresh(at_end, n);
    if (!at_end) {
      if (!fresh && isInternalFunc()) {
        // check for internal data (technically not required by the standard)
        return insertInternalFunc();
      }
      assert(isValid(cpos));
    }
    T* position = const_cast<T*>(cpos);
    size_type idx = size_type(std::distance(impl_.b_, position));
    T* b;
    size_type newCap; /* intentionally uninitialized */

    if (fresh) {
      // Fresh buffer: elements are wrapped around the window afterwards.
      newCap = computeInsertCapacity(n);
      b = M_allocate(newCap);
    } else {
      // In-place: open a gap in the existing buffer (or just extend e_).
      if (!at_end) {
        make_window(position, n);
      } else {
        impl_.e_ += n;
      }
      b = impl_.b_;
    }

    T* start = b + idx;
    {
      // If construction throws, undo whichever preparation was done above.
      auto rollback = makeGuard([&] {
        if (fresh) {
          M_deallocate(b, newCap);
        } else {
          if (!at_end) {
            undo_window(position, n);
          } else {
            impl_.e_ -= n;
          }
        }
      });
      // construct the inserted elements
      constructFunc(start);
      rollback.dismiss();
    }

    if (fresh) {
      {
        auto rollback = makeGuard([&] {
          // delete the inserted elements (exception has been thrown)
          destroyFunc(start);
          M_deallocate(b, newCap);
        });
        wrap_frame(b, idx, n);
        rollback.dismiss();
      }
      if (impl_.b_) {
        M_deallocate(impl_.b_, capacity());
      }
      impl_.set(b, size() + n, newCap);
      return impl_.b_ + idx;
    } else {
      return position;
    }
  }
1539 | |
1540 | public: |
  // Construct a single element in place before cpos.
  template <class... Args>
  iterator emplace(const_iterator cpos, Args&&... args) {
    return do_real_insert(
        cpos,
        1,
        [&] { return false; },
        [&] { return iterator{}; },
        [&](iterator start) {
          M_construct(start, std::forward<Args>(args)...);
        },
        [&](iterator start) { M_destroy(start); });
  }

  // Insert a copy of value before cpos. If value aliases our own storage
  // and the buffer will be reused, re-dispatch with a stable copy.
  iterator insert(const_iterator cpos, const T& value) {
    return do_real_insert(
        cpos,
        1,
        [&] { return dataIsInternal(value); },
        [&] { return insert(cpos, T(value)); },
        [&](iterator start) { M_construct(start, value); },
        [&](iterator start) { M_destroy(start); });
  }

  // Insert by move; same aliasing caveat as the copying overload.
  iterator insert(const_iterator cpos, T&& value) {
    return do_real_insert(
        cpos,
        1,
        [&] { return dataIsInternal(value); },
        [&] { return insert(cpos, T(std::move(value))); },
        [&](iterator start) { M_construct(start, std::move(value)); },
        [&](iterator start) { M_destroy(start); });
  }

  // Insert n copies of value before cpos.
  iterator insert(const_iterator cpos, size_type n, VT value) {
    return do_real_insert(
        cpos,
        n,
        [&] { return dataIsInternalAndNotVT(value); },
        [&] { return insert(cpos, n, T(value)); },
        [&](iterator start) { D_uninitialized_fill_n_a(start, n, value); },
        [&](iterator start) { D_destroy_range_a(start, start + n); });
  }

  // Insert a range; dispatches on the iterator category (see below).
  template <
      class It,
      class Category = typename std::iterator_traits<It>::iterator_category>
  iterator insert(const_iterator cpos, It first, It last) {
    return insert(cpos, first, last, Category());
  }

  iterator insert(const_iterator cpos, std::initializer_list<T> il) {
    return insert(cpos, il.begin(), il.end());
  }
1594 | |
1595 | //--------------------------------------------------------------------------- |
1596 | // insert dispatch for iterator types |
1597 | private: |
  // Forward iterators allow the range to be measured up front, so the
  // generic insert machinery can be used directly.
  template <class FIt>
  iterator
  insert(const_iterator cpos, FIt first, FIt last, std::forward_iterator_tag) {
    size_type n = size_type(std::distance(first, last));
    return do_real_insert(
        cpos,
        n,
        [&] { return false; },
        [&] { return iterator{}; },
        [&](iterator start) { D_uninitialized_copy_a(start, first, last); },
        [&](iterator start) { D_destroy_range_a(start, start + n); });
  }
1610 | |
  // Input iterators are single-pass, so the range cannot be measured in
  // advance. Move the tail into a temporary vector, append the new
  // elements one by one, then move the tail back onto the end.
  template <class IIt>
  iterator
  insert(const_iterator cpos, IIt first, IIt last, std::input_iterator_tag) {
    T* position = const_cast<T*>(cpos);
    assert(isValid(position));
    size_type idx = std::distance(begin(), position);

    fbvector storage(
        std::make_move_iterator(position),
        std::make_move_iterator(end()),
        A::select_on_container_copy_construction(impl_));
    M_destroy_range_e(position);
    for (; first != last; ++first) {
      emplace_back(*first);
    }
    insert(
        cend(),
        std::make_move_iterator(storage.begin()),
        std::make_move_iterator(storage.end()));
    return impl_.b_ + idx;
  }
1632 | |
1633 | //=========================================================================== |
1634 | //--------------------------------------------------------------------------- |
1635 | // lexicographical functions |
1636 | public: |
1637 | bool operator==(const fbvector& other) const { |
1638 | return size() == other.size() && std::equal(begin(), end(), other.begin()); |
1639 | } |
1640 | |
1641 | bool operator!=(const fbvector& other) const { |
1642 | return !(*this == other); |
1643 | } |
1644 | |
1645 | bool operator<(const fbvector& other) const { |
1646 | return std::lexicographical_compare( |
1647 | begin(), end(), other.begin(), other.end()); |
1648 | } |
1649 | |
1650 | bool operator>(const fbvector& other) const { |
1651 | return other < *this; |
1652 | } |
1653 | |
1654 | bool operator<=(const fbvector& other) const { |
1655 | return !(*this > other); |
1656 | } |
1657 | |
1658 | bool operator>=(const fbvector& other) const { |
1659 | return !(*this < other); |
1660 | } |
1661 | |
1662 | //=========================================================================== |
1663 | //--------------------------------------------------------------------------- |
1664 | // friends |
1665 | private: |
1666 | template <class _T, class _A> |
1667 | friend _T* relinquish(fbvector<_T, _A>&); |
1668 | |
1669 | template <class _T, class _A> |
1670 | friend void attach(fbvector<_T, _A>&, _T* data, size_t sz, size_t cap); |
1671 | |
1672 | }; // class fbvector |
1673 | |
1674 | //============================================================================= |
1675 | //----------------------------------------------------------------------------- |
1676 | // specialized functions |
1677 | |
// Non-member swap, found by ADL; delegates to the member swap.
template <class T, class A>
void swap(fbvector<T, A>& lhs, fbvector<T, A>& rhs) noexcept {
  lhs.swap(rhs);
}
1682 | |
1683 | //============================================================================= |
1684 | //----------------------------------------------------------------------------- |
1685 | // other |
1686 | |
1687 | namespace detail { |
1688 | |
// Format support: expose fbvector to folly::format as an indexable
// sequence container.
template <class T, class A>
struct IndexableTraits<fbvector<T, A>>
    : public IndexableTraitsSeq<fbvector<T, A>> {};
1693 | |
1694 | } // namespace detail |
1695 | |
// Resize *v to sz elements and then release any excess capacity back to
// the allocator (shrink is best-effort).
template <class T, class A>
void compactResize(fbvector<T, A>* v, size_t sz) {
  v->resize(sz);
  v->shrink_to_fit();
}
1701 | |
1702 | // DANGER |
1703 | // |
// relinquish and attach are deliberately not member functions, so that it is
1705 | // awkward to call them. It is very easy to shoot yourself in the foot with |
1706 | // these functions. |
1707 | // |
1708 | // If you call relinquish, then it is your responsibility to free the data |
1709 | // and the storage, both of which may have been generated in a non-standard |
1710 | // way through the fbvector's allocator. |
1711 | // |
1712 | // If you call attach, it is your responsibility to ensure that the fbvector |
1713 | // is fresh (size and capacity both zero), and that the supplied data is |
1714 | // capable of being manipulated by the allocator. |
1715 | // It is acceptable to supply a stack pointer IF: |
1716 | // (1) The vector's data does not outlive the stack pointer. This includes |
1717 | // extension of the data's life through a move operation. |
1718 | // (2) The pointer has enough capacity that the vector will never be |
1719 | // relocated. |
1720 | // (3) Insert is not called on the vector; these functions have leeway to |
1721 | // relocate the vector even if there is enough capacity. |
1722 | // (4) A stack pointer is compatible with the fbvector's allocator. |
1723 | // |
1724 | |
// Detach and return the vector's buffer, leaving the vector empty.
// The caller takes ownership of both the elements and the storage; see
// the DANGER comment above for the obligations this imposes.
template <class T, class A>
T* relinquish(fbvector<T, A>& v) {
  T* ret = v.data();
  v.impl_.b_ = v.impl_.e_ = v.impl_.z_ = nullptr;
  return ret;
}
1731 | |
// Adopt an externally allocated buffer of sz live elements and cap total
// slots. The vector must be fresh (never allocated); see the DANGER
// comment above for the constraints on the supplied storage.
template <class T, class A>
void attach(fbvector<T, A>& v, T* data, size_t sz, size_t cap) {
  assert(v.data() == nullptr);
  v.impl_.b_ = data;
  v.impl_.e_ = data + sz;
  v.impl_.z_ = data + cap;
}
1739 | |
#if __cpp_deduction_guides >= 201703
// Class template argument deduction from an iterator pair, mirroring
// std::vector's deduction guide.
template <
    class InputIt,
    class Allocator =
        std::allocator<typename std::iterator_traits<InputIt>::value_type>>
fbvector(InputIt, InputIt, Allocator = Allocator())
    ->fbvector<typename std::iterator_traits<InputIt>::value_type, Allocator>;
#endif
1748 | |
1749 | template <class T, class A, class U> |
1750 | void erase(fbvector<T, A>& v, U value) { |
1751 | v.erase(std::remove(v.begin(), v.end(), value), v.end()); |
1752 | } |
1753 | |
1754 | template <class T, class A, class Predicate> |
1755 | void erase_if(fbvector<T, A>& v, Predicate predicate) { |
1756 | v.erase(std::remove_if(v.begin(), v.end(), predicate), v.end()); |
1757 | } |
1758 | } // namespace folly |
1759 | |