/*
 *  Copyright (C) 2005, 2006, 2007, 2008, 2014 Apple Inc. All rights reserved.
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Library General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Library General Public License for more details.
 *
 *  You should have received a copy of the GNU Library General Public License
 *  along with this library; see the file COPYING.LIB. If not, write to
 *  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 *  Boston, MA 02110-1301, USA.
 *
 */

#ifndef WTF_Vector_h
#define WTF_Vector_h

#include <initializer_list>
#include <limits>
#include <string.h>
#include <type_traits>
#include <utility>
#include <wtf/CheckedArithmetic.h>
#include <wtf/FastMalloc.h>
#include <wtf/MallocPtr.h>
#include <wtf/Noncopyable.h>
#include <wtf/StdLibExtras.h>
#include <wtf/ValueCheck.h>
#include <wtf/VectorTraits.h>

#if ASAN_ENABLED
extern "C" void __sanitizer_annotate_contiguous_container(const void* begin, const void* end, const void* old_mid, const void* new_mid);
#endif

namespace WTF {

const size_t notFound = static_cast<size_t>(-1);

template<bool needsDestruction, typename T>
struct VectorDestructor;

template<typename T>
struct VectorDestructor<false, T> {
    static void destruct(T*, T*) {}
};

template<typename T>
struct VectorDestructor<true, T> {
    static void destruct(T* begin, T* end)
    {
        for (T* cur = begin; cur != end; ++cur)
            cur->~T();
    }
};

template<bool needsInitialization, bool canInitializeWithMemset, typename T>
struct VectorInitializer;

template<bool ignore, typename T>
struct VectorInitializer<false, ignore, T> {
    static void initialize(T*, T*) {}
};

template<typename T>
struct VectorInitializer<true, false, T> {
    static void initialize(T* begin, T* end)
    {
        for (T* cur = begin; cur != end; ++cur)
            new (NotNull, cur) T;
    }
};

template<typename T>
struct VectorInitializer<true, true, T> {
    static void initialize(T* begin, T* end)
    {
        memset(begin, 0, reinterpret_cast<char*>(end) - reinterpret_cast<char*>(begin));
    }
};

template<bool canMoveWithMemcpy, typename T>
struct VectorMover;

template<typename T>
struct VectorMover<false, T> {
    static void move(T* src, T* srcEnd, T* dst)
    {
        while (src != srcEnd) {
            new (NotNull, dst) T(WTFMove(*src));
            src->~T();
            ++dst;
            ++src;
        }
    }
    static void moveOverlapping(T* src, T* srcEnd, T* dst)
    {
        if (src > dst)
            move(src, srcEnd, dst);
        else {
            T* dstEnd = dst + (srcEnd - src);
            while (src != srcEnd) {
                --srcEnd;
                --dstEnd;
                new (NotNull, dstEnd) T(WTFMove(*srcEnd));
                srcEnd->~T();
            }
        }
    }
};

template<typename T>
struct VectorMover<true, T> {
    static void move(const T* src, const T* srcEnd, T* dst)
    {
        memcpy(dst, src, reinterpret_cast<const char*>(srcEnd) - reinterpret_cast<const char*>(src));
    }
    static void moveOverlapping(const T* src, const T* srcEnd, T* dst)
    {
        memmove(dst, src, reinterpret_cast<const char*>(srcEnd) - reinterpret_cast<const char*>(src));
    }
};

template<bool canCopyWithMemcpy, typename T>
struct VectorCopier;

template<typename T>
struct VectorCopier<false, T> {
    template<typename U>
    static void uninitializedCopy(const T* src, const T* srcEnd, U* dst)
    {
        while (src != srcEnd) {
            new (NotNull, dst) U(*src);
            ++dst;
            ++src;
        }
    }
};

template<typename T>
struct VectorCopier<true, T> {
    static void uninitializedCopy(const T* src, const T* srcEnd, T* dst)
    {
        memcpy(dst, src, reinterpret_cast<const char*>(srcEnd) - reinterpret_cast<const char*>(src));
    }
    template<typename U>
    static void uninitializedCopy(const T* src, const T* srcEnd, U* dst)
    {
        VectorCopier<false, T>::uninitializedCopy(src, srcEnd, dst);
    }
};

template<bool canFillWithMemset, typename T>
struct VectorFiller;

template<typename T>
struct VectorFiller<false, T> {
    static void uninitializedFill(T* dst, T* dstEnd, const T& val)
    {
        while (dst != dstEnd) {
            new (NotNull, dst) T(val);
            ++dst;
        }
    }
};

template<typename T>
struct VectorFiller<true, T> {
    static void uninitializedFill(T* dst, T* dstEnd, const T& val)
    {
        static_assert(sizeof(T) == 1, "Size of type T should be equal to one!");
#if COMPILER(GCC_OR_CLANG) && defined(_FORTIFY_SOURCE)
        if (!__builtin_constant_p(dstEnd - dst) || (!(dstEnd - dst)))
#endif
            memset(dst, val, dstEnd - dst);
    }
};
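// Note (illustrative sketch, not part of the original header): VectorTraits<T>::canFillWithMemset
// only holds for byte-sized POD types, because memset() replicates a single byte. So a
// hypothetical
//
//     Vector<char> padding(64, ' '); // one memset via VectorFiller<true, char>
//     Vector<int> counts(64, 7);     // placement-new per element via VectorFiller<false, int>
//
// takes the memset fast path for char but the element-wise path for int.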
template<bool canCompareWithMemcmp, typename T>
struct VectorComparer;

template<typename T>
struct VectorComparer<false, T> {
    static bool compare(const T* a, const T* b, size_t size)
    {
        for (size_t i = 0; i < size; ++i)
            if (!(a[i] == b[i]))
                return false;
        return true;
    }
};

template<typename T>
struct VectorComparer<true, T> {
    static bool compare(const T* a, const T* b, size_t size)
    {
        return memcmp(a, b, sizeof(T) * size) == 0;
    }
};

template<typename T>
struct VectorTypeOperations {
    static void destruct(T* begin, T* end)
    {
        VectorDestructor<!std::is_trivially_destructible<T>::value, T>::destruct(begin, end);
    }

    static void initialize(T* begin, T* end)
    {
        VectorInitializer<VectorTraits<T>::needsInitialization, VectorTraits<T>::canInitializeWithMemset, T>::initialize(begin, end);
    }

    static void move(T* src, T* srcEnd, T* dst)
    {
        VectorMover<VectorTraits<T>::canMoveWithMemcpy, T>::move(src, srcEnd, dst);
    }

    static void moveOverlapping(T* src, T* srcEnd, T* dst)
    {
        VectorMover<VectorTraits<T>::canMoveWithMemcpy, T>::moveOverlapping(src, srcEnd, dst);
    }

    static void uninitializedCopy(const T* src, const T* srcEnd, T* dst)
    {
        VectorCopier<VectorTraits<T>::canCopyWithMemcpy, T>::uninitializedCopy(src, srcEnd, dst);
    }

    static void uninitializedFill(T* dst, T* dstEnd, const T& val)
    {
        VectorFiller<VectorTraits<T>::canFillWithMemset, T>::uninitializedFill(dst, dstEnd, val);
    }

    static bool compare(const T* a, const T* b, size_t size)
    {
        return VectorComparer<VectorTraits<T>::canCompareWithMemcmp, T>::compare(a, b, size);
    }
};

template<typename T>
class VectorBufferBase {
    WTF_MAKE_NONCOPYABLE(VectorBufferBase);
public:
    void allocateBuffer(size_t newCapacity)
    {
        ASSERT(newCapacity);
        if (newCapacity > std::numeric_limits<unsigned>::max() / sizeof(T))
            CRASH();
        size_t sizeToAllocate = newCapacity * sizeof(T);
        m_capacity = sizeToAllocate / sizeof(T);
        m_buffer = static_cast<T*>(fastMalloc(sizeToAllocate));
    }

    bool tryAllocateBuffer(size_t newCapacity)
    {
        ASSERT(newCapacity);
        if (newCapacity > std::numeric_limits<unsigned>::max() / sizeof(T))
            return false;

        size_t sizeToAllocate = newCapacity * sizeof(T);
        T* newBuffer;
        if (tryFastMalloc(sizeToAllocate).getValue(newBuffer)) {
            m_capacity = sizeToAllocate / sizeof(T);
            m_buffer = newBuffer;
            return true;
        }
        return false;
    }

    bool shouldReallocateBuffer(size_t newCapacity) const
    {
        return VectorTraits<T>::canMoveWithMemcpy && m_capacity && newCapacity;
    }

    void reallocateBuffer(size_t newCapacity)
    {
        ASSERT(shouldReallocateBuffer(newCapacity));
        if (newCapacity > std::numeric_limits<unsigned>::max() / sizeof(T))
            CRASH();
        size_t sizeToAllocate = newCapacity * sizeof(T);
        m_capacity = sizeToAllocate / sizeof(T);
        m_buffer = static_cast<T*>(fastRealloc(m_buffer, sizeToAllocate));
    }

    void deallocateBuffer(T* bufferToDeallocate)
    {
        if (!bufferToDeallocate)
            return;

        if (m_buffer == bufferToDeallocate) {
            m_buffer = 0;
            m_capacity = 0;
        }

        fastFree(bufferToDeallocate);
    }

    T* buffer() { return m_buffer; }
    const T* buffer() const { return m_buffer; }
    static ptrdiff_t bufferMemoryOffset() { return OBJECT_OFFSETOF(VectorBufferBase, m_buffer); }
    size_t capacity() const { return m_capacity; }

    MallocPtr<T> releaseBuffer()
    {
        T* buffer = m_buffer;
        m_buffer = 0;
        m_capacity = 0;
        return adoptMallocPtr(buffer);
    }

protected:
    VectorBufferBase()
        : m_buffer(0)
        , m_capacity(0)
        , m_size(0)
    {
    }

    VectorBufferBase(T* buffer, size_t capacity, size_t size)
        : m_buffer(buffer)
        , m_capacity(capacity)
        , m_size(size)
    {
    }

    ~VectorBufferBase()
    {
        // FIXME: It would be nice to find a way to ASSERT that m_buffer hasn't leaked here.
    }

    T* m_buffer;
    unsigned m_capacity;
    unsigned m_size; // Only used by the Vector subclass, but placed here to avoid padding the struct.
};
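// Note (illustrative, not part of the original header): the guard in allocateBuffer()
// and reallocateBuffer() bounds the element count before computing the byte size, so
// newCapacity * sizeof(T) can neither wrap around nor exceed what the unsigned
// m_capacity field can represent. For example, with sizeof(T) == 8 the limit is
// 4294967295 / 8 == 536870911 elements; asking for 536870912 CRASH()es instead of
// silently allocating a too-small buffer.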
template<typename T, size_t inlineCapacity>
class VectorBuffer;

template<typename T>
class VectorBuffer<T, 0> : private VectorBufferBase<T> {
private:
    typedef VectorBufferBase<T> Base;
public:
    VectorBuffer()
    {
    }

    VectorBuffer(size_t capacity, size_t size = 0)
    {
        m_size = size;
        // Calling malloc(0) might take a lock and may actually do an
        // allocation on some systems.
        if (capacity)
            allocateBuffer(capacity);
    }

    ~VectorBuffer()
    {
        deallocateBuffer(buffer());
    }

    void swap(VectorBuffer<T, 0>& other, size_t, size_t)
    {
        std::swap(m_buffer, other.m_buffer);
        std::swap(m_capacity, other.m_capacity);
    }

    void restoreInlineBufferIfNeeded() { }

#if ASAN_ENABLED
    void* endOfBuffer()
    {
        return buffer() + capacity();
    }
#endif

    using Base::allocateBuffer;
    using Base::tryAllocateBuffer;
    using Base::shouldReallocateBuffer;
    using Base::reallocateBuffer;
    using Base::deallocateBuffer;

    using Base::buffer;
    using Base::capacity;
    using Base::bufferMemoryOffset;

    using Base::releaseBuffer;

protected:
    using Base::m_size;

private:
    using Base::m_buffer;
    using Base::m_capacity;
};

template<typename T, size_t inlineCapacity>
class VectorBuffer : private VectorBufferBase<T> {
    WTF_MAKE_NONCOPYABLE(VectorBuffer);
private:
    typedef VectorBufferBase<T> Base;
public:
    VectorBuffer()
        : Base(inlineBuffer(), inlineCapacity, 0)
    {
    }

    VectorBuffer(size_t capacity, size_t size = 0)
        : Base(inlineBuffer(), inlineCapacity, size)
    {
        if (capacity > inlineCapacity)
            Base::allocateBuffer(capacity);
    }

    ~VectorBuffer()
    {
        deallocateBuffer(buffer());
    }

    void allocateBuffer(size_t newCapacity)
    {
        // FIXME: This should ASSERT(!m_buffer) to catch misuse/leaks.
        if (newCapacity > inlineCapacity)
            Base::allocateBuffer(newCapacity);
        else {
            m_buffer = inlineBuffer();
            m_capacity = inlineCapacity;
        }
    }

    bool tryAllocateBuffer(size_t newCapacity)
    {
        if (newCapacity > inlineCapacity)
            return Base::tryAllocateBuffer(newCapacity);
        m_buffer = inlineBuffer();
        m_capacity = inlineCapacity;
        return true;
    }

    void deallocateBuffer(T* bufferToDeallocate)
    {
        if (bufferToDeallocate == inlineBuffer())
            return;
        Base::deallocateBuffer(bufferToDeallocate);
    }

    bool shouldReallocateBuffer(size_t newCapacity) const
    {
        // We cannot reallocate the inline buffer.
        return Base::shouldReallocateBuffer(newCapacity) && std::min(static_cast<size_t>(m_capacity), newCapacity) > inlineCapacity;
    }
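    // Note (illustrative, not part of the original header): both the old and the new
    // block must be heap-allocated for fastRealloc() to be legal. m_capacity > inlineCapacity
    // implies the current buffer is not the in-object storage, and newCapacity > inlineCapacity
    // implies the resized buffer won't be either, which is exactly what
    // std::min(m_capacity, newCapacity) > inlineCapacity checks.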
    void reallocateBuffer(size_t newCapacity)
    {
        ASSERT(shouldReallocateBuffer(newCapacity));
        Base::reallocateBuffer(newCapacity);
    }

    void swap(VectorBuffer& other, size_t mySize, size_t otherSize)
    {
        if (buffer() == inlineBuffer() && other.buffer() == other.inlineBuffer()) {
            swapInlineBuffer(other, mySize, otherSize);
            std::swap(m_capacity, other.m_capacity);
        } else if (buffer() == inlineBuffer()) {
            m_buffer = other.m_buffer;
            other.m_buffer = other.inlineBuffer();
            swapInlineBuffer(other, mySize, 0);
            std::swap(m_capacity, other.m_capacity);
        } else if (other.buffer() == other.inlineBuffer()) {
            other.m_buffer = m_buffer;
            m_buffer = inlineBuffer();
            swapInlineBuffer(other, 0, otherSize);
            std::swap(m_capacity, other.m_capacity);
        } else {
            std::swap(m_buffer, other.m_buffer);
            std::swap(m_capacity, other.m_capacity);
        }
    }

    void restoreInlineBufferIfNeeded()
    {
        if (m_buffer)
            return;
        m_buffer = inlineBuffer();
        m_capacity = inlineCapacity;
    }

#if ASAN_ENABLED
    void* endOfBuffer()
    {
        ASSERT(buffer());
        static_assert((offsetof(VectorBuffer, m_inlineBuffer) + sizeof(m_inlineBuffer)) % 8 == 0, "Inline buffer end needs to be on 8 byte boundary for ASan annotations to work.");

        if (buffer() == inlineBuffer())
            return reinterpret_cast<char*>(m_inlineBuffer) + sizeof(m_inlineBuffer);

        return buffer() + capacity();
    }
#endif

    using Base::buffer;
    using Base::capacity;
    using Base::bufferMemoryOffset;

    MallocPtr<T> releaseBuffer()
    {
        if (buffer() == inlineBuffer())
            return nullptr;
        return Base::releaseBuffer();
    }

protected:
    using Base::m_size;

private:
    using Base::m_buffer;
    using Base::m_capacity;

    void swapInlineBuffer(VectorBuffer& other, size_t mySize, size_t otherSize)
    {
        // FIXME: We could make swap part of VectorTypeOperations.
        // https://bugs.webkit.org/show_bug.cgi?id=128863
        if (std::is_pod<T>::value)
            std::swap(m_inlineBuffer, other.m_inlineBuffer);
        else
            swapInlineBuffers(inlineBuffer(), other.inlineBuffer(), mySize, otherSize);
    }

    static void swapInlineBuffers(T* left, T* right, size_t leftSize, size_t rightSize)
    {
        if (left == right)
            return;

        ASSERT(leftSize <= inlineCapacity);
        ASSERT(rightSize <= inlineCapacity);

        size_t swapBound = std::min(leftSize, rightSize);
        for (unsigned i = 0; i < swapBound; ++i)
            std::swap(left[i], right[i]);
        VectorTypeOperations<T>::move(left + swapBound, left + leftSize, right + swapBound);
        VectorTypeOperations<T>::move(right + swapBound, right + rightSize, left + swapBound);
    }

    T* inlineBuffer() { return reinterpret_cast_ptr<T*>(m_inlineBuffer); }
    const T* inlineBuffer() const { return reinterpret_cast_ptr<const T*>(m_inlineBuffer); }

#if ASAN_ENABLED
    // ASan needs the buffer to begin and end on 8-byte boundaries for annotations to work.
    // FIXME: Add a redzone before the buffer to catch off by one accesses. We don't need
    // a guard after, because the buffer is the last member variable.
    static const size_t asanInlineBufferAlignment = std::alignment_of<T>::value >= 8 ? std::alignment_of<T>::value : 8;
    static const size_t asanAdjustedInlineCapacity = ((sizeof(T) * inlineCapacity + 7) & ~7) / sizeof(T);
    typename std::aligned_storage<sizeof(T), asanInlineBufferAlignment>::type m_inlineBuffer[asanAdjustedInlineCapacity];
#else
    typename std::aligned_storage<sizeof(T), std::alignment_of<T>::value>::type m_inlineBuffer[inlineCapacity];
#endif
};
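// Usage sketch (illustrative, not part of the original header): the inlineCapacity
// template argument reserves in-object storage, so small vectors never touch the heap:
//
//     Vector<int, 16> samples;
//     for (int i = 0; i < 16; ++i)
//         samples.append(i); // stays in the inline buffer; a 17th append moves to the heap
//
// This is also why swap() above handles four cases explicitly: inline buffers cannot
// exchange pointers, so their contents have to be swapped element by element.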
struct UnsafeVectorOverflow {
    static NO_RETURN_DUE_TO_ASSERT void overflowed()
    {
        ASSERT_NOT_REACHED();
    }
};

template<typename T, size_t inlineCapacity = 0, typename OverflowHandler = CrashOnOverflow, size_t minCapacity = 16>
class Vector : private VectorBuffer<T, inlineCapacity> {
    WTF_MAKE_FAST_ALLOCATED;
private:
    typedef VectorBuffer<T, inlineCapacity> Base;
    typedef VectorTypeOperations<T> TypeOperations;

public:
    typedef T ValueType;

    typedef T* iterator;
    typedef const T* const_iterator;
    typedef std::reverse_iterator<iterator> reverse_iterator;
    typedef std::reverse_iterator<const_iterator> const_reverse_iterator;

    Vector()
    {
    }

    // Unlike in std::vector, this constructor does not initialize POD types.
    explicit Vector(size_t size)
        : Base(size, size)
    {
        asanSetInitialBufferSizeTo(size);

        if (begin())
            TypeOperations::initialize(begin(), end());
    }

    Vector(size_t size, const T& val)
        : Base(size, size)
    {
        asanSetInitialBufferSizeTo(size);

        if (begin())
            TypeOperations::uninitializedFill(begin(), end(), val);
    }

    Vector(std::initializer_list<T> initializerList)
    {
        reserveInitialCapacity(initializerList.size());

        asanSetInitialBufferSizeTo(initializerList.size());

        for (const auto& element : initializerList)
            uncheckedAppend(element);
    }

    ~Vector()
    {
        if (m_size)
            shrink(0);

        asanSetBufferSizeToFullCapacity();
    }

    Vector(const Vector&);
    template<size_t otherCapacity, typename otherOverflowBehaviour, size_t otherMinimumCapacity>
    explicit Vector(const Vector<T, otherCapacity, otherOverflowBehaviour, otherMinimumCapacity>&);

    Vector& operator=(const Vector&);
    template<size_t otherCapacity, typename otherOverflowBehaviour, size_t otherMinimumCapacity>
    Vector& operator=(const Vector<T, otherCapacity, otherOverflowBehaviour, otherMinimumCapacity>&);

    Vector(Vector&&);
    Vector& operator=(Vector&&);

    size_t size() const { return m_size; }
    static ptrdiff_t sizeMemoryOffset() { return OBJECT_OFFSETOF(Vector, m_size); }
    size_t capacity() const { return Base::capacity(); }
    bool isEmpty() const { return !size(); }

    T& at(size_t i)
    {
        if (UNLIKELY(i >= size()))
            OverflowHandler::overflowed();
        return Base::buffer()[i];
    }
    const T& at(size_t i) const
    {
        if (UNLIKELY(i >= size()))
            OverflowHandler::overflowed();
        return Base::buffer()[i];
    }
    T& at(Checked<size_t> i)
    {
        RELEASE_ASSERT(i < size());
        return Base::buffer()[i];
    }
    const T& at(Checked<size_t> i) const
    {
        RELEASE_ASSERT(i < size());
        return Base::buffer()[i];
    }

    T& operator[](size_t i) { return at(i); }
    const T& operator[](size_t i) const { return at(i); }
    T& operator[](Checked<size_t> i) { return at(i); }
    const T& operator[](Checked<size_t> i) const { return at(i); }

    T* data() { return Base::buffer(); }
    const T* data() const { return Base::buffer(); }
    static ptrdiff_t dataMemoryOffset() { return Base::bufferMemoryOffset(); }

    iterator begin() { return data(); }
    iterator end() { return begin() + m_size; }
    const_iterator begin() const { return data(); }
    const_iterator end() const { return begin() + m_size; }

    reverse_iterator rbegin() { return reverse_iterator(end()); }
    reverse_iterator rend() { return reverse_iterator(begin()); }
    const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
    const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }

    T& first() { return at(0); }
    const T& first() const { return at(0); }
    T& last() { return at(size() - 1); }
    const T& last() const { return at(size() - 1); }

    T takeLast()
    {
        T result = WTFMove(last());
        removeLast();
        return result;
    }

    template<typename U> bool contains(const U&) const;
    template<typename U> size_t find(const U&) const;
    template<typename U> size_t reverseFind(const U&) const;

    void shrink(size_t size);
    void grow(size_t size);
    void resize(size_t size);
    void resizeToFit(size_t size);
    void reserveCapacity(size_t newCapacity);
    bool tryReserveCapacity(size_t newCapacity);
    void reserveInitialCapacity(size_t initialCapacity);
    void shrinkCapacity(size_t newCapacity);
    void shrinkToFit() { shrinkCapacity(size()); }

    void clear() { shrinkCapacity(0); }

    void append(ValueType&& value) { append<ValueType>(std::forward<ValueType>(value)); }
    template<typename U> void append(U&&);
    template<typename... Args> void constructAndAppend(Args&&...);

    void uncheckedAppend(ValueType&& value) { uncheckedAppend<ValueType>(std::forward<ValueType>(value)); }
    template<typename U> void uncheckedAppend(U&&);

    template<typename U> void append(const U*, size_t);
    template<typename U, size_t otherCapacity> void appendVector(const Vector<U, otherCapacity>&);
    template<typename U> bool tryAppend(const U*, size_t);

    template<typename U> void insert(size_t position, const U*, size_t);
    template<typename U> void insert(size_t position, U&&);
    template<typename U, size_t c> void insertVector(size_t position, const Vector<U, c>&);

    void remove(size_t position);
    void remove(size_t position, size_t length);
    template<typename U> bool removeFirst(const U&);
    template<typename MatchFunction> bool removeFirstMatching(const MatchFunction&);
    template<typename U> unsigned removeAll(const U&);
    template<typename MatchFunction> unsigned removeAllMatching(const MatchFunction&);

    void removeLast()
    {
        if (UNLIKELY(isEmpty()))
            OverflowHandler::overflowed();
        shrink(size() - 1);
    }

    void fill(const T&, size_t);
    void fill(const T& val) { fill(val, size()); }

    template<typename Iterator> void appendRange(Iterator start, Iterator end);

    MallocPtr<T> releaseBuffer();

    void swap(Vector<T, inlineCapacity, OverflowHandler, minCapacity>& other)
    {
#if ASAN_ENABLED
        if (this == std::addressof(other)) // ASan will crash if we try to restrict access to the same buffer twice.
            return;
#endif

        // Make it possible to copy inline buffers.
        asanSetBufferSizeToFullCapacity();
        other.asanSetBufferSizeToFullCapacity();

        Base::swap(other, m_size, other.m_size);
        std::swap(m_size, other.m_size);

        asanSetInitialBufferSizeTo(m_size);
        other.asanSetInitialBufferSizeTo(other.m_size);
    }

    void reverse();

    void checkConsistency();

private:
    void expandCapacity(size_t newMinCapacity);
    T* expandCapacity(size_t newMinCapacity, T*);
    bool tryExpandCapacity(size_t newMinCapacity);
    const T* tryExpandCapacity(size_t newMinCapacity, const T*);
    template<typename U> U* expandCapacity(size_t newMinCapacity, U*);
    template<typename U> void appendSlowCase(U&&);
    template<typename... Args> void constructAndAppendSlowCase(Args&&...);

    void asanSetInitialBufferSizeTo(size_t);
    void asanSetBufferSizeToFullCapacity();
    void asanBufferSizeWillChangeTo(size_t);

    using Base::m_size;
    using Base::buffer;
    using Base::capacity;
    using Base::swap;
    using Base::allocateBuffer;
    using Base::deallocateBuffer;
    using Base::tryAllocateBuffer;
    using Base::shouldReallocateBuffer;
    using Base::reallocateBuffer;
    using Base::restoreInlineBufferIfNeeded;
    using Base::releaseBuffer;
#if ASAN_ENABLED
    using Base::endOfBuffer;
#endif
};

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
Vector<T, inlineCapacity, OverflowHandler, minCapacity>::Vector(const Vector& other)
    : Base(other.capacity(), other.size())
{
    asanSetInitialBufferSizeTo(other.size());

    if (begin())
        TypeOperations::uninitializedCopy(other.begin(), other.end(), begin());
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<size_t otherCapacity, typename otherOverflowBehaviour, size_t otherMinimumCapacity>
Vector<T, inlineCapacity, OverflowHandler, minCapacity>::Vector(const Vector<T, otherCapacity, otherOverflowBehaviour, otherMinimumCapacity>& other)
    : Base(other.capacity(), other.size())
{
    asanSetInitialBufferSizeTo(other.size());

    if (begin())
        TypeOperations::uninitializedCopy(other.begin(), other.end(), begin());
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
Vector<T, inlineCapacity, OverflowHandler, minCapacity>& Vector<T, inlineCapacity, OverflowHandler, minCapacity>::operator=(const Vector<T, inlineCapacity, OverflowHandler, minCapacity>& other)
{
    if (&other == this)
        return *this;

    if (size() > other.size())
        shrink(other.size());
    else if (other.size() > capacity()) {
        clear();
        reserveCapacity(other.size());
        ASSERT(begin());
    }

    asanBufferSizeWillChangeTo(other.size());

    std::copy(other.begin(), other.begin() + size(), begin());
    TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end());
    m_size = other.size();

    return *this;
}

inline bool typelessPointersAreEqual(const void* a, const void* b)
{
    return a == b;
}
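// Note (illustrative, not part of the original header): different Vector instantiations
// are unrelated C++ types, so "&other == this" would not even compile in the
// cross-capacity operator= below. Comparing through const void* lets the ASSERT catch
// the one illegal case — two references to the same storage reaching the templated
// overload — without pretending the pointer types are directly comparable.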
template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<size_t otherCapacity, typename otherOverflowBehaviour, size_t otherMinimumCapacity>
Vector<T, inlineCapacity, OverflowHandler, minCapacity>& Vector<T, inlineCapacity, OverflowHandler, minCapacity>::operator=(const Vector<T, otherCapacity, otherOverflowBehaviour, otherMinimumCapacity>& other)
{
    // If the inline capacities match, we should call the more specific
    // template. If the inline capacities don't match, the two objects
    // shouldn't be allocated the same address.
    ASSERT(!typelessPointersAreEqual(&other, this));

    if (size() > other.size())
        shrink(other.size());
    else if (other.size() > capacity()) {
        clear();
        reserveCapacity(other.size());
        ASSERT(begin());
    }

    asanBufferSizeWillChangeTo(other.size());

    std::copy(other.begin(), other.begin() + size(), begin());
    TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end());
    m_size = other.size();

    return *this;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline Vector<T, inlineCapacity, OverflowHandler, minCapacity>::Vector(Vector&& other)
{
    swap(other);
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline Vector<T, inlineCapacity, OverflowHandler, minCapacity>& Vector<T, inlineCapacity, OverflowHandler, minCapacity>::operator=(Vector&& other)
{
    swap(other);
    return *this;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::contains(const U& value) const
{
    return find(value) != notFound;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
size_t Vector<T, inlineCapacity, OverflowHandler, minCapacity>::find(const U& value) const
{
    for (size_t i = 0; i < size(); ++i) {
        if (at(i) == value)
            return i;
    }
    return notFound;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
size_t Vector<T, inlineCapacity, OverflowHandler, minCapacity>::reverseFind(const U& value) const
{
    for (size_t i = 1; i <= size(); ++i) {
        const size_t index = size() - i;
        if (at(index) == value)
            return index;
    }
    return notFound;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::fill(const T& val, size_t newSize)
{
    if (size() > newSize)
        shrink(newSize);
    else if (newSize > capacity()) {
        clear();
        reserveCapacity(newSize);
        ASSERT(begin());
    }

    asanBufferSizeWillChangeTo(newSize);

    std::fill(begin(), end(), val);
    TypeOperations::uninitializedFill(end(), begin() + newSize, val);
    m_size = newSize;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename Iterator>
void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::appendRange(Iterator start, Iterator end)
{
    for (Iterator it = start; it != end; ++it)
        append(*it);
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::expandCapacity(size_t newMinCapacity)
{
    reserveCapacity(std::max(newMinCapacity, std::max(static_cast<size_t>(minCapacity), capacity() + capacity() / 4 + 1)));
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
T* Vector<T, inlineCapacity, OverflowHandler, minCapacity>::expandCapacity(size_t newMinCapacity, T* ptr)
{
    if (ptr < begin() || ptr >= end()) {
        expandCapacity(newMinCapacity);
        return ptr;
    }
    size_t index = ptr - begin();
    expandCapacity(newMinCapacity);
    return begin() + index;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::tryExpandCapacity(size_t newMinCapacity)
{
    return tryReserveCapacity(std::max(newMinCapacity, std::max(static_cast<size_t>(minCapacity), capacity() + capacity() / 4 + 1)));
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
const T* Vector<T, inlineCapacity, OverflowHandler, minCapacity>::tryExpandCapacity(size_t newMinCapacity, const T* ptr)
{
    if (ptr < begin() || ptr >= end()) {
        if (!tryExpandCapacity(newMinCapacity))
            return 0;
        return ptr;
    }
    size_t index = ptr - begin();
    if (!tryExpandCapacity(newMinCapacity))
        return 0;
    return begin() + index;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
inline U* Vector<T, inlineCapacity, OverflowHandler, minCapacity>::expandCapacity(size_t newMinCapacity, U* ptr)
{
    expandCapacity(newMinCapacity);
    return ptr;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::resize(size_t size)
{
    if (size <= m_size) {
        TypeOperations::destruct(begin() + size, end());
        asanBufferSizeWillChangeTo(size);
    } else {
        if (size > capacity())
            expandCapacity(size);
        asanBufferSizeWillChangeTo(size);
        if (begin())
            TypeOperations::initialize(end(), begin() + size);
    }

    m_size = size;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::resizeToFit(size_t size)
{
    reserveCapacity(size);
    resize(size);
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::shrink(size_t size)
{
    ASSERT(size <= m_size);
    TypeOperations::destruct(begin() + size, end());
    asanBufferSizeWillChangeTo(size);
    m_size = size;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::grow(size_t size)
{
    ASSERT(size >= m_size);
    if (size > capacity())
        expandCapacity(size);
    asanBufferSizeWillChangeTo(size);
    if (begin())
        TypeOperations::initialize(end(), begin() + size);
    m_size = size;
}
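// Note (illustrative, not part of the original header): the expandCapacity() overloads
// above grow by roughly 25% and never reserve less than minCapacity (16 by default).
// Appending one element at a time to an empty Vector<int> therefore reserves
// 16, 21, 27, 34, 43, ... slots; each step is capacity() + capacity() / 4 + 1.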
template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::asanSetInitialBufferSizeTo(size_t size)
{
#if ASAN_ENABLED
    if (!buffer())
        return;

    // This function restricts buffer access to only elements in [begin(), end()) range, making ASan detect an error
    // when accessing elements in [end(), endOfBuffer()) range.
    // A newly allocated buffer can be accessed without restrictions, so "old_mid" argument equals "end" argument.
    __sanitizer_annotate_contiguous_container(buffer(), endOfBuffer(), endOfBuffer(), buffer() + size);
#else
    UNUSED_PARAM(size);
#endif
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::asanSetBufferSizeToFullCapacity()
{
#if ASAN_ENABLED
    if (!buffer())
        return;

    // ASan requires that the annotation is returned to its initial state before deallocation.
    __sanitizer_annotate_contiguous_container(buffer(), endOfBuffer(), buffer() + size(), endOfBuffer());
#endif
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::asanBufferSizeWillChangeTo(size_t newSize)
{
#if ASAN_ENABLED
    if (!buffer())
        return;

    // Change allowed range.
    __sanitizer_annotate_contiguous_container(buffer(), endOfBuffer(), buffer() + size(), buffer() + newSize);
#else
    UNUSED_PARAM(newSize);
#endif
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::reserveCapacity(size_t newCapacity)
{
    if (newCapacity <= capacity())
        return;
    T* oldBuffer = begin();
    T* oldEnd = end();

    asanSetBufferSizeToFullCapacity();

    Base::allocateBuffer(newCapacity);
    ASSERT(begin());

    asanSetInitialBufferSizeTo(size());

    TypeOperations::move(oldBuffer, oldEnd, begin());
    Base::deallocateBuffer(oldBuffer);
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::tryReserveCapacity(size_t newCapacity)
{
    if (newCapacity <= capacity())
        return true;
    T* oldBuffer = begin();
    T* oldEnd = end();

    asanSetBufferSizeToFullCapacity();

    if (!Base::tryAllocateBuffer(newCapacity)) {
        asanSetInitialBufferSizeTo(size());
        return false;
    }
    ASSERT(begin());

    asanSetInitialBufferSizeTo(size());

    TypeOperations::move(oldBuffer, oldEnd, begin());
    Base::deallocateBuffer(oldBuffer);
    return true;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::reserveInitialCapacity(size_t initialCapacity)
{
    ASSERT(!m_size);
    ASSERT(capacity() == inlineCapacity);
    if (initialCapacity > inlineCapacity)
        Base::allocateBuffer(initialCapacity);
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::shrinkCapacity(size_t newCapacity)
{
    if (newCapacity >= capacity())
        return;

    if (newCapacity < size())
        shrink(newCapacity);

    asanSetBufferSizeToFullCapacity();

    T* oldBuffer = begin();
    if (newCapacity > 0) {
        if (Base::shouldReallocateBuffer(newCapacity)) {
            Base::reallocateBuffer(newCapacity);
            asanSetInitialBufferSizeTo(size());
            return;
        }

        T* oldEnd = end();
        Base::allocateBuffer(newCapacity);
        if (begin() != oldBuffer)
            TypeOperations::move(oldBuffer, oldEnd, begin());
    }

    Base::deallocateBuffer(oldBuffer);
    Base::restoreInlineBufferIfNeeded();

    asanSetInitialBufferSizeTo(size());
}
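// Note (illustrative sketch, not part of the original header): the asan* helpers above
// bracket every size change, so with ASAN_ENABLED a sequence like
//
//     Vector<int> v;
//     v.reserveCapacity(8);  // slots in [size(), capacity()) stay poisoned
//     v.append(1);           // asanBufferSizeWillChangeTo(1) unpoisons one slot
//     int x = v.data()[3];   // container-overflow report: slot 3 is unconstructed
//
// turns a read of an unconstructed slot into an immediate ASan error, even though
// the address lies inside the allocation.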
// Templatizing these is better than just letting the conversion happen implicitly,
// because for instance it allows a PassRefPtr to be appended to a RefPtr vector
// without refcount thrash.
template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::append(const U* data, size_t dataSize)
{
    size_t newSize = m_size + dataSize;
    if (newSize > capacity()) {
        data = expandCapacity(newSize, data);
        ASSERT(begin());
    }
    if (newSize < m_size)
        CRASH();

    asanBufferSizeWillChangeTo(newSize);

    T* dest = end();
    VectorCopier<std::is_trivial<T>::value, U>::uninitializedCopy(data, std::addressof(data[dataSize]), dest);
    m_size = newSize;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::tryAppend(const U* data, size_t dataSize)
{
    size_t newSize = m_size + dataSize;
    if (newSize > capacity()) {
        data = tryExpandCapacity(newSize, data);
        if (!data)
            return false;
        ASSERT(begin());
    }
    if (newSize < m_size)
        return false;

    asanBufferSizeWillChangeTo(newSize);

    T* dest = end();
    VectorCopier<std::is_trivial<T>::value, U>::uninitializedCopy(data, std::addressof(data[dataSize]), dest);
    m_size = newSize;
    return true;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
ALWAYS_INLINE void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::append(U&& value)
{
    if (size() != capacity()) {
        asanBufferSizeWillChangeTo(m_size + 1);

        new (NotNull, end()) T(std::forward<U>(value));
        ++m_size;
        return;
    }

    appendSlowCase(std::forward<U>(value));
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename... Args>
ALWAYS_INLINE void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::constructAndAppend(Args&&... args)
{
    if (size() != capacity()) {
        asanBufferSizeWillChangeTo(m_size + 1);

        new (NotNull, end()) T(std::forward<Args>(args)...);
        ++m_size;
        return;
    }

    constructAndAppendSlowCase(std::forward<Args>(args)...);
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::appendSlowCase(U&& value)
{
    ASSERT(size() == capacity());

    auto ptr = const_cast<typename std::remove_const<typename std::remove_reference<U>::type>::type*>(std::addressof(value));
    ptr = expandCapacity(size() + 1, ptr);
    ASSERT(begin());

    asanBufferSizeWillChangeTo(m_size + 1);

    new (NotNull, end()) T(std::forward<U>(*ptr));
    ++m_size;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename... Args>
void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::constructAndAppendSlowCase(Args&&... args)
{
    ASSERT(size() == capacity());

    expandCapacity(size() + 1);
    ASSERT(begin());

    asanBufferSizeWillChangeTo(m_size + 1);

    new (NotNull, end()) T(std::forward<Args>(args)...);
    ++m_size;
}
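// Note (illustrative, not part of the original header): appendSlowCase() routes the
// source pointer through expandCapacity(size() + 1, ptr), which rebases it if it points
// into the vector's own storage. That makes self-append safe across reallocation:
//
//     Vector<String> v = ...;
//     v.append(v.last()); // ok even if the buffer moves while growing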
// This version of append saves a branch in the case where you know that the
// vector's capacity is large enough for the append to succeed.
template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::uncheckedAppend(U&& value)
{
    ASSERT(size() < capacity());

    asanBufferSizeWillChangeTo(m_size + 1);

    auto ptr = std::addressof(value);
    new (NotNull, end()) T(std::forward<U>(*ptr));
    ++m_size;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U, size_t otherCapacity>
inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::appendVector(const Vector<U, otherCapacity>& val)
{
    append(val.begin(), val.size());
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insert(size_t position, const U* data, size_t dataSize)
{
    ASSERT_WITH_SECURITY_IMPLICATION(position <= size());
    size_t newSize = m_size + dataSize;
    if (newSize > capacity()) {
        data = expandCapacity(newSize, data);
        ASSERT(begin());
    }
    if (newSize < m_size)
        CRASH();

    asanBufferSizeWillChangeTo(newSize);

    T* spot = begin() + position;
    TypeOperations::moveOverlapping(spot, end(), spot + dataSize);
    VectorCopier<std::is_trivial<T>::value, U>::uninitializedCopy(data, std::addressof(data[dataSize]), spot);
    m_size = newSize;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insert(size_t position, U&& value)
{
    ASSERT_WITH_SECURITY_IMPLICATION(position <= size());

    auto ptr = const_cast<typename std::remove_const<typename std::remove_reference<U>::type>::type*>(std::addressof(value));
    if (size() == capacity()) {
        ptr = expandCapacity(size() + 1, ptr);
        ASSERT(begin());
    }

    asanBufferSizeWillChangeTo(m_size + 1);

    T* spot = begin() + position;
    TypeOperations::moveOverlapping(spot, end(), spot + 1);
    new (NotNull, spot) T(std::forward<U>(*ptr));
    ++m_size;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U, size_t c>
inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insertVector(size_t position, const Vector<U, c>& val)
{
    insert(position, val.begin(), val.size());
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::remove(size_t position)
{
    ASSERT_WITH_SECURITY_IMPLICATION(position < size());
    T* spot = begin() + position;
    spot->~T();
    TypeOperations::moveOverlapping(spot + 1, end(), spot);
    asanBufferSizeWillChangeTo(m_size - 1);
    --m_size;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::remove(size_t position, size_t length)
{
    ASSERT_WITH_SECURITY_IMPLICATION(position <= size());
    ASSERT_WITH_SECURITY_IMPLICATION(position + length <= size());
    T* beginSpot = begin() + position;
    T* endSpot = beginSpot + length;
    TypeOperations::destruct(beginSpot, endSpot);
    TypeOperations::moveOverlapping(endSpot, end(), beginSpot);
    asanBufferSizeWillChangeTo(m_size - length);
    m_size -= length;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
inline bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::removeFirst(const U& value)
{
    return removeFirstMatching([&value] (const T& current) {
        return current == value;
    });
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename MatchFunction>
inline bool Vector<T, inlineCapacity, OverflowHandler, minCapacity>::removeFirstMatching(const MatchFunction& matches)
{
    for (size_t i = 0; i < size(); ++i) {
        if (matches(at(i))) {
            remove(i);
            return true;
        }
    }
    return false;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename U>
inline unsigned Vector<T, inlineCapacity, OverflowHandler, minCapacity>::removeAll(const U& value)
{
    return removeAllMatching([&value] (const T& current) {
        return current == value;
    });
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
template<typename MatchFunction>
inline unsigned Vector<T, inlineCapacity, OverflowHandler, minCapacity>::removeAllMatching(const MatchFunction& matches)
{
    iterator holeBegin = end();
    iterator holeEnd = end();
    unsigned matchCount = 0;
    for (auto it = begin(), itEnd = end(); it != itEnd; ++it) {
        if (matches(*it)) {
            if (holeBegin == end())
                holeBegin = it;
            else if (holeEnd != it) {
                TypeOperations::moveOverlapping(holeEnd, it, holeBegin);
                holeBegin += it - holeEnd;
            }
            holeEnd = it + 1;
            it->~T();
            ++matchCount;
        }
    }
    if (holeEnd != end())
        TypeOperations::moveOverlapping(holeEnd, end(), holeBegin);
    asanBufferSizeWillChangeTo(m_size - matchCount);
    m_size -= matchCount;
    return matchCount;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::reverse()
{
    for (size_t i = 0; i < m_size / 2; ++i)
        std::swap(at(i), at(m_size - 1 - i));
}
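// Note (illustrative, not part of the original header): removeAllMatching() above
// compacts survivors in a single left-to-right pass instead of calling remove() once
// per match. Removing the odd values from [1, 2, 3, 4, 5] destroys 1, 3, and 5 in
// place and slides 2 and 4 left exactly once each, keeping the operation O(n) where
// repeated remove() calls would be O(n^2).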
template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline MallocPtr<T> Vector<T, inlineCapacity, OverflowHandler, minCapacity>::releaseBuffer()
{
    // FIXME: Find a way to preserve annotations on the returned buffer.
    // ASan requires that all annotations are removed before deallocation,
    // and MallocPtr doesn't implement that.
    asanSetBufferSizeToFullCapacity();

    auto buffer = Base::releaseBuffer();
    if (inlineCapacity && !buffer && m_size) {
        // If the vector had some data, but no buffer to release,
        // that means it was using the inline buffer. In that case,
        // we create a brand new buffer so the caller always gets one.
        size_t bytes = m_size * sizeof(T);
        buffer = adoptMallocPtr(static_cast<T*>(fastMalloc(bytes)));
        memcpy(buffer.get(), data(), bytes);
    }
    m_size = 0;
    // FIXME: Should we call Base::restoreInlineBufferIfNeeded() here?
    return buffer;
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::checkConsistency()
{
#if !ASSERT_DISABLED
    for (size_t i = 0; i < size(); ++i)
        ValueCheck<T>::checkConsistency(at(i));
#endif
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline void swap(Vector<T, inlineCapacity, OverflowHandler, minCapacity>& a, Vector<T, inlineCapacity, OverflowHandler, minCapacity>& b)
{
    a.swap(b);
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
bool operator==(const Vector<T, inlineCapacity, OverflowHandler, minCapacity>& a, const Vector<T, inlineCapacity, OverflowHandler, minCapacity>& b)
{
    if (a.size() != b.size())
        return false;

    return VectorTypeOperations<T>::compare(a.data(), b.data(), a.size());
}

template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
inline bool operator!=(const Vector<T, inlineCapacity, OverflowHandler, minCapacity>& a, const Vector<T, inlineCapacity, OverflowHandler, minCapacity>& b)
{
    return !(a == b);
}

#if !ASSERT_DISABLED
template<typename T> struct ValueCheck<Vector<T>> {
    typedef Vector<T> TraitType;
    static void checkConsistency(const Vector<T>& v)
    {
        v.checkConsistency();
    }
};
#endif

} // namespace WTF

using WTF::Vector;
using WTF::UnsafeVectorOverflow;
using WTF::notFound;

#endif // WTF_Vector_h