Changed to use lfpAlloc instead of ltalloc for the experimental multi-threaded .obj parser, since ltalloc is not portable (e.g. it does not support the ARM architecture).

This commit is contained in:
Syoyo Fujita
2017-07-05 16:32:40 +09:00
parent 99518b6d3e
commit 95fba2ab32
12 changed files with 402 additions and 1064 deletions

View File

@@ -0,0 +1,89 @@
#ifndef LF_POOL_ALLOCATOR
#define LF_POOL_ALLOCATOR
#include <cstddef>
#include <memory>
#include <new>
#include <thread>
#include <lfpAlloc/PoolDispatcher.hpp>
namespace lfpAlloc {
template <typename T, std::size_t NumPools = 70>
class lfpAllocator {
public:
    using value_type = T;
    using pointer = T*;
    using const_pointer = const T*;
    using reference = T&;
    using const_reference = T const&;

    // Rebind support (allocator_traits is incomplete on some of the
    // older compilers this code targets).
    template <typename U>
    struct rebind {
        typedef lfpAllocator<U, NumPools> other;
    };

    lfpAllocator() {}

    template <typename U>
    lfpAllocator(lfpAllocator<U, NumPools>&&) noexcept {}

    template <typename U>
    lfpAllocator(const lfpAllocator<U, NumPools>&) noexcept {}

    // Returns uninitialized storage for 'count' objects of T. Requests
    // no larger than the biggest pool cell are served by the static
    // dispatcher (whose pools are thread_local internally); anything
    // bigger falls back to the global heap.
    T* allocate(std::size_t count) {
        if (sizeof(T) * count <=
            alignof(std::max_align_t) * NumPools - sizeof(void*)) {
            return reinterpret_cast<T*>(
                dispatcher_.allocate(sizeof(T) * count));
        } else {
            // Fix: this used to be 'new T[count]', which default-
            // constructs every element. An allocator must return raw,
            // unconstructed memory -- construct() is invoked separately
            // by the container -- so the array form constructed objects
            // twice and later destroyed them twice via delete[]. It also
            // failed to compile for T without a default constructor.
            return static_cast<T*>(::operator new(sizeof(T) * count));
        }
    }

    // Releases storage obtained from allocate(). The size threshold must
    // match allocate() exactly so the request is routed the same way.
    void deallocate(T* p, std::size_t count) noexcept {
        if (sizeof(T) * count <=
            alignof(std::max_align_t) * NumPools - sizeof(void*)) {
            dispatcher_.deallocate(p, sizeof(T) * count);
        } else {
            ::operator delete(p); // pairs with ::operator new above
        }
    }

    // Should not be required, but allocator_traits is not complete in
    // gcc 4.9.1
    template <typename U>
    void destroy(U* p) {
        p->~U();
    }

    template <typename U, typename... Args>
    void construct(U* p, Args&&... args) {
        new (p) U(std::forward<Args>(args)...);
    }

    template <typename Ty, typename U, std::size_t N, std::size_t M>
    friend bool operator==(const lfpAllocator<Ty, N>&,
                           const lfpAllocator<U, M>&) noexcept;

    template <typename U, std::size_t M>
    friend class lfpAllocator;

private:
    // One shared dispatcher per (T, NumPools) instantiation.
    static PoolDispatcher<NumPools> dispatcher_;
};
// Definition of the static dispatcher declared in lfpAllocator; there is
// one per (T, N) instantiation, and its pools are thread_local internally.
template <typename T, std::size_t N>
PoolDispatcher<N> lfpAllocator<T, N>::dispatcher_;
// Two lfpAllocator instances are interchangeable exactly when they use the
// same number of pools; the allocators themselves carry no per-instance
// state, so only the NumPools parameter matters.
template <typename T, typename U, std::size_t N, std::size_t M>
inline bool operator==(const lfpAllocator<T, N>&,
                       const lfpAllocator<U, M>&) noexcept {
    return N == M;
}

// Negation of the equality test above.
template <typename T, typename U, std::size_t N, std::size_t M>
inline bool operator!=(const lfpAllocator<T, N>& lhs,
                       const lfpAllocator<U, M>& rhs) noexcept {
    return !(lhs == rhs);
}
}
#endif

View File

@@ -0,0 +1,116 @@
#ifndef LF_POOL_ALLOC_CHUNK_LIST
#define LF_POOL_ALLOC_CHUNK_LIST
#include <cstdint>
#include <atomic>
#include <type_traits>
// Unless the user explicitly opts into potentially-blocking atomics via
// LFP_ALLOW_BLOCKING, refuse to build on targets where atomic pointer
// operations are not always lock-free (ATOMIC_POINTER_LOCK_FREE == 2
// means "always lock-free").
#ifndef LFP_ALLOW_BLOCKING
static_assert(ATOMIC_POINTER_LOCK_FREE == 2,
"Atomic pointer is not lock-free.");
#endif
namespace lfpAlloc {
// One free-list cell supplying Size bytes of storage. While the cell is
// free, next_ links it to the following free cell; the default value
// (this + 1) is correct only because cells are constructed contiguously
// inside Chunk's memBlock_ array (Chunk's constructor terminates the
// last cell's link with nullptr).
template <std::size_t Size>
struct Cell {
uint8_t val_[Size];
Cell* next_ = this + 1;
};
// For small types (less than the size of void*), no additional
// space is needed, so union val_ with next_ to avoid overhead: the
// payload bytes and the free-list link share storage, since a cell is
// never simultaneously free (link live) and allocated (payload live).
template <>
struct Cell<0> {
Cell() : next_{this + 1} {}
union {
uint8_t val_[sizeof(Cell*)];
Cell* next_;
};
};
// A contiguous block of AllocationsPerChunk cells. Each Cell's default
// initializer already chains it to its neighbour in the array, so the
// constructor only needs to terminate the chain at the final cell.
template <std::size_t Size, std::size_t AllocationsPerChunk>
struct Chunk {
    Chunk() noexcept {
        memBlock_[AllocationsPerChunk - 1].next_ = nullptr;
    }
    Cell<Size> memBlock_[AllocationsPerChunk];
};
// Intrusive singly-linked list node with an atomic next pointer, used by
// ChunkList for its lock-free stacks of chunks and cell chains.
template <typename T>
struct Node {
    Node() = default;
    Node(const T& val) : val_(val) {}
    T val_{};
    std::atomic<Node<T>*> next_{nullptr};
};
// Process-wide registry of free-cell chains for one cell size.
// Thread-local Pool objects pull a whole pre-linked chain from here when
// they run dry, and push their remaining chain back on destruction.
// Both head_ and handle_ are maintained with CAS loops (lock-free
// stacks).
template <std::size_t Size, std::size_t AllocationsPerChunk>
class ChunkList {
// Payload bytes per cell after reserving room for the embedded next_
// link; 0 selects the small-payload Cell<0> specialization.
static constexpr auto CellSize =
(Size > sizeof(void*)) ? Size - sizeof(void*) : 0;
using Chunk_t = Chunk<CellSize, AllocationsPerChunk>;
using Cell_t = Cell<CellSize>;
using ChunkNode = Node<Chunk_t>;
using CellNode = Node<Cell_t*>;
public:
// Meyers singleton: one shared ChunkList per (Size, AllocationsPerChunk).
static ChunkList& getInstance() {
static ChunkList c;
return c;
}
// Pop one ready-made chain of free cells; if none is available,
// allocate a brand-new chunk and hand out its (already linked) cells.
Cell_t* allocateChain() {
CellNode* recentHead = head_.load();
CellNode* currentNext = nullptr;
do {
// If there are no available chains, allocate a new chunk
if (!recentHead) {
ChunkNode* currentHandle;
// Make a new node
auto newChunk = new ChunkNode();
// Add the chunk to the chain (CAS push onto handle_). Chunks are
// only ever pushed here, never freed, so they live until process
// exit.
do {
currentHandle = handle_.load();
newChunk->next_ = currentHandle;
} while (
!handle_.compare_exchange_weak(currentHandle, newChunk));
// The chunk's cells are pre-linked, so the first cell heads a
// complete chain.
return &newChunk->val_.memBlock_[0];
}
// NOTE(review): classic lock-free-stack pop — recentHead may have
// been popped and delete'd by a racing thread between the load and
// this dereference (use-after-free / ABA hazard); confirm this is
// an accepted trade-off for this allocator.
currentNext = recentHead->next_;
} while (!head_.compare_exchange_weak(recentHead, currentNext));
auto retnValue = recentHead->val_;
delete recentHead;
return retnValue;
}
// Push a chain of free cells (headed by newCell) so any thread can
// reuse it later. A null chain is ignored.
void deallocateChain(Cell_t* newCell) {
if (!newCell) {
return;
}
CellNode* currentHead = head_.load();
// Construct a new node to be added to the linked list
CellNode* newHead = new CellNode(newCell);
// Add the chain to the linked list (CAS push; on failure
// currentHead is reloaded and the store is retried).
do {
newHead->next_.store(currentHead, std::memory_order_release);
} while (!head_.compare_exchange_weak(currentHead, newHead));
}
private:
ChunkList() : handle_(nullptr), head_(nullptr) {}
// Stack of every chunk ever allocated (kept alive for the process
// lifetime).
std::atomic<ChunkNode*> handle_;
// Stack of currently available cell chains.
std::atomic<CellNode*> head_;
};
}
#endif

View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Adam Schwalm
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -0,0 +1,48 @@
#ifndef LF_POOL_ALLOC_POOL
#define LF_POOL_ALLOC_POOL
#include <lfpAlloc/Utils.hpp>
#include <lfpAlloc/ChunkList.hpp>
namespace lfpAlloc {
template <std::size_t Size, std::size_t AllocationsPerChunk>
class Pool {
using ChunkList_t = ChunkList<Size, AllocationsPerChunk>;
public:
static constexpr auto CellSize =
(Size > sizeof(void*)) ? Size - sizeof(void*) : 0;
using Cell_t = Cell<CellSize>;
Pool() : head_(nullptr) {}
~Pool() { ChunkList_t::getInstance().deallocateChain(head_); }
void* allocate() {
// Head loaded from head_
Cell_t* currentHead = head_;
Cell_t* next;
// Out of cells to allocate
if (!currentHead) {
currentHead = ChunkList_t::getInstance().allocateChain();
}
next = currentHead->next_;
head_ = next;
return &currentHead->val_;
}
void deallocate(void* p) noexcept {
auto newHead = reinterpret_cast<Cell_t*>(p);
Cell_t* currentHead = head_;
newHead->next_ = currentHead;
head_ = newHead;
}
private:
Cell_t* head_;
};
}
#endif

View File

@@ -0,0 +1,79 @@
#ifndef LF_POOL_DISPATCHER
#define LF_POOL_DISPATCHER
#include <tuple>
#include <cassert>
#include <cstddef>
#include <lfpAlloc/Pool.hpp>
#ifndef LFP_ALLOCATIONS_PER_CHUNK
#define LFP_ALLOCATIONS_PER_CHUNK 64 * 100
#endif
namespace lfpAlloc {
namespace detail {
// Recursive metafunction that builds the tuple type
//   std::tuple<Pool<A * 1>, Pool<A * 2>, ..., Pool<A * Num>>
// where A = alignof(std::max_align_t): one pool per multiple of the
// maximal alignment, generated from smallest to largest.
template <std::size_t Num, uint16_t... Ts>
struct Pools : Pools<Num - 1, alignof(std::max_align_t) * Num, Ts...> {};
// Base case: every size has been generated; expose the tuple type.
template <uint16_t... Size>
struct Pools<0, Size...> {
using type = std::tuple<Pool<Size, LFP_ALLOCATIONS_PER_CHUNK>...>;
};
}
// Routes allocation requests of a given byte size to the smallest pool
// whose cells can hold them. The pool tuple is thread_local, so the hot
// path involves no locking.
template <std::size_t NumPools>
class PoolDispatcher {
public:
    // Serve 'size' bytes from the first pool large enough for it.
    void* allocate(std::size_t size) { return dispatchAllocate<0>(size); }

    // Return 'p' (of request size 'size') to the pool that served it.
    void deallocate(void* p, std::size_t size) noexcept {
        dispatchDeallocate<0>(p, size);
    }

private:
    // One pool tuple per thread.
    thread_local static typename detail::Pools<NumPools>::type pools_;
    static_assert(NumPools > 0, "Invalid number of pools");

    // Recursive case: if pool Index is too small, move on to the next
    // (larger) pool; otherwise allocate from it.
    template <std::size_t Index>
    typename std::enable_if<(Index < NumPools), void*>::type
    dispatchAllocate(std::size_t const& requestSize) {
        if (requestSize > std::get<Index>(pools_).CellSize) {
            return dispatchAllocate<Index + 1>(requestSize);
        }
        return std::get<Index>(pools_).allocate();
    }

    // Termination: no pool can satisfy the request.
    template <std::size_t Index>
    typename std::enable_if<!(Index < NumPools), void*>::type
    dispatchAllocate(std::size_t const&) {
        assert(false && "Invalid allocation size.");
        return nullptr;
    }

    // Recursive case mirroring dispatchAllocate for deallocation.
    template <std::size_t Index>
    typename std::enable_if<(Index < NumPools)>::type
    dispatchDeallocate(void* p, std::size_t const& requestSize) noexcept {
        if (requestSize > std::get<Index>(pools_).CellSize) {
            dispatchDeallocate<Index + 1>(p, requestSize);
        } else {
            std::get<Index>(pools_).deallocate(p);
        }
    }

    // Termination: no pool matches the deallocation size.
    template <std::size_t Index>
    typename std::enable_if<!(Index < NumPools)>::type
    dispatchDeallocate(void*, std::size_t const&) noexcept {
        assert(false && "Invalid deallocation size.");
    }
};
// Definition of the thread-local pool tuple declared in PoolDispatcher.
template <std::size_t NumPools>
thread_local typename detail::Pools<NumPools>::type
PoolDispatcher<NumPools>::pools_;
}
#endif

View File

@@ -0,0 +1,20 @@
#include <cstdint>
namespace lfpAlloc {
namespace detail {
// Compile-time integer logarithm: Log<Val, base>::value == floor(log_base(Val)).
template <std::size_t Val, std::size_t base = 2>
struct Log {
enum { value = 1 + Log<Val / base, base>::value };
};
// Base case: log(1) == 0 in any base.
template <std::size_t base>
struct Log<1, base> {
enum { value = 0 };
};
// Guard for Val == 0 (mathematically undefined); terminates the
// recursion and yields 0 rather than failing to compile.
template <std::size_t base>
struct Log<0, base> {
enum { value = 0 };
};
}
}