#include <support/lockedpool.h>
#include <support/cleanse.h>

#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif

#ifdef WIN32
#define _WIN32_WINNT 0x0501
#define WIN32_LEAN_AND_MEAN 1
#include <windows.h>
#else
#include <sys/mman.h>     // for mmap
#include <sys/resource.h> // for getrlimit
#include <limits.h>       // for PAGESIZE
#include <unistd.h>       // for sysconf
#endif

#include <algorithm>

/** Align up to power of 2 */
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
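// Illustrative sketch, not part of the original file: align_up rounds a size up to the
// next multiple of a power-of-two alignment by adding align-1 and masking the low bits.
// A standalone check (align_up copied from above so the snippet compiles on its own):
#include <assert.h>
#include <stddef.h>

static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}

int main()
{
    assert(align_up(100, 16) == 112);     // 100 rounded up to the next multiple of 16
    assert(align_up(4096, 4096) == 4096); // already aligned sizes are unchanged
    assert(align_up(1, 4096) == 4096);
    return 0;
}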
Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
    base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
{
    // Start with one free chunk that covers the entire arena
    chunks_free.emplace(base, size_in);
}

void* Arena::alloc(size_t size)
{
    // Round to next multiple of alignment
    size = align_up(size, alignment);

    // Don't handle zero-sized chunks
    if (size == 0)
        return nullptr;

    // Pick the first free chunk that is large enough
    auto it = std::find_if(chunks_free.begin(), chunks_free.end(),
        [=](const std::map<char*, size_t>::value_type& chunk){ return chunk.second >= size; });
    if (it == chunks_free.end())
        return nullptr;

    // Create the used chunk, taking its space from the end of the free chunk
    auto alloced = chunks_used.emplace(it->first + it->second - size, size).first;
    if (!(it->second -= size))
        chunks_free.erase(it);
    return reinterpret_cast<void*>(alloced->first);
}
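// Illustrative sketch, not part of the original file, assuming <support/lockedpool.h> is on
// the include path: an Arena can manage any pre-allocated region (a plain static buffer here,
// standing in for locked pages). Requests are rounded up to the alignment and carved from the
// end of the first free chunk that is large enough.
#include <support/lockedpool.h>
#include <cassert>

int main()
{
    static char backing[4096];
    Arena arena(backing, sizeof(backing), 16); // 16-byte minimum alignment

    void* a = arena.alloc(100);                // internally rounded up to 112 bytes
    void* b = arena.alloc(200);
    assert(a != nullptr && b != nullptr && a != b);

    arena.free(a);
    arena.free(b);
    assert(arena.stats().used == 0);           // everything returned to the free map
    return 0;
}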
/** Extend the chunk at `it` if `other` begins exactly at its end */
template <class Iterator, class Pair> bool extend(Iterator it, const Pair& other) {
    if (it->first + it->second == other.first) {
        it->second += other.second;
        return true;
    }
    return false;
}
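// Self-contained sketch, not part of the original file (buffer and sizes are made up), of the
// coalescing test that extend() performs: a free chunk that ends exactly where a newly freed
// chunk begins simply absorbs it, keeping the free map small.
#include <cassert>
#include <cstddef>
#include <map>

int main()
{
    static char buf[256];
    std::map<char*, std::size_t> chunks_free;
    chunks_free.emplace(buf, 128);              // existing free chunk [buf, buf+128)

    char* freed_base = buf + 128;               // chunk being returned, adjacent to the above
    std::size_t freed_size = 64;

    auto it = chunks_free.begin();
    if (it->first + it->second == freed_base)   // the same condition extend() checks
        it->second += freed_size;               // merge instead of inserting a new entry

    assert(chunks_free.size() == 1 && chunks_free.begin()->second == 192);
    return 0;
}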
void Arena::free(void *ptr)
{
    if (ptr == nullptr) return; // freeing the null pointer is a no-op

    // Remove the chunk from the used map; an unknown address is an invalid or double free
    auto i = chunks_used.find(static_cast<char*>(ptr));
    if (i == chunks_used.end())
        throw std::runtime_error("Arena: invalid or double free");
    auto freed = *i;
    chunks_used.erase(i);

    // Return the space to the free map, coalescing with adjacent free chunks
    auto next = chunks_free.upper_bound(freed.first);
    auto prev = (next == chunks_free.begin()) ? chunks_free.end() : std::prev(next);
    if (prev == chunks_free.end() || !extend(prev, freed))
        prev = chunks_free.emplace_hint(next, freed);
    if (next != chunks_free.end() && extend(prev, *next))
        chunks_free.erase(next);
}

Arena::Stats Arena::stats() const
{
    Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() };
    for (const auto& chunk: chunks_used)
        r.used += chunk.second;
    for (const auto& chunk: chunks_free)
        r.free += chunk.second;
    r.total = r.used + r.free;
    return r;
}
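// Illustrative sketch, not part of the original file, assuming <support/lockedpool.h>:
// freeing an address the arena does not consider in use throws std::runtime_error, which is
// how invalid and double frees are surfaced to the caller.
#include <support/lockedpool.h>
#include <cassert>
#include <stdexcept>

int main()
{
    static char backing[1024];
    Arena arena(backing, sizeof(backing), 16);

    void* p = arena.alloc(64);
    arena.free(p);

    bool caught = false;
    try {
        arena.free(p);                      // second free of the same chunk
    } catch (const std::runtime_error&) {
        caught = true;                      // "Arena: invalid or double free"
    }
    assert(caught);
    return 0;
}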
#ifdef ARENA_DEBUG
void printchunk(char* base, size_t sz, bool used) {
    // Print the chunk's address (not the bytes it points to), its size and the used flag
    std::cout <<
        "0x" << std::hex << std::setw(16) << std::setfill('0') << static_cast<void*>(base) <<
        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
        " 0x" << used << std::endl;
}
void Arena::walk() const
{
    for (const auto& chunk: chunks_used)
        printchunk(chunk.first, chunk.second, true);
    std::cout << std::endl;
    for (const auto& chunk: chunks_free)
        printchunk(chunk.first, chunk.second, false);
    std::cout << std::endl;
}
#endif
#ifdef WIN32
/** LockedPageAllocator specialized for Windows. */
class Win32LockedPageAllocator: public LockedPageAllocator
{
public:
    Win32LockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};

Win32LockedPageAllocator::Win32LockedPageAllocator()
{
    // Determine the system page size in bytes
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock keeps the pages out of the pagefile in practice,
        // although it does not formally guarantee it
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}

void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void*>(addr), len);
}

size_t Win32LockedPageAllocator::GetLimit()
{
    // TODO: is there a limit on Windows, and how can it be queried?
    return std::numeric_limits<size_t>::max();
}
#endif
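// Standalone sketch, not part of the original file, of the Windows calls used above: reserve
// and commit one page, attempt to pin it with VirtualLock, then unlock and release it again.
// Windows-only; error handling is kept to a bare minimum.
#include <windows.h>
#include <cstdio>

int main()
{
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    SIZE_T len = si.dwPageSize;

    void* addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (!addr) return 1;

    BOOL locked = VirtualLock(addr, len);   // best effort; may fail under memory pressure
    std::printf("locked: %d\n", locked ? 1 : 0);

    if (locked) VirtualUnlock(addr, len);
    VirtualFree(addr, 0, MEM_RELEASE);
    return 0;
}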
#ifndef WIN32
/** LockedPageAllocator specialized for OSes that don't try to be special snowflakes. */
class PosixLockedPageAllocator: public LockedPageAllocator
{
public:
    PosixLockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};

PosixLockedPageAllocator::PosixLockedPageAllocator()
{
    // Determine the page size
#if defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
}
// Some systems do not define MAP_ANONYMOUS yet and define the deprecated MAP_ANON instead
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    void *addr;
    len = align_up(len, page_size);
    addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (addr) {
        *lockingSuccess = mlock(addr, len) == 0;
    }
    return addr;
}

void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    munmap(addr, len);
}

size_t PosixLockedPageAllocator::GetLimit()
{
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
}
#endif
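// Standalone sketch, not part of the original file, of the POSIX calls used above: map one
// anonymous page, attempt to pin it with mlock, report the RLIMIT_MEMLOCK soft limit, then
// unmap. Written for Linux; error handling is kept to a bare minimum.
#include <sys/mman.h>
#include <sys/resource.h>
#include <unistd.h>
#include <cstdio>

int main()
{
    size_t len = static_cast<size_t>(sysconf(_SC_PAGESIZE));

    void* addr = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) return 1;

    bool locked = (mlock(addr, len) == 0);  // fails when RLIMIT_MEMLOCK is exhausted
    std::printf("locked: %d\n", locked ? 1 : 0);

    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0 && rlim.rlim_cur != RLIM_INFINITY)
        std::printf("RLIMIT_MEMLOCK (soft): %llu bytes\n",
                    static_cast<unsigned long long>(rlim.rlim_cur));

    if (locked) munlock(addr, len);
    munmap(addr, len);
    return 0;
}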
LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
    allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
{
}

void* LockedPool::alloc(size_t size)
{
    std::lock_guard<std::mutex> lock(mutex);

    // Don't handle impossible sizes
    if (size == 0 || size > ARENA_SIZE)
        return nullptr;

    // Try allocating from each current arena
    for (auto &arena: arenas) {
        void *addr = arena.alloc(size);
        if (addr) {
            return addr;
        }
    }
    // If that fails, create a new arena and allocate from it
    if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
        return arenas.back().alloc(size);
    }
    return nullptr;
}
void LockedPool::free(void *ptr)
{
    std::lock_guard<std::mutex> lock(mutex);
    // TODO: this linear search could be avoided by keeping a map of arena extents to arena
    for (auto &arena: arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
}
LockedPool::Stats LockedPool::stats() const
{
    std::lock_guard<std::mutex> lock(mutex);
    LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
    for (const auto &arena: arenas) {
        Arena::Stats i = arena.stats();
        r.used += i.used;
        r.free += i.free;
        r.total += i.total;
        r.chunks_used += i.chunks_used;
        r.chunks_free += i.chunks_free;
    }
    return r;
}

bool LockedPool::new_arena(size_t size, size_t align)
{
    bool locked;
    // Cap the first arena by the process locking limit so that it can at least be locked
    if (arenas.empty()) {
        size_t limit = allocator->GetLimit();
        if (limit > 0)
            size = std::min(size, limit);
    }
    void *addr = allocator->AllocateLocked(size, &locked);
    if (!addr)
        return false;
    if (locked) {
        cumulative_bytes_locked += size;
    } else if (lf_cb && !lf_cb()) { // locking failed and the callback asks to give up
        allocator->FreeLocked(addr, size);
        return false;
    }
    arenas.emplace_back(allocator.get(), addr, size, align);
    return true;
}
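// Illustrative sketch, not part of the original file (the helper name is made up): pool-wide
// statistics are the sum of the per-arena numbers plus the count of successfully locked bytes.
#include <cstdio>

static void print_pool_stats(const LockedPool& pool)
{
    LockedPool::Stats s = pool.stats();
    std::printf("used: %zu free: %zu total: %zu locked: %zu chunks: %zu used / %zu free\n",
                s.used, s.free, s.total, s.locked, s.chunks_used, s.chunks_free);
}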
LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
{
}

void LockedPoolManager::CreateInstance()
{
    // A function-local static is initialized on first use and deinitialized
    // after all objects that use it are done with it
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
#endif
    static LockedPoolManager instance(std::move(allocator));
    LockedPoolManager::_instance = &instance;
}
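// Illustrative sketch, not part of the original file (the helper name is made up): typical
// callers never build a pool themselves, they go through the process-wide singleton.
// Instance() runs CreateInstance() once via std::call_once on first use. The buffer is wiped
// with memory_cleanse() before being handed back to the pool.
#include <cstddef>
#include <cstring>

static void store_secret(const unsigned char* key, std::size_t keylen)
{
    LockedPool& pool = LockedPoolManager::Instance();
    void* buf = pool.alloc(keylen);
    if (!buf) return;                  // pool exhausted or impossible size
    std::memcpy(buf, key, keylen);     // key material now lives in (best-effort) locked pages

    // ... use the key ...

    memory_cleanse(buf, keylen);       // wipe before handing the chunk back
    pool.free(buf);
}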
static std::once_flag init_flag
size_t alignment
Minimum chunk alignment.
std::mutex mutex
Mutex protects access to this pool's data structures, including arenas.
void * AllocateLocked(size_t len, bool *lockingSuccess) override
Allocate and lock memory pages.
std::list< LockedPageArena > arenas
static const size_t ARENA_ALIGN
Chunk alignment.
LockedPool(std::unique_ptr< LockedPageAllocator > allocator, LockingFailed_Callback lf_cb_in=nullptr)
Create a new LockedPool.
bool extend(Iterator it, const Pair &other)
LockingFailed_Callback lf_cb
LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align)
LockedPageAllocator
OS-dependent allocation and deallocation of locked/pinned memory pages.
LockedPageAllocator * allocator
LockedPoolManager
Singleton class to keep track of locked (i.e. non-swappable) memory, for use in std::allocator templates.
void * alloc(size_t size)
Allocate size bytes from this arena.
void memory_cleanse(void *ptr, size_t len)
void FreeLocked(void *addr, size_t len) override
Unlock and free memory pages.
Stats stats() const
Get arena usage statistics.
static LockedPoolManager * _instance
void * alloc(size_t size)
Allocate size bytes from this arena.
virtual void FreeLocked(void *addr, size_t len)=0
Unlock and free memory pages.
static const size_t ARENA_SIZE
Size of one arena of locked memory.
std::map< char *, size_t > chunks_used
static bool LockingFailed()
Called when locking fails, warn the user here.
LockedPool
Pool for locked memory chunks.
size_t GetLimit() override
Get the total limit on the amount of memory that may be locked by this process, in bytes.
void free(void *ptr)
Free a previously allocated chunk of memory.
void free(void *ptr)
Free a previously allocated chunk of memory.
char * base
Base address of arena.
PosixLockedPageAllocator
LockedPageAllocator specialized for OSes that don't try to be special snowflakes.
PosixLockedPageAllocator()
bool new_arena(size_t size, size_t align)
LockedPoolManager(std::unique_ptr< LockedPageAllocator > allocator)
Stats stats() const
Get pool usage statistics.
static void CreateInstance()
Create a new LockedPoolManager specialized to the OS.
size_t cumulative_bytes_locked
std::map< char *, size_t > chunks_free
Map of chunk address to chunk information.
Arena(void *base, size_t size, size_t alignment)
std::unique_ptr< LockedPageAllocator > allocator
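The descriptions above note that LockedPoolManager exists "for use in std::allocator templates". Below is a minimal sketch of such an allocator; it is not the project's own allocator, and the <support/lockedpool.h> and <support/cleanse.h> include paths as well as the type names are assumptions for illustration. It draws storage from the singleton pool and wipes it before returning it.

#include <support/cleanse.h>
#include <support/lockedpool.h>
#include <cstddef>
#include <new>
#include <string>

template <typename T>
struct locked_pool_allocator {
    using value_type = T;

    locked_pool_allocator() noexcept = default;
    template <typename U>
    locked_pool_allocator(const locked_pool_allocator<U>&) noexcept {}

    T* allocate(std::size_t n)
    {
        // Request raw bytes from the process-wide locked pool
        void* p = LockedPoolManager::Instance().alloc(n * sizeof(T));
        if (!p) throw std::bad_alloc();
        return static_cast<T*>(p);
    }
    void deallocate(T* p, std::size_t n)
    {
        if (!p) return;
        memory_cleanse(p, n * sizeof(T));          // wipe secrets before releasing the chunk
        LockedPoolManager::Instance().free(p);
    }
};

template <typename T, typename U>
bool operator==(const locked_pool_allocator<T>&, const locked_pool_allocator<U>&) { return true; }
template <typename T, typename U>
bool operator!=(const locked_pool_allocator<T>&, const locked_pool_allocator<U>&) { return false; }

// Example: a string type whose character buffer lives in (best-effort) locked memory.
using secure_string = std::basic_string<char, std::char_traits<char>, locked_pool_allocator<char>>;

Because every allocation is served from the same small pool of locked arenas, an allocator like this is only appropriate for short secrets such as keys and passphrases, not for bulk data.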