godot/thirdparty/embree/kernels/common/alloc.h

// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include "default.h"
#include "device.h"
#include "scene.h"
#include "../builders/primref.h"
#include "../../common/tasking/taskscheduler.h"
namespace embree
{
class FastAllocator
{
/*! maximum supported alignment */
static const size_t maxAlignment = 64;
/*! maximum allocation size */
/* default settings */
//static const size_t defaultBlockSize = 4096;
#define maxAllocationSize size_t(2*1024*1024-maxAlignment)
static const size_t MAX_THREAD_USED_BLOCK_SLOTS = 8;
public:
struct ThreadLocal2;
enum AllocationType { ALIGNED_MALLOC, EMBREE_OS_MALLOC, SHARED, ANY_TYPE };
/*! Per thread structure holding the current memory block. */
struct __aligned(64) ThreadLocal
{
ALIGNED_CLASS_(64);
public:
/*! Constructor for usage with ThreadLocal2 */
__forceinline ThreadLocal (ThreadLocal2* parent)
: parent(parent), ptr(nullptr), cur(0), end(0), allocBlockSize(0), bytesUsed(0), bytesWasted(0) {}
/*! initialize allocator */
void init(FastAllocator* alloc)
{
ptr = nullptr;
cur = end = 0;
bytesUsed = 0;
bytesWasted = 0;
allocBlockSize = 0;
if (alloc) allocBlockSize = alloc->defaultBlockSize;
}
/* Allocate aligned memory from the thread's memory block. */
__forceinline void* malloc(FastAllocator* alloc, size_t bytes, size_t align = 16)
{
/* bind the thread local allocator to the proper FastAllocator*/
parent->bind(alloc);
assert(align <= maxAlignment);
bytesUsed += bytes;
/* try to allocate in local block */
size_t ofs = (align - cur) & (align-1);
cur += bytes + ofs;
if (likely(cur <= end)) { bytesWasted += ofs; return &ptr[cur - bytes]; }
cur -= bytes + ofs;
/* if allocation is too large allocate with parent allocator */
if (4*bytes > allocBlockSize) {
return alloc->malloc(bytes,maxAlignment,false);
}
/* get new partial block if allocation failed */
size_t blockSize = allocBlockSize;
ptr = (char*) alloc->malloc(blockSize,maxAlignment,true);
bytesWasted += end-cur;
cur = 0; end = blockSize;
/* retry allocation */
ofs = (align - cur) & (align-1);
cur += bytes + ofs;
if (likely(cur <= end)) { bytesWasted += ofs; return &ptr[cur - bytes]; }
cur -= bytes + ofs;
/* get new full block if allocation failed */
blockSize = allocBlockSize;
ptr = (char*) alloc->malloc(blockSize,maxAlignment,false);
bytesWasted += end-cur;
cur = 0; end = blockSize;
/* retry allocation */
ofs = (align - cur) & (align-1);
cur += bytes + ofs;
if (likely(cur <= end)) { bytesWasted += ofs; return &ptr[cur - bytes]; }
cur -= bytes + ofs;
/* should never happen as large allocations get handled specially above */
assert(false);
return nullptr;
}
__forceinline size_t getUsedBytes() const { return bytesUsed; }
/*! returns amount of free bytes */
__forceinline size_t getFreeBytes() const { return end-cur; }
/*! returns amount of wasted bytes */
__forceinline size_t getWastedBytes() const { return bytesWasted; }
private:
ThreadLocal2* parent;
char* ptr; //!< pointer to memory block
size_t cur; //!< current location of the allocator
size_t end; //!< end of the memory block
size_t allocBlockSize; //!< block size for allocations
size_t bytesUsed; //!< number of total bytes allocated
size_t bytesWasted; //!< number of bytes wasted
};
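/* Worked example (illustrative only) of the bump-pointer arithmetic in
 * ThreadLocal::malloc above: with cur = 10 and align = 16 the offset is
 * ofs = (16 - 10) & 15 = 6, so the returned allocation starts at offset 16
 * and the 6 skipped bytes are counted as bytesWasted; if cur is already
 * aligned (e.g. cur = 32), ofs = (16 - 32) & 15 = 0 and nothing is wasted. */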
/*! Two thread local structures. */
struct __aligned(64) ThreadLocal2
{
ALIGNED_CLASS_(64);
public:
__forceinline ThreadLocal2()
: alloc(nullptr), alloc0(this), alloc1(this) {}
/*! bind to fast allocator */
__forceinline void bind(FastAllocator* alloc_i)
{
assert(alloc_i);
if (alloc.load() == alloc_i) return;
Lock<MutexSys> lock(mutex);
//if (alloc.load() == alloc_i) return; // not required as only one thread calls bind
if (alloc.load()) {
alloc.load()->bytesUsed += alloc0.getUsedBytes() + alloc1.getUsedBytes();
alloc.load()->bytesFree += alloc0.getFreeBytes() + alloc1.getFreeBytes();
alloc.load()->bytesWasted += alloc0.getWastedBytes() + alloc1.getWastedBytes();
}
alloc0.init(alloc_i);
alloc1.init(alloc_i);
alloc.store(alloc_i);
alloc_i->join(this);
}
/*! unbind from fast allocator */
void unbind(FastAllocator* alloc_i)
{
assert(alloc_i);
if (alloc.load() != alloc_i) return;
Lock<MutexSys> lock(mutex);
if (alloc.load() != alloc_i) return; // required as a different thread calls unbind
alloc.load()->bytesUsed += alloc0.getUsedBytes() + alloc1.getUsedBytes();
alloc.load()->bytesFree += alloc0.getFreeBytes() + alloc1.getFreeBytes();
alloc.load()->bytesWasted += alloc0.getWastedBytes() + alloc1.getWastedBytes();
alloc0.init(nullptr);
alloc1.init(nullptr);
alloc.store(nullptr);
}
public:
MutexSys mutex;
std::atomic<FastAllocator*> alloc; //!< parent allocator
ThreadLocal alloc0;
ThreadLocal alloc1;
};
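/* Note (illustrative only): a ThreadLocal2 lives for the lifetime of its
 * thread and is re-bound lazily. The first ThreadLocal::malloc issued
 * against a different FastAllocator calls bind(), which flushes the usage
 * statistics into the previously bound allocator and registers this
 * ThreadLocal2 with the new allocator via join(); unbind() is invoked from
 * cleanup() and reset(). */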
FastAllocator (Device* device,
bool osAllocation,
bool useUSM = false,
bool blockAllocation = true)
: device(device)
, slotMask(0)
, defaultBlockSize(PAGE_SIZE)
, estimatedSize(0)
, growSize(PAGE_SIZE)
, maxGrowSize(maxAllocationSize)
, usedBlocks(nullptr)
, freeBlocks(nullptr)
, useUSM(useUSM)
, blockAllocation(blockAllocation)
, use_single_mode(false)
, log2_grow_size_scale(0)
, bytesUsed(0)
, bytesFree(0)
, bytesWasted(0)
, atype(osAllocation ? EMBREE_OS_MALLOC : ALIGNED_MALLOC)
, primrefarray(device,0)
{
// -- GODOT start --
// if (osAllocation && useUSM)
// throw std::runtime_error("USM allocation cannot be combined with OS allocation.");
if (osAllocation && useUSM) {
abort();
}
// -- GODOT end --
for (size_t i=0; i<MAX_THREAD_USED_BLOCK_SLOTS; i++)
{
threadUsedBlocks[i] = nullptr;
threadBlocks[i] = nullptr;
//assert(!slotMutex[i].isLocked());
}
}
~FastAllocator () {
clear();
}
/*! returns the device attached to this allocator */
Device* getDevice() {
return device;
}
void share(mvector<PrimRef>& primrefarray_i) {
primrefarray = std::move(primrefarray_i);
}
void unshare(mvector<PrimRef>& primrefarray_o)
{
reset(); // this removes blocks that are allocated inside the shared primref array
primrefarray_o = std::move(primrefarray);
}
/*! returns first fast thread local allocator */
__forceinline ThreadLocal* _threadLocal() {
return &threadLocal2()->alloc0;
}
void setOSallocation(bool flag)
{
atype = flag ? EMBREE_OS_MALLOC : ALIGNED_MALLOC;
}
private:
/*! returns both fast thread local allocators */
__forceinline ThreadLocal2* threadLocal2()
{
ThreadLocal2* alloc = thread_local_allocator2;
if (alloc == nullptr) {
thread_local_allocator2 = alloc = new ThreadLocal2;
Lock<MutexSys> lock(s_thread_local_allocators_lock);
s_thread_local_allocators.push_back(make_unique(alloc));
}
return alloc;
}
public:
__forceinline void join(ThreadLocal2* alloc)
{
Lock<MutexSys> lock(s_thread_local_allocators_lock);
thread_local_allocators.push_back(alloc);
}
public:
struct CachedAllocator
{
__forceinline CachedAllocator(void* ptr)
: alloc(nullptr), talloc0(nullptr), talloc1(nullptr)
{
assert(ptr == nullptr);
}
__forceinline CachedAllocator(FastAllocator* alloc, ThreadLocal2* talloc)
: alloc(alloc), talloc0(&talloc->alloc0), talloc1(alloc->use_single_mode ? &talloc->alloc0 : &talloc->alloc1) {}
__forceinline operator bool () const {
return alloc != nullptr;
}
__forceinline void* operator() (size_t bytes, size_t align = 16) const {
return talloc0->malloc(alloc,bytes,align);
}
__forceinline void* malloc0 (size_t bytes, size_t align = 16) const {
return talloc0->malloc(alloc,bytes,align);
}
__forceinline void* malloc1 (size_t bytes, size_t align = 16) const {
return talloc1->malloc(alloc,bytes,align);
}
public:
FastAllocator* alloc;
ThreadLocal* talloc0;
ThreadLocal* talloc1;
};
__forceinline CachedAllocator getCachedAllocator() {
return CachedAllocator(this,threadLocal2());
}
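/* Usage sketch (illustrative only, not part of the interface): a build
 * thread typically grabs a CachedAllocator once and allocates node and
 * leaf memory through the two thread-local allocators, e.g.
 *
 *   FastAllocator::CachedAllocator alloc = allocator.getCachedAllocator();
 *   void* node = alloc.malloc0(64, 64);   // served by ThreadLocal alloc0
 *   void* leaf = alloc.malloc1(128, 16);  // served by ThreadLocal alloc1
 *
 * where 'allocator' is some FastAllocator instance; in use_single_mode both
 * calls are served by alloc0. */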
/*! Builder interface to create thread local allocator */
struct Create
{
public:
__forceinline Create (FastAllocator* allocator) : allocator(allocator) {}
__forceinline CachedAllocator operator() () const { return allocator->getCachedAllocator(); }
private:
FastAllocator* allocator;
};
void internal_fix_used_blocks()
{
/* move thread local blocks to global block list */
for (size_t i = 0; i < MAX_THREAD_USED_BLOCK_SLOTS; i++)
{
while (threadBlocks[i].load() != nullptr) {
Block* nextUsedBlock = threadBlocks[i].load()->next;
threadBlocks[i].load()->next = usedBlocks.load();
usedBlocks = threadBlocks[i].load();
threadBlocks[i] = nextUsedBlock;
}
threadBlocks[i] = nullptr;
}
}
static const size_t threadLocalAllocOverhead = 20; //! 20 means 5% parallel allocation overhead through unfilled thread local blocks
static const size_t mainAllocOverheadStatic = 20; //! 20 means 5% allocation overhead through unfilled main alloc blocks
static const size_t mainAllocOverheadDynamic = 8; //! 8 means 12.5% allocation overhead through unfilled main alloc blocks
/* calculates a single threaded threshold for the builders such
* that for small scenes the overhead of partly allocated blocks
* per thread is low */
size_t fixSingleThreadThreshold(size_t branchingFactor, size_t defaultThreshold, size_t numPrimitives, size_t bytesEstimated)
{
if (numPrimitives == 0 || bytesEstimated == 0)
return defaultThreshold;
/* calculate block size in bytes to fulfill threadLocalAllocOverhead constraint */
const size_t single_mode_factor = use_single_mode ? 1 : 2;
const size_t threadCount = TaskScheduler::threadCount();
const size_t singleThreadBytes = single_mode_factor*threadLocalAllocOverhead*defaultBlockSize;
/* if we do not have to limit the number of threads use the optimal threshold */
if ( (bytesEstimated+(singleThreadBytes-1))/singleThreadBytes >= threadCount)
return defaultThreshold;
/* otherwise limit number of threads by calculating proper single thread threshold */
else {
double bytesPerPrimitive = double(bytesEstimated)/double(numPrimitives);
return size_t(ceil(branchingFactor*singleThreadBytes/bytesPerPrimitive));
}
}
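/* Numerical sketch (illustrative only) of fixSingleThreadThreshold: with
 * defaultBlockSize = 4096, threadLocalAllocOverhead = 20 and two
 * thread-local allocators (single_mode_factor = 2) we get
 * singleThreadBytes = 2*20*4096 = 163840 bytes. For bytesEstimated = 1 MB
 * and threadCount = 16 only ceil(1048576/163840) = 7 threads fit the
 * overhead budget, so the threshold grows to roughly
 * branchingFactor*163840/bytesPerPrimitive primitives instead of
 * defaultThreshold. */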
__forceinline size_t alignSize(size_t i) {
return (i+127)/128*128;
}
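/* Example (illustrative only): alignSize rounds up to a multiple of 128,
 * e.g. alignSize(1000) = (1000+127)/128*128 = 1024. */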
/*! initializes the grow size */
__forceinline void initGrowSizeAndNumSlots(size_t bytesEstimated, bool fast)
{
/* we do not need single thread local allocator mode */
use_single_mode = false;
/* calculate growSize such that at most mainAllocationOverhead gets wasted when a block stays unused */
size_t mainAllocOverhead = fast ? mainAllocOverheadDynamic : mainAllocOverheadStatic;
size_t blockSize = alignSize(bytesEstimated/mainAllocOverhead);
growSize = maxGrowSize = clamp(blockSize,size_t(1024),maxAllocationSize);
/* if we reached the maxAllocationSize for growSize, we can
* increase the number of allocation slots by still guaranteeing
* the mainAllocationOverhead */
slotMask = 0x0;
if (MAX_THREAD_USED_BLOCK_SLOTS >= 2 && bytesEstimated > 2*mainAllocOverhead*growSize) slotMask = 0x1;
if (MAX_THREAD_USED_BLOCK_SLOTS >= 4 && bytesEstimated > 4*mainAllocOverhead*growSize) slotMask = 0x3;
if (MAX_THREAD_USED_BLOCK_SLOTS >= 8 && bytesEstimated > 8*mainAllocOverhead*growSize) slotMask = 0x7;
if (MAX_THREAD_USED_BLOCK_SLOTS >= 8 && bytesEstimated > 16*mainAllocOverhead*growSize) { growSize *= 2; } /* if the overhead is tiny, double the growSize */
/* set the thread local alloc block size */
size_t defaultBlockSizeSwitch = PAGE_SIZE+maxAlignment;
/* for sufficiently large scene we can increase the defaultBlockSize over the defaultBlockSizeSwitch size */
#if 0 // we do not do this as a block size of 4160 is for some reason best for KNL
const size_t threadCount = TaskScheduler::threadCount();
const size_t single_mode_factor = use_single_mode ? 1 : 2;
const size_t singleThreadBytes = single_mode_factor*threadLocalAllocOverhead*defaultBlockSizeSwitch;
if ((bytesEstimated+(singleThreadBytes-1))/singleThreadBytes >= threadCount)
defaultBlockSize = min(max(defaultBlockSizeSwitch,bytesEstimated/(single_mode_factor*threadLocalAllocOverhead*threadCount)),growSize);
/* otherwise we grow the defaultBlockSize up to defaultBlockSizeSwitch */
else
#endif
defaultBlockSize = clamp(blockSize,size_t(1024),defaultBlockSizeSwitch);
if (bytesEstimated == 0) {
maxGrowSize = maxAllocationSize; // special mode if builder cannot estimate tree size
defaultBlockSize = defaultBlockSizeSwitch;
}
log2_grow_size_scale = 0;
if (device->alloc_main_block_size != 0) growSize = device->alloc_main_block_size;
if (device->alloc_num_main_slots >= 1 ) slotMask = 0x0;
if (device->alloc_num_main_slots >= 2 ) slotMask = 0x1;
if (device->alloc_num_main_slots >= 4 ) slotMask = 0x3;
if (device->alloc_num_main_slots >= 8 ) slotMask = 0x7;
if (device->alloc_thread_block_size != 0) defaultBlockSize = device->alloc_thread_block_size;
if (device->alloc_single_thread_alloc != -1) use_single_mode = device->alloc_single_thread_alloc;
}
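/* Numerical sketch (illustrative only) of the slot selection above: for
 * bytesEstimated = 100 MB and mainAllocOverhead = 20 (static case),
 * blockSize = alignSize(100 MB / 20) is about 5 MB and gets clamped to
 * maxAllocationSize (~2 MB), so growSize is ~2 MB. Since 100 MB exceeds
 * 2*20*2 MB but not 4*20*2 MB, slotMask becomes 0x1, i.e. main allocations
 * are spread over two thread block slots. */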
/*! initializes the allocator */
void init(size_t bytesAllocate, size_t bytesReserve, size_t bytesEstimate)
{
internal_fix_used_blocks();
/* distribute the allocation to multiple thread block slots */
slotMask = MAX_THREAD_USED_BLOCK_SLOTS-1; // FIXME: remove
if (usedBlocks.load() || freeBlocks.load()) { reset(); return; }
if (bytesReserve == 0) bytesReserve = bytesAllocate;
freeBlocks = Block::create(device,useUSM,bytesAllocate,bytesReserve,nullptr,atype);
estimatedSize = bytesEstimate;
initGrowSizeAndNumSlots(bytesEstimate,true);
}
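/* Lifecycle sketch (illustrative only): a builder typically drives the
 * allocator as follows, where 'allocator' is some FastAllocator instance:
 *
 *   allocator.init(bytesAllocate, bytesReserve, bytesEstimate); // or init_estimate()
 *   FastAllocator::CachedAllocator talloc = allocator.getCachedAllocator();
 *   ... // allocate nodes and leaves via talloc
 *   allocator.cleanup(); // unbind thread-local allocators after the build
 *
 * reset() keeps the blocks for reuse by a later build, clear() frees them. */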
/*! initializes the allocator */
void init_estimate(size_t bytesEstimate)
{
internal_fix_used_blocks();
if (usedBlocks.load() || freeBlocks.load()) { reset(); return; }
/* single allocator mode ? */
estimatedSize = bytesEstimate;
//initGrowSizeAndNumSlots(bytesEstimate,false);
initGrowSizeAndNumSlots(bytesEstimate,false);
}
/*! frees state not required after build */
__forceinline void cleanup()
{
internal_fix_used_blocks();
/* unbind all thread local allocators */
for (auto alloc : thread_local_allocators) alloc->unbind(this);
thread_local_allocators.clear();
}
/*! resets the allocator, memory blocks get reused */
void reset ()
{
internal_fix_used_blocks();
bytesUsed.store(0);
bytesFree.store(0);
bytesWasted.store(0);
/* reset all used blocks and move them to begin of free block list */
while (usedBlocks.load() != nullptr) {
usedBlocks.load()->reset_block();
Block* nextUsedBlock = usedBlocks.load()->next;
usedBlocks.load()->next = freeBlocks.load();
freeBlocks = usedBlocks.load();
usedBlocks = nextUsedBlock;
}
/* remove all shared blocks as they are re-added during build */
freeBlocks.store(Block::remove_shared_blocks(freeBlocks.load()));
for (size_t i=0; i<MAX_THREAD_USED_BLOCK_SLOTS; i++)
{
threadUsedBlocks[i] = nullptr;
threadBlocks[i] = nullptr;
}
/* unbind all thread local allocators */
for (auto alloc : thread_local_allocators) alloc->unbind(this);
thread_local_allocators.clear();
}
/*! frees all allocated memory */
__forceinline void clear()
{
cleanup();
bytesUsed.store(0);
bytesFree.store(0);
bytesWasted.store(0);
if (usedBlocks.load() != nullptr) usedBlocks.load()->clear_list(device,useUSM); usedBlocks = nullptr;
if (freeBlocks.load() != nullptr) freeBlocks.load()->clear_list(device,useUSM); freeBlocks = nullptr;
for (size_t i=0; i<MAX_THREAD_USED_BLOCK_SLOTS; i++) {
threadUsedBlocks[i] = nullptr;
threadBlocks[i] = nullptr;
}
primrefarray.clear();
}
__forceinline size_t incGrowSizeScale()
{
size_t scale = log2_grow_size_scale.fetch_add(1)+1;
return size_t(1) << min(size_t(16),scale);
}
/*! thread safe allocation of memory */
void* malloc(size_t& bytes, size_t align, bool partial)
{
assert(align <= maxAlignment);
while (true)
{
/* allocate using current block */
size_t threadID = TaskScheduler::threadID();
size_t slot = threadID & slotMask;
Block* myUsedBlocks = threadUsedBlocks[slot];
if (myUsedBlocks) {
void* ptr = myUsedBlocks->malloc(device,bytes,align,partial);
// -- GODOT start --
// if (ptr == nullptr && !blockAllocation)
// throw std::bad_alloc();
if (ptr == nullptr && !blockAllocation) {
abort();
}
// -- GODOT end --
if (ptr) return ptr;
}
/* throw error if allocation is too large */
if (bytes > maxAllocationSize)
throw_RTCError(RTC_ERROR_UNKNOWN,"allocation is too large");
/* parallel block creation in case of no freeBlocks, avoids single global mutex */
if (likely(freeBlocks.load() == nullptr))
{
Lock<MutexSys> lock(slotMutex[slot]);
if (myUsedBlocks == threadUsedBlocks[slot]) {
const size_t alignedBytes = (bytes+(align-1)) & ~(align-1);
const size_t allocSize = max(min(growSize,maxGrowSize),alignedBytes);
assert(allocSize >= bytes);
threadBlocks[slot] = threadUsedBlocks[slot] = Block::create(device,useUSM,allocSize,allocSize,threadBlocks[slot],atype); // FIXME: a large allocation might throw away a block here!
// FIXME: a direct allocation should allocate inside the block here, and not in the next loop! a different thread could do some allocation and make the large allocation fail.
}
continue;
}
/* if this fails allocate new block */
{
Lock<MutexSys> lock(mutex);
if (myUsedBlocks == threadUsedBlocks[slot])
{
if (freeBlocks.load() != nullptr) {
Block* nextFreeBlock = freeBlocks.load()->next;
freeBlocks.load()->next = usedBlocks;
__memory_barrier();
usedBlocks = freeBlocks.load();
threadUsedBlocks[slot] = freeBlocks.load();
freeBlocks = nextFreeBlock;
} else {
const size_t allocSize = min(growSize*incGrowSizeScale(),maxGrowSize);
usedBlocks = threadUsedBlocks[slot] = Block::create(device,useUSM,allocSize,allocSize,usedBlocks,atype); // FIXME: a large allocation should get delivered directly, like above!
}
}
}
}
}
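/* Note (illustrative only): the slot index above is threadID & slotMask, so
 * with slotMask = 0x3 threads are spread over four independently locked
 * block slots (e.g. threadID 5 maps to slot 1), which avoids funneling all
 * parallel block creation through a single global mutex. */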
/*! add new block */
void addBlock(void* ptr, ssize_t bytes)
{
Lock<MutexSys> lock(mutex);
const size_t sizeof_Header = offsetof(Block,data[0]);
void* aptr = (void*) ((((size_t)ptr)+maxAlignment-1) & ~(maxAlignment-1));
size_t ofs = (size_t) aptr - (size_t) ptr;
bytes -= ofs;
if (bytes < 4096) return; // ignore empty or very small blocks
freeBlocks = new (aptr) Block(SHARED,bytes-sizeof_Header,bytes-sizeof_Header,freeBlocks,ofs);
}
/* special allocation only used by the morton builder, a single time for each build */
void* specialAlloc(size_t bytes)
{
assert(freeBlocks.load() != nullptr && freeBlocks.load()->getBlockAllocatedBytes() >= bytes);
return freeBlocks.load()->ptr();
}
struct Statistics
{
Statistics ()
: bytesUsed(0), bytesFree(0), bytesWasted(0) {}
Statistics (size_t bytesUsed, size_t bytesFree, size_t bytesWasted)
: bytesUsed(bytesUsed), bytesFree(bytesFree), bytesWasted(bytesWasted) {}
Statistics (FastAllocator* alloc, AllocationType atype, bool huge_pages = false)
: bytesUsed(0), bytesFree(0), bytesWasted(0)
{
Block* usedBlocks = alloc->usedBlocks.load();
Block* freeBlocks = alloc->freeBlocks.load();
if (usedBlocks) bytesUsed += usedBlocks->getUsedBytes(atype,huge_pages);
if (freeBlocks) bytesFree += freeBlocks->getAllocatedBytes(atype,huge_pages);
if (usedBlocks) bytesFree += usedBlocks->getFreeBytes(atype,huge_pages);
if (freeBlocks) bytesWasted += freeBlocks->getWastedBytes(atype,huge_pages);
if (usedBlocks) bytesWasted += usedBlocks->getWastedBytes(atype,huge_pages);
}
std::string str(size_t numPrimitives)
{
std::stringstream str;
str.setf(std::ios::fixed, std::ios::floatfield);
str << "used = " << std::setw(7) << std::setprecision(3) << 1E-6f*bytesUsed << " MB, "
<< "free = " << std::setw(7) << std::setprecision(3) << 1E-6f*bytesFree << " MB, "
<< "wasted = " << std::setw(7) << std::setprecision(3) << 1E-6f*bytesWasted << " MB, "
<< "total = " << std::setw(7) << std::setprecision(3) << 1E-6f*bytesAllocatedTotal() << " MB, "
<< "#bytes/prim = " << std::setw(6) << std::setprecision(2) << double(bytesAllocatedTotal())/double(numPrimitives);
return str.str();
}
friend Statistics operator+ ( const Statistics& a, const Statistics& b)
{
return Statistics(a.bytesUsed+b.bytesUsed,
a.bytesFree+b.bytesFree,
a.bytesWasted+b.bytesWasted);
}
size_t bytesAllocatedTotal() const {
return bytesUsed + bytesFree + bytesWasted;
}
public:
size_t bytesUsed;
size_t bytesFree;
size_t bytesWasted;
};
Statistics getStatistics(AllocationType atype, bool huge_pages = false) {
return Statistics(this,atype,huge_pages);
}
size_t getUsedBytes() {
return bytesUsed;
}
size_t getWastedBytes() {
return bytesWasted;
}
struct AllStatistics
{
AllStatistics (FastAllocator* alloc)
: bytesUsed(alloc->bytesUsed),
bytesFree(alloc->bytesFree),
bytesWasted(alloc->bytesWasted),
stat_all(alloc,ANY_TYPE),
stat_malloc(alloc,ALIGNED_MALLOC),
stat_4K(alloc,EMBREE_OS_MALLOC,false),
stat_2M(alloc,EMBREE_OS_MALLOC,true),
stat_shared(alloc,SHARED) {}
AllStatistics (size_t bytesUsed,
size_t bytesFree,
size_t bytesWasted,
Statistics stat_all,
Statistics stat_malloc,
Statistics stat_4K,
Statistics stat_2M,
Statistics stat_shared)
: bytesUsed(bytesUsed),
bytesFree(bytesFree),
bytesWasted(bytesWasted),
stat_all(stat_all),
stat_malloc(stat_malloc),
stat_4K(stat_4K),
stat_2M(stat_2M),
stat_shared(stat_shared) {}
friend AllStatistics operator+ (const AllStatistics& a, const AllStatistics& b)
{
return AllStatistics(a.bytesUsed+b.bytesUsed,
a.bytesFree+b.bytesFree,
a.bytesWasted+b.bytesWasted,
a.stat_all + b.stat_all,
a.stat_malloc + b.stat_malloc,
a.stat_4K + b.stat_4K,
a.stat_2M + b.stat_2M,
a.stat_shared + b.stat_shared);
}
void print(size_t numPrimitives)
{
std::stringstream str0;
str0.setf(std::ios::fixed, std::ios::floatfield);
str0 << " alloc : "
<< "used = " << std::setw(7) << std::setprecision(3) << 1E-6f*bytesUsed << " MB, "
<< " "
<< "#bytes/prim = " << std::setw(6) << std::setprecision(2) << double(bytesUsed)/double(numPrimitives);
std::cout << str0.str() << std::endl;
std::stringstream str1;
str1.setf(std::ios::fixed, std::ios::floatfield);
str1 << " alloc : "
<< "used = " << std::setw(7) << std::setprecision(3) << 1E-6f*bytesUsed << " MB, "
<< "free = " << std::setw(7) << std::setprecision(3) << 1E-6f*bytesFree << " MB, "
<< "wasted = " << std::setw(7) << std::setprecision(3) << 1E-6f*bytesWasted << " MB, "
<< "total = " << std::setw(7) << std::setprecision(3) << 1E-6f*(bytesUsed+bytesFree+bytesWasted) << " MB, "
<< "#bytes/prim = " << std::setw(6) << std::setprecision(2) << double(bytesUsed+bytesFree+bytesWasted)/double(numPrimitives);
std::cout << str1.str() << std::endl;
std::cout << " total : " << stat_all.str(numPrimitives) << std::endl;
std::cout << " 4K : " << stat_4K.str(numPrimitives) << std::endl;
std::cout << " 2M : " << stat_2M.str(numPrimitives) << std::endl;
std::cout << " malloc: " << stat_malloc.str(numPrimitives) << std::endl;
std::cout << " shared: " << stat_shared.str(numPrimitives) << std::endl;
}
private:
size_t bytesUsed;
size_t bytesFree;
size_t bytesWasted;
Statistics stat_all;
Statistics stat_malloc;
Statistics stat_4K;
Statistics stat_2M;
Statistics stat_shared;
};
void print_blocks()
{
std::cout << " estimatedSize = " << estimatedSize
<< ", slotMask = " << slotMask
<< ", use_single_mode = " << use_single_mode
<< ", maxGrowSize = " << maxGrowSize
<< ", defaultBlockSize = " << defaultBlockSize
<< std::endl;
std::cout << " used blocks = ";
if (usedBlocks.load() != nullptr) usedBlocks.load()->print_list();
std::cout << "[END]" << std::endl;
std::cout << " free blocks = ";
if (freeBlocks.load() != nullptr) freeBlocks.load()->print_list();
std::cout << "[END]" << std::endl;
}
private:
struct Block
{
__forceinline static void* blockAlignedMalloc(Device* device, bool useUSM, size_t bytesAllocate, size_t bytesAlignment)
{
if (useUSM) return device->malloc(bytesAllocate, bytesAlignment);
else return alignedMalloc (bytesAllocate, bytesAlignment);
}
__forceinline static void blockAlignedFree(Device* device, bool useUSM, void* ptr)
{
if (useUSM) return device->free(ptr);
else return alignedFree(ptr);
}
static Block* create(Device* device, bool useUSM, size_t bytesAllocate, size_t bytesReserve, Block* next, AllocationType atype)
{
/* We avoid using os_malloc for small blocks as this could
* cause a risk of fragmenting the virtual address space and
* reach the limit of vm.max_map_count = 65k under Linux. */
if (atype == EMBREE_OS_MALLOC && bytesAllocate < maxAllocationSize)
atype = ALIGNED_MALLOC;
/* we need to additionally allocate some header */
const size_t sizeof_Header = offsetof(Block,data[0]);
bytesAllocate = sizeof_Header+bytesAllocate;
bytesReserve = sizeof_Header+bytesReserve;
/* consume full 4k pages when using os_malloc */
if (atype == EMBREE_OS_MALLOC) {
bytesAllocate = ((bytesAllocate+PAGE_SIZE-1) & ~(PAGE_SIZE-1));
bytesReserve = ((bytesReserve +PAGE_SIZE-1) & ~(PAGE_SIZE-1));
}
/* either use alignedMalloc or os_malloc */
void *ptr = nullptr;
if (atype == ALIGNED_MALLOC)
{
/* special handling for default block size */
if (bytesAllocate == (2*PAGE_SIZE_2M))
{
const size_t alignment = maxAlignment;
if (device) device->memoryMonitor(bytesAllocate+alignment,false);
ptr = blockAlignedMalloc(device,useUSM,bytesAllocate,alignment);
/* give hint to transparently convert these pages to 2MB pages */
const size_t ptr_aligned_begin = ((size_t)ptr) & ~size_t(PAGE_SIZE_2M-1);
os_advise((void*)(ptr_aligned_begin + 0),PAGE_SIZE_2M); // may fail if no memory mapped before block
os_advise((void*)(ptr_aligned_begin + 1*PAGE_SIZE_2M),PAGE_SIZE_2M);
os_advise((void*)(ptr_aligned_begin + 2*PAGE_SIZE_2M),PAGE_SIZE_2M); // may fail if no memory mapped after block
return new (ptr) Block(ALIGNED_MALLOC,bytesAllocate-sizeof_Header,bytesAllocate-sizeof_Header,next,alignment);
}
else
{
const size_t alignment = maxAlignment;
if (device) device->memoryMonitor(bytesAllocate+alignment,false);
ptr = blockAlignedMalloc(device,useUSM,bytesAllocate,alignment);
return new (ptr) Block(ALIGNED_MALLOC,bytesAllocate-sizeof_Header,bytesAllocate-sizeof_Header,next,alignment);
}
}
else if (atype == EMBREE_OS_MALLOC)
{
if (device) device->memoryMonitor(bytesAllocate,false);
bool huge_pages; ptr = os_malloc(bytesReserve,huge_pages);
return new (ptr) Block(EMBREE_OS_MALLOC,bytesAllocate-sizeof_Header,bytesReserve-sizeof_Header,next,0,huge_pages);
}
else
assert(false);
return NULL;
}
Block (AllocationType atype, size_t bytesAllocate, size_t bytesReserve, Block* next, size_t wasted, bool huge_pages = false)
: cur(0), allocEnd(bytesAllocate), reserveEnd(bytesReserve), next(next), wasted(wasted), atype(atype), huge_pages(huge_pages)
{
assert((((size_t)&data[0]) & (maxAlignment-1)) == 0);
}
static Block* remove_shared_blocks(Block* head)
{
Block** prev_next = &head;
for (Block* block = head; block; block = block->next) {
if (block->atype == SHARED) *prev_next = block->next;
else prev_next = &block->next;
}
return head;
}
void clear_list(Device* device, bool useUSM)
{
Block* block = this;
while (block) {
Block* next = block->next;
block->clear_block(device, useUSM);
block = next;
}
}
void clear_block (Device* device, bool useUSM)
{
const size_t sizeof_Header = offsetof(Block,data[0]);
const ssize_t sizeof_Alloced = wasted+sizeof_Header+getBlockAllocatedBytes();
if (atype == ALIGNED_MALLOC) {
blockAlignedFree(device, useUSM, this);
if (device) device->memoryMonitor(-sizeof_Alloced,true);
}
else if (atype == EMBREE_OS_MALLOC) {
size_t sizeof_This = sizeof_Header+reserveEnd;
os_free(this,sizeof_This,huge_pages);
if (device) device->memoryMonitor(-sizeof_Alloced,true);
}
else /* if (atype == SHARED) */ {
}
}
void* malloc(MemoryMonitorInterface* device, size_t& bytes_in, size_t align, bool partial)
{
size_t bytes = bytes_in;
assert(align <= maxAlignment);
bytes = (bytes+(align-1)) & ~(align-1);
if (unlikely(cur+bytes > reserveEnd && !partial)) return nullptr;
const size_t i = cur.fetch_add(bytes);
if (unlikely(i+bytes > reserveEnd && !partial)) return nullptr;
if (unlikely(i > reserveEnd)) return nullptr;
bytes_in = bytes = min(bytes,reserveEnd-i);
if (i+bytes > allocEnd) {
if (device) device->memoryMonitor(i+bytes-max(i,allocEnd),true);
}
return &data[i];
}
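/* Note (illustrative only): Block::malloc is lock-free; concurrent callers
 * claim disjoint ranges via cur.fetch_add. For example, two threads each
 * requesting 64 aligned bytes from a fresh block receive offsets 0 and 64,
 * and a partial allocation near reserveEnd is truncated to the remaining
 * bytes, which is reported back through bytes_in. */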
void* ptr() {
return &data[cur];
}
void reset_block ()
{
allocEnd = max(allocEnd,(size_t)cur);
cur = 0;
}
size_t getBlockUsedBytes() const {
return min(size_t(cur),reserveEnd);
}
size_t getBlockFreeBytes() const {
return getBlockAllocatedBytes() - getBlockUsedBytes();
}
size_t getBlockAllocatedBytes() const {
return min(max(allocEnd,size_t(cur)),reserveEnd);
}
size_t getBlockWastedBytes() const {
const size_t sizeof_Header = offsetof(Block,data[0]);
return sizeof_Header + wasted;
}
size_t getBlockReservedBytes() const {
return reserveEnd;
}
bool hasType(AllocationType atype_i, bool huge_pages_i) const
{
if (atype_i == ANY_TYPE ) return true;
else if (atype == EMBREE_OS_MALLOC) return atype_i == atype && huge_pages_i == huge_pages;
else return atype_i == atype;
}
size_t getUsedBytes(AllocationType atype, bool huge_pages = false) const {
size_t bytes = 0;
for (const Block* block = this; block; block = block->next) {
if (!block->hasType(atype,huge_pages)) continue;
bytes += block->getBlockUsedBytes();
}
return bytes;
}
size_t getFreeBytes(AllocationType atype, bool huge_pages = false) const {
size_t bytes = 0;
for (const Block* block = this; block; block = block->next) {
if (!block->hasType(atype,huge_pages)) continue;
bytes += block->getBlockFreeBytes();
}
return bytes;
}
size_t getWastedBytes(AllocationType atype, bool huge_pages = false) const {
size_t bytes = 0;
for (const Block* block = this; block; block = block->next) {
if (!block->hasType(atype,huge_pages)) continue;
bytes += block->getBlockWastedBytes();
}
return bytes;
}
size_t getAllocatedBytes(AllocationType atype, bool huge_pages = false) const {
size_t bytes = 0;
for (const Block* block = this; block; block = block->next) {
if (!block->hasType(atype,huge_pages)) continue;
bytes += block->getBlockAllocatedBytes();
}
return bytes;
}
void print_list ()
{
for (const Block* block = this; block; block = block->next)
block->print_block();
}
void print_block() const
{
if (atype == ALIGNED_MALLOC) std::cout << "A";
else if (atype == EMBREE_OS_MALLOC) std::cout << "O";
else if (atype == SHARED) std::cout << "S";
if (huge_pages) std::cout << "H";
size_t bytesUsed = getBlockUsedBytes();
size_t bytesFree = getBlockFreeBytes();
size_t bytesWasted = getBlockWastedBytes();
std::cout << "[" << bytesUsed << ", " << bytesFree << ", " << bytesWasted << "] ";
}
public:
std::atomic<size_t> cur; //!< current location of the allocator
std::atomic<size_t> allocEnd; //!< end of the allocated memory region
std::atomic<size_t> reserveEnd; //!< end of the reserved memory region
Block* next; //!< pointer to next block in list
size_t wasted; //!< amount of memory wasted through block alignment
AllocationType atype; //!< allocation mode of the block
bool huge_pages; //!< whether the block uses huge pages
char align[maxAlignment-5*sizeof(size_t)-sizeof(AllocationType)-sizeof(bool)]; //!< align data to maxAlignment
char data[1]; //!< here starts memory to use for allocations
};
public:
static const size_t blockHeaderSize = offsetof(Block,data[0]);
private:
Device* device;
size_t slotMask;
size_t defaultBlockSize;
size_t estimatedSize;
size_t growSize;
size_t maxGrowSize;
MutexSys mutex;
MutexSys slotMutex[MAX_THREAD_USED_BLOCK_SLOTS];
std::atomic<Block*> threadUsedBlocks[MAX_THREAD_USED_BLOCK_SLOTS];
std::atomic<Block*> threadBlocks[MAX_THREAD_USED_BLOCK_SLOTS];
std::atomic<Block*> usedBlocks;
std::atomic<Block*> freeBlocks;
bool useUSM;
bool blockAllocation = true;
bool use_single_mode;
std::atomic<size_t> log2_grow_size_scale; //!< log2 of scaling factor for grow size // FIXME: remove
std::atomic<size_t> bytesUsed;
std::atomic<size_t> bytesFree;
std::atomic<size_t> bytesWasted;
static __thread ThreadLocal2* thread_local_allocator2;
static MutexSys s_thread_local_allocators_lock;
static std::vector<std::unique_ptr<ThreadLocal2>> s_thread_local_allocators;
std::vector<ThreadLocal2*> thread_local_allocators;
AllocationType atype;
mvector<PrimRef> primrefarray; //!< primrefarray used to allocate nodes
};
}