40#if defined(_SC_PAGE_SIZE)
41#define MHD_SC_PAGESIZE _SC_PAGE_SIZE
42#elif defined(_SC_PAGESIZE)
43#define MHD_SC_PAGESIZE _SC_PAGESIZE
48#if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS)
49#define MAP_ANONYMOUS MAP_ANON
52#define MAP_FAILED NULL
53#elif ! defined(MAP_FAILED)
54#define MAP_FAILED ((void*) -1)
/* Allocation alignment for pool blocks: two pointer widths, which
 * satisfies the strictest fundamental alignment on common ABIs. */
#define ALIGN_SIZE (2 * sizeof (void *))

/* Round 'n' up to the nearest multiple of ALIGN_SIZE.
 * Integer divide-then-multiply form; works for any ALIGN_SIZE value,
 * not just powers of two. */
#define ROUND_TO_ALIGN(n) \
  ((((n) + (ALIGN_SIZE - 1)) / ALIGN_SIZE) * ALIGN_SIZE)
69#define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
70#elif defined(PAGESIZE)
71#define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
73#define MHD_DEF_PAGE_SIZE_ (4096)
89 result = sysconf (MHD_SC_PAGESIZE);
147 struct MemoryPool *pool;
150 pool = malloc (
sizeof (
struct MemoryPool));
153#if defined(MAP_ANONYMOUS) || defined(_WIN32)
154 if ( (max <= 32 * 1024) ||
164#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
165 pool->memory = mmap (
NULL,
167 PROT_READ | PROT_WRITE,
168 MAP_PRIVATE | MAP_ANONYMOUS,
172 pool->memory = VirtualAlloc (
NULL,
174 MEM_COMMIT | MEM_RESERVE,
184 pool->memory = malloc (alloc_size);
185 if (
NULL == pool->memory)
190 pool->is_mmap =
false;
192#if defined(MAP_ANONYMOUS) || defined(_WIN32)
195 pool->is_mmap =
true;
199 pool->end = alloc_size;
200 pool->size = alloc_size;
217 mhd_assert (pool->size >= pool->end - pool->pos);
221#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
222 munmap (pool->memory,
225 VirtualFree (pool->memory,
245 mhd_assert (pool->size >= pool->end - pool->pos);
246 return (pool->end - pool->pos);
270 mhd_assert (pool->size >= pool->end - pool->pos);
272 if ( (0 == asize) && (0 != size) )
274 if ( (pool->pos + asize > pool->end) ||
275 (pool->pos + asize < pool->pos))
279 ret = &pool->memory[pool->end - asize];
284 ret = &pool->memory[pool->pos];
318 mhd_assert (pool->size >= pool->end - pool->pos);
321 mhd_assert (old ==
NULL || pool->memory + pool->size >= (uint8_t*) old
324 mhd_assert (old ==
NULL || pool->memory + pool->pos > (uint8_t*) old);
328 const size_t old_offset = (uint8_t*) old - pool->memory;
329 const bool shrinking = (old_size > new_size);
333 memset ((uint8_t*) old + new_size, 0, old_size - new_size);
340 if ( (new_apos > pool->end) ||
341 (new_apos < pool->pos) )
345 pool->pos = new_apos;
353 if ( ( (0 == asize) &&
355 (asize > pool->end - pool->pos) )
358 new_blc = pool->memory + pool->pos;
364 memcpy (new_blc, old, old_size);
366 memset (old, 0, old_size);
392 mhd_assert (pool->size >= pool->end - pool->pos);
396 mhd_assert (keep ==
NULL || pool->memory + pool->size >= (uint8_t*) keep
398 if ( (
NULL != keep) &&
399 (keep != pool->memory) )
402 memmove (pool->memory,
407 if (pool->size > copy_bytes)
411 to_zero = pool->size - copy_bytes;
416 uint8_t *recommit_addr;
419 recommit_addr = pool->memory + pool->size - to_recommit;
423 if (VirtualFree (recommit_addr,
427 to_zero -= to_recommit;
429 if (recommit_addr != VirtualAlloc (recommit_addr,
437 memset (&pool->memory[copy_bytes],
442 pool->end = pool->size;
void * MHD_pool_reallocate(struct MemoryPool *pool, void *old, size_t old_size, size_t new_size)
void MHD_pool_destroy(struct MemoryPool *pool)
size_t MHD_pool_get_free(struct MemoryPool *pool)
void * MHD_pool_reset(struct MemoryPool *pool, void *keep, size_t copy_bytes, size_t new_size)
struct MemoryPool * MHD_pool_create(size_t max)
void * MHD_pool_allocate(struct MemoryPool *pool, size_t size, int from_end)
void MHD_init_mem_pools_(void)
#define MHD_DEF_PAGE_SIZE_
static size_t MHD_sys_page_size_
#define ROUND_TO_ALIGN(n)
memory pool; mostly used for efficient (de)allocation for each connection and bounding memory use for...