2012-11-24 18:42:56 +00:00
|
|
|
/* wmem_allocator_block.c
|
2013-07-27 22:27:28 +00:00
|
|
|
* Wireshark Memory Manager Large-Block Allocator (version 3)
|
|
|
|
* Copyright 2013, Evan Huus <eapache@gmail.com>
|
2012-11-24 18:42:56 +00:00
|
|
|
*
|
|
|
|
* $Id$
|
|
|
|
*
|
|
|
|
* Wireshark - Network traffic analyzer
|
|
|
|
* By Gerald Combs <gerald@wireshark.org>
|
|
|
|
* Copyright 1998 Gerald Combs
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License along
|
|
|
|
* with this program; if not, write to the Free Software Foundation, Inc.,
|
|
|
|
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
|
|
*/
|
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
#include <stdio.h>
|
2012-11-24 18:42:56 +00:00
|
|
|
#include <string.h>
|
|
|
|
|
|
|
|
#include <glib.h>
|
|
|
|
|
|
|
|
#include "wmem_core.h"
|
|
|
|
#include "wmem_allocator.h"
|
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
/* This has turned into a very interesting exercise in algorithms and data
|
|
|
|
* structures.
|
2013-02-08 01:38:03 +00:00
|
|
|
*
|
2013-07-27 22:27:28 +00:00
|
|
|
* HISTORY
|
2013-02-08 01:38:03 +00:00
|
|
|
*
|
2013-07-27 22:27:28 +00:00
|
|
|
* Version 1 of this allocator was embedded in the original emem framework. It
|
|
|
|
* didn't have to handle realloc or free, so it was very simple: it just grabbed
|
|
|
|
* a block from the OS and served allocations sequentially out of that until it
|
|
|
|
* ran out, then allocated a new block. The old block was never revisited, so
|
|
|
|
* it generally had a bit of wasted space at the end, but the waste was
|
|
|
|
* small enough that it was simply ignored. This allocator provided very fast
|
|
|
|
* constant-time allocation for any request that didn't require a new block from
|
|
|
|
* the OS, and that cost could be amortized away.
|
2013-02-08 01:38:03 +00:00
|
|
|
*
|
2013-07-27 22:27:28 +00:00
|
|
|
* Version 2 of this allocator was prompted by the need to support realloc and
|
|
|
|
* free in wmem. The original version simply didn't save enough metadata to do
|
|
|
|
* this, so I added a layer on top to make it possible. The primary principle
|
|
|
|
* was the same (allocate sequentially out of big blocks) with a bit of extra
|
|
|
|
* magic. Allocations were still fast constant-time, and frees were as well.
|
|
|
|
* Large parts of that design are still present in this one, but for more
|
|
|
|
* details see older versions of this file from git or svn.
|
2013-02-26 04:42:26 +00:00
|
|
|
*
|
2013-07-27 22:27:28 +00:00
|
|
|
* Version 3 of this allocator was written to address some issues that
|
|
|
|
* eventually showed up with version 2 under real-world usage. Specifically,
|
|
|
|
* version 2 dealt very poorly with memory fragmentation, almost never reusing
|
|
|
|
* freed blocks and choosing to just keep allocating from the master block
|
|
|
|
* instead. This led to particularly poor behaviour under the tick-tock loads
|
|
|
|
* (alloc/free/alloc/free or alloc/alloc/free/alloc/alloc/free/ or ...) that
|
|
|
|
* showed up in a couple of different protocol dissectors (TCP, Kafka).
|
2013-02-08 01:38:03 +00:00
|
|
|
*
|
2013-07-27 22:27:28 +00:00
|
|
|
* BLOCKS AND CHUNKS
|
2013-02-08 01:38:03 +00:00
|
|
|
*
|
2013-07-27 22:27:28 +00:00
|
|
|
* As in previous versions, allocations typically happen sequentially out of
|
|
|
|
* large OS-level blocks. A linked list of OS blocks is maintained to keep track
|
|
|
|
* of all blocks (used or not) currently owned by the allocator. Each block is
|
|
|
|
* divided into chunks, which represent allocations and free sections (a block
|
|
|
|
* is initialized with one large, free, chunk). Each chunk is prefixed with a
|
|
|
|
* wmem_block_chunk_t structure, which is a short metadata header (8 bytes,
|
|
|
|
* regardless of 32 or 64-bit architecture) that contains the length of the
|
|
|
|
* chunk, the length of the previous chunk, a flag marking the chunk as free or
|
|
|
|
* used, and a flag marking the last chunk in a block. This serves to implement
|
|
|
|
* an inline sequential doubly-linked list of all the chunks in each block.
|
|
|
|
* A block with three chunks might look something like this:
|
2013-02-08 01:38:03 +00:00
|
|
|
*
|
2013-07-27 22:27:28 +00:00
|
|
|
* 0 _________________________
|
|
|
|
* ^ _______________ / ______________ \ __________
|
|
|
|
* ||--|-----/---------------||--------/--------------||--\-----/----------||
|
|
|
|
* || prv | len | body || prv | len | body || prv | len | body ||
|
|
|
|
* ||------------------------||--/--------------------||-------------------||
|
|
|
|
* \__________________________/
|
2013-02-08 01:38:03 +00:00
|
|
|
*
|
2013-07-27 22:27:28 +00:00
|
|
|
*
|
|
|
|
* When allocating, a free chunk is found (more on that later) and split into
|
|
|
|
* two chunks: the first of the requested size and the second containing any
|
|
|
|
* remaining free. The first is marked used and returned to the caller.
|
|
|
|
*
|
|
|
|
* When freeing, the chunk in question is marked as free. Its neighbouring
|
|
|
|
* chunks are then checked; if either of them are free, the consecutive free
|
|
|
|
* chunks are merged into a single larger free chunk. Induction can show that
|
|
|
|
* applying this operation consistently prevents us ever having consecutive
|
|
|
|
* free chunks.
|
|
|
|
*
|
|
|
|
* Free chunks (because they are not being used for anything else) each store an
|
|
|
|
* additional pair of pointers (see the wmem_block_free_t structure) that form
|
|
|
|
* the backbone of the data structures used to track free chunks.
|
|
|
|
*
|
|
|
|
* MASTER AND RECYCLER
|
|
|
|
*
|
|
|
|
* The extra pair of pointers in free chunks are used to build two doubly-linked
|
|
|
|
* lists: the master and the recycler. The recycler is circular, the master is
|
|
|
|
* a stack.
|
|
|
|
*
|
|
|
|
* The master stack is only populated by chunks from new OS-level blocks,
|
|
|
|
* so every chunk in this list is guaranteed to be able to serve any allocation
|
|
|
|
* request (the allocator will not serve requests larger than its block size).
|
|
|
|
* The chunk at the head of the master list shrinks as it serves requests. When
|
|
|
|
* it is too small to serve the current request, it is popped and inserted into
|
|
|
|
* the recycler. If the master list is empty, a new OS-level block is allocated,
|
|
|
|
* and its chunk is pushed onto the master stack.
|
|
|
|
*
|
|
|
|
* The recycler is populated by 'leftovers' from the master, as well as any
|
|
|
|
* chunks that were returned to the allocator via a call to free(). Although the
|
|
|
|
* recycler is circular, we will refer to the element referenced from the
|
|
|
|
* allocator as the 'head' of the list for convenience. The primary operation on
|
|
|
|
* the recycler is called cycling it. In this operation, the head is compared
|
|
|
|
* with its clockwise neighbour. If the neighbour is as large or larger, it
|
|
|
|
* becomes the head (the list rotates counter-clockwise). If the neighbour is
|
|
|
|
* smaller, then it is removed from its location and inserted as the counter-
|
|
|
|
* clockwise neighbour of the head (the list still rotates counter-clockwise,
|
|
|
|
* but the head element is held fixed while the rest of the list spins). This
|
|
|
|
* operation has the following properties:
|
|
|
|
* - fast constant time
|
|
|
|
* - once the largest chunk is at the head, it remains at the head
|
|
|
|
* - more iterations increases the probability that the largest chunk will be
|
|
|
|
* the head (for a list with n items, n iterations guarantees that the
|
|
|
|
* largest chunk will be the head).
|
|
|
|
*
|
|
|
|
* ALLOCATING
|
|
|
|
*
|
|
|
|
* When an allocation request is received, the allocator first attempts to
|
|
|
|
* satisfy it with the chunk at the head of the recycler. If that does not
|
|
|
|
* succeed, the request is satisfied by the master list instead. Regardless of
|
|
|
|
* which chunk satisfied the request, the recycler is always cycled.
|
2013-02-08 01:38:03 +00:00
|
|
|
*/
|
|
|
|
|
2012-12-27 23:11:47 +00:00
|
|
|
/* https://mail.gnome.org/archives/gtk-devel-list/2004-December/msg00091.html
 * The 2*sizeof(size_t) alignment here is borrowed from GNU libc, so it should
 * be good most everywhere. It is more conservative than is needed on some
 * 64-bit platforms, but ia64 does require a 16-byte alignment. The SIMD
 * extensions for x86 and ppc32 would want a larger alignment than this, but
 * we don't need to do better than malloc.
 */
#define WMEM_ALIGN_AMOUNT (2 * sizeof (gsize))

/* Round SIZE up to the nearest multiple of WMEM_ALIGN_AMOUNT (which is a
 * power of two, so the add-then-mask idiom is exact).
 *
 * FIX: the previous form, (SIZE) + AMOUNT - ((SIZE) & (AMOUNT - 1)), added a
 * full alignment unit of padding even when SIZE was already aligned (e.g.
 * 16 -> 32), wasting up to WMEM_ALIGN_AMOUNT bytes on every chunk. The
 * round-up-and-mask form below returns SIZE unchanged in that case and the
 * correctly aligned next multiple otherwise; the result is always >= SIZE
 * and always aligned, so all callers' invariants still hold. */
#define WMEM_ALIGN_SIZE(SIZE) (((SIZE) + (WMEM_ALIGN_AMOUNT - 1)) & \
        ~(WMEM_ALIGN_AMOUNT - 1))
|
2013-02-08 01:38:03 +00:00
|
|
|
|
|
|
|
/* When required, allocate more memory from the OS in chunks of this size.
 * 8MB is a pretty arbitrary value - it's big enough that it should last a while
 * and small enough that a mostly-unused one doesn't waste *too* much. It's
 * also a nice power of two, of course. */
/* NOTE: wmem_block_verify_chunk_chain() asserts that the chunk lengths in
 * every block sum to exactly this value, so every OS-level block must be
 * allocated at precisely this size. */
#define WMEM_BLOCK_SIZE (8 * 1024 * 1024)
|
2012-11-24 18:42:56 +00:00
|
|
|
|
2013-02-08 01:38:03 +00:00
|
|
|
/* The header for a single 'chunk' of memory as returned from alloc/realloc.
 * Two guint32 bitfield words, i.e. 8 bytes regardless of 32/64-bit
 * architecture (see the design notes above). Chunks tile a block completely,
 * so these headers form an inline doubly-linked list of all chunks. */
typedef struct _wmem_block_chunk_t {
    guint32 used:1;     /* TRUE while this chunk is serving an allocation */
    guint32 prev:31;    /* full length of the preceding chunk, or 0 for the
                         * first chunk in a block (see WMEM_CHUNK_PREV) */

    guint32 last:1;     /* TRUE if this is the final chunk in its block
                         * (see WMEM_CHUNK_NEXT) */
    guint32 len:31;     /* full length of this chunk, INCLUDING this header
                         * (WMEM_CHUNK_DATA_LEN subtracts the header) */
} wmem_block_chunk_t;
|
|
|
|
|
|
|
|
/* Handy macros for navigating the chunks in a block as if they were a
 * doubly-linked list. */

/* Step backwards by the stored 'prev' length; NULL at the first chunk of a
 * block (prev == 0). */
#define WMEM_CHUNK_PREV(CHUNK) ((CHUNK)->prev \
        ? ((wmem_block_chunk_t*)(((guint8*)(CHUNK)) - (CHUNK)->prev)) \
        : NULL)

/* Step forwards by this chunk's own full length; NULL at the last chunk of a
 * block. */
#define WMEM_CHUNK_NEXT(CHUNK) ((CHUNK)->last \
        ? NULL \
        : ((wmem_block_chunk_t*)(((guint8*)(CHUNK)) + (CHUNK)->len)))

/* other handy chunk macros */
/* The caller-visible payload begins immediately after the chunk header. */
#define WMEM_CHUNK_TO_DATA(CHUNK) ((void*)((CHUNK) + 1))
#define WMEM_DATA_TO_CHUNK(DATA) (((wmem_block_chunk_t*)(DATA)) - 1)
/* 'len' includes the header, so usable payload is the difference. */
#define WMEM_CHUNK_DATA_LEN(CHUNK) ((CHUNK)->len - sizeof(wmem_block_chunk_t))
|
2013-02-08 01:38:03 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
/* This is what the 'data' section of a chunk contains if it is free: the
 * pair of links that thread the chunk into the master stack or the circular
 * recycler list. Chunks whose payload is smaller than this struct cannot be
 * tracked in either list (see wmem_block_add_to_recycler). */
typedef struct _wmem_block_free_t {
    wmem_block_chunk_t *prev, *next;
} wmem_block_free_t;

/* Handy macro for accessing the free-header of a chunk */
#define WMEM_GET_FREE(CHUNK) ((wmem_block_free_t*)WMEM_CHUNK_TO_DATA(CHUNK))
|
2013-02-08 01:38:03 +00:00
|
|
|
|
2012-11-24 18:42:56 +00:00
|
|
|
/* Private state of one block allocator instance. */
typedef struct _wmem_block_allocator_t {
    GSList *block_list;                   /* every OS-level block we own */
    wmem_block_chunk_t *master_head;      /* top of the master stack */
    wmem_block_chunk_t *recycler_head;    /* 'head' of the circular recycler */
} wmem_block_allocator_t;
|
|
|
|
|
2013-03-24 00:45:32 +00:00
|
|
|
/* DEBUG AND TEST */
|
2013-03-19 15:32:42 +00:00
|
|
|
static void
|
2013-03-24 00:45:32 +00:00
|
|
|
wmem_block_verify_chunk_chain(wmem_block_chunk_t *chunk)
|
2013-03-19 15:32:42 +00:00
|
|
|
{
|
|
|
|
guint32 total_len = 0;
|
|
|
|
|
2013-03-27 02:28:45 +00:00
|
|
|
g_assert(chunk->prev == 0);
|
2013-03-19 15:32:42 +00:00
|
|
|
|
2013-03-20 00:10:07 +00:00
|
|
|
do {
|
2013-03-19 15:32:42 +00:00
|
|
|
total_len += chunk->len;
|
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
g_assert(chunk->len >= sizeof(wmem_block_chunk_t));
|
|
|
|
|
2013-03-19 15:32:42 +00:00
|
|
|
if (WMEM_CHUNK_NEXT(chunk)) {
|
2013-03-27 02:28:45 +00:00
|
|
|
g_assert(chunk->len == WMEM_CHUNK_NEXT(chunk)->prev);
|
2013-03-19 15:32:42 +00:00
|
|
|
}
|
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
if (!chunk->used && !chunk->last &&
|
|
|
|
WMEM_CHUNK_DATA_LEN(chunk) > sizeof(wmem_block_free_t)) {
|
|
|
|
g_assert(WMEM_GET_FREE(chunk)->next);
|
|
|
|
g_assert(WMEM_GET_FREE(chunk)->prev);
|
|
|
|
}
|
|
|
|
|
2013-03-19 15:32:42 +00:00
|
|
|
chunk = WMEM_CHUNK_NEXT(chunk);
|
2013-03-20 00:10:07 +00:00
|
|
|
} while (chunk);
|
2013-03-19 15:32:42 +00:00
|
|
|
|
2013-03-27 02:28:45 +00:00
|
|
|
g_assert(total_len == WMEM_BLOCK_SIZE);
|
2013-03-19 15:32:42 +00:00
|
|
|
}
|
|
|
|
|
2013-03-08 18:50:34 +00:00
|
|
|
static void
|
2013-07-27 22:27:28 +00:00
|
|
|
wmem_block_verify_master_list(wmem_block_allocator_t *allocator)
|
2013-03-08 18:50:34 +00:00
|
|
|
{
|
2013-07-27 22:27:28 +00:00
|
|
|
wmem_block_chunk_t *cur;
|
|
|
|
wmem_block_free_t *cur_free;
|
2013-03-08 18:50:34 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
cur = allocator->master_head;
|
|
|
|
if (!cur) {
|
|
|
|
return;
|
2013-03-08 18:50:34 +00:00
|
|
|
}
|
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
g_assert(WMEM_GET_FREE(cur)->prev == NULL);
|
2013-03-08 18:50:34 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
while (cur) {
|
|
|
|
cur_free = WMEM_GET_FREE(cur);
|
2013-03-08 18:50:34 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
g_assert(! cur->used);
|
|
|
|
|
|
|
|
if (cur_free->next) {
|
|
|
|
g_assert(WMEM_GET_FREE(cur_free->next)->prev == cur);
|
2013-03-08 18:50:34 +00:00
|
|
|
}
|
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
if (cur != allocator->master_head) {
|
|
|
|
g_assert(cur->len == WMEM_BLOCK_SIZE);
|
2013-03-19 15:32:42 +00:00
|
|
|
}
|
2013-03-08 18:50:34 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
cur = cur_free->next;
|
2013-03-08 18:50:34 +00:00
|
|
|
}
|
2013-07-27 22:27:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
wmem_block_verify_recycler(wmem_block_allocator_t *allocator)
|
|
|
|
{
|
|
|
|
wmem_block_chunk_t *cur;
|
|
|
|
wmem_block_free_t *cur_free;
|
2013-03-08 18:50:34 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
cur = allocator->recycler_head;
|
|
|
|
if (!cur) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
do {
|
|
|
|
cur_free = WMEM_GET_FREE(cur);
|
|
|
|
|
|
|
|
g_assert(! cur->used);
|
|
|
|
|
|
|
|
g_assert(cur_free->prev);
|
|
|
|
g_assert(cur_free->next);
|
|
|
|
|
|
|
|
g_assert(WMEM_GET_FREE(cur_free->prev)->next == cur);
|
|
|
|
g_assert(WMEM_GET_FREE(cur_free->next)->prev == cur);
|
|
|
|
|
|
|
|
cur = cur_free->next;
|
|
|
|
} while (cur != allocator->recycler_head);
|
2013-03-08 18:50:34 +00:00
|
|
|
}
|
2013-03-24 00:45:32 +00:00
|
|
|
|
|
|
|
void
|
|
|
|
wmem_block_verify(wmem_allocator_t *allocator)
|
|
|
|
{
|
|
|
|
GSList *tmp;
|
|
|
|
wmem_block_allocator_t *private_allocator;
|
|
|
|
|
|
|
|
/* Normally it would be bad for an allocator helper function to depend
|
|
|
|
* on receiving the right type of allocator, but this is for testing only
|
|
|
|
* and is not part of any real API. */
|
|
|
|
g_assert(allocator->type == WMEM_ALLOCATOR_BLOCK);
|
2013-07-26 16:42:06 +00:00
|
|
|
|
2013-03-24 00:45:32 +00:00
|
|
|
private_allocator = (wmem_block_allocator_t*) allocator->private_data;
|
|
|
|
|
|
|
|
if (private_allocator->block_list == NULL) {
|
2013-07-27 22:27:28 +00:00
|
|
|
g_assert(! private_allocator->master_head);
|
|
|
|
g_assert(! private_allocator->recycler_head);
|
2013-03-24 00:45:32 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
wmem_block_verify_master_list(private_allocator);
|
|
|
|
wmem_block_verify_recycler(private_allocator);
|
2013-03-24 00:45:32 +00:00
|
|
|
|
|
|
|
tmp = private_allocator->block_list;
|
|
|
|
while (tmp) {
|
|
|
|
wmem_block_verify_chunk_chain((wmem_block_chunk_t *)tmp->data);
|
|
|
|
tmp = tmp->next;
|
|
|
|
}
|
|
|
|
}
|
2013-03-08 18:50:34 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
/* MASTER/RECYCLER HELPERS */

/* Cycles the recycler. See the design notes at the top of this file for more
 * details: the head is compared with its clockwise neighbour, and the ring
 * either rotates whole or spins beneath a fixed head, so repeated calls tend
 * to surface the largest chunk at the head. */
static void
wmem_block_cycle_recycler(wmem_block_allocator_t *allocator)
{
    wmem_block_chunk_t *chunk;
    wmem_block_free_t  *free_chunk;

    chunk = allocator->recycler_head;

    /* An empty recycler has nothing to cycle. */
    if (! chunk) {
        return;
    }

    free_chunk = WMEM_GET_FREE(chunk);

    if (free_chunk->next->len < chunk->len) {
        /* Hold the current head fixed during rotation: the head is spliced
         * out of the ring and re-inserted just past its (smaller) clockwise
         * neighbour, so recycler_head keeps pointing at the same chunk while
         * the rest of the ring moves. The statement order below matters:
         * free_chunk->next is only overwritten after both of the head's old
         * neighbours have been re-linked around it. */
        WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
        WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;

        free_chunk->prev = free_chunk->next;
        free_chunk->next = WMEM_GET_FREE(free_chunk->next)->next;

        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
        WMEM_GET_FREE(free_chunk->prev)->next = chunk;
    }
    else {
        /* Neighbour is at least as large: just rotate everything, making
         * the neighbour the new head. */
        allocator->recycler_head = free_chunk->next;
    }
}
|
|
|
|
|
|
|
|
/* Adds a chunk to the recycler. The chunk must already be marked free. */
static void
wmem_block_add_to_recycler(wmem_block_allocator_t *allocator,
                           wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    g_assert(! chunk->used);

    /* A chunk whose payload cannot hold the free-list links is simply left
     * untracked; it can still be reclaimed later by neighbour merging. */
    if (WMEM_CHUNK_DATA_LEN(chunk) < sizeof(wmem_block_free_t)) {
        return;
    }

    free_chunk = WMEM_GET_FREE(chunk);

    if (! allocator->recycler_head) {
        /* First one: a single-entry circular list points at itself. */
        free_chunk->next = chunk;
        free_chunk->prev = chunk;
        allocator->recycler_head = chunk;
    }
    else {
        /* Insert as the counter-clockwise neighbour of the current head. */
        free_chunk->next = allocator->recycler_head;
        free_chunk->prev = WMEM_GET_FREE(allocator->recycler_head)->prev;

        WMEM_GET_FREE(free_chunk->next)->prev = chunk;
        WMEM_GET_FREE(free_chunk->prev)->next = chunk;

        /* Cheaply preserve the largest-chunk-at-head property when the new
         * chunk beats the current head. */
        if (chunk->len > allocator->recycler_head->len) {
            allocator->recycler_head = chunk;
        }
    }
}
|
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
/* Pushes a chunk onto the master stack. */
|
2012-11-24 18:42:56 +00:00
|
|
|
static void
|
2013-07-27 22:27:28 +00:00
|
|
|
wmem_block_push_master(wmem_block_allocator_t *allocator,
|
|
|
|
wmem_block_chunk_t *chunk)
|
2012-11-24 18:42:56 +00:00
|
|
|
{
|
2013-07-27 22:27:28 +00:00
|
|
|
wmem_block_free_t *free_chunk;
|
2013-02-08 01:38:03 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
free_chunk = WMEM_GET_FREE(chunk);
|
|
|
|
free_chunk->prev = NULL;
|
|
|
|
free_chunk->next = allocator->master_head;
|
|
|
|
if (free_chunk->next) {
|
|
|
|
WMEM_GET_FREE(free_chunk->next)->prev = chunk;
|
|
|
|
}
|
|
|
|
allocator->master_head = chunk;
|
|
|
|
}
|
2013-02-08 01:38:03 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
/* Removes the top chunk from the master stack. */
|
|
|
|
static void
|
|
|
|
wmem_block_pop_master(wmem_block_allocator_t *allocator)
|
|
|
|
{
|
|
|
|
wmem_block_chunk_t *chunk;
|
|
|
|
wmem_block_free_t *free_chunk;
|
2013-02-08 01:38:03 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
chunk = allocator->master_head;
|
2013-03-08 18:50:34 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
if (!chunk) {
|
|
|
|
return;
|
2013-02-08 01:38:03 +00:00
|
|
|
}
|
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
free_chunk = WMEM_GET_FREE(chunk);
|
2013-02-08 01:38:03 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
allocator->master_head = free_chunk->next;
|
|
|
|
if (free_chunk->next) {
|
|
|
|
WMEM_GET_FREE(free_chunk->next)->prev = NULL;
|
|
|
|
}
|
2012-11-24 18:42:56 +00:00
|
|
|
}
|
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
/* Helper removes the chunk from whichever free list it's in (master stack or
 * recycler). The chunk must be free. */
static void
wmem_block_unfree(wmem_block_allocator_t *allocator,
                  wmem_block_chunk_t *chunk)
{
    wmem_block_free_t *free_chunk;

    g_assert (! chunk->used);

    /* If it is the top of the master stack, popping removes it. */
    if (chunk == allocator->master_head) {
        wmem_block_pop_master(allocator);
        return;
    }

    /* Otherwise remove it from the recycler.
     * NOTE(review): a chunk sitting deeper in the master stack would be
     * mishandled by this path, but non-head master entries always span a
     * whole block and so can never be a merge neighbour - confirm against
     * all callers. */
    free_chunk = WMEM_GET_FREE(chunk);

    g_assert(free_chunk->prev && free_chunk->next);

    if (free_chunk->prev == chunk && free_chunk->next == chunk) {
        /* Only one item in recycler, just empty it. */
        g_assert(allocator->recycler_head == chunk);
        allocator->recycler_head = NULL;
    }
    else {
        /* Two or more items, usual doubly-linked-list removal. It's circular
         * so we don't need to worry about null-checking anything, which is
         * nice. */
        WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;
        WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
        if (allocator->recycler_head == chunk) {
            allocator->recycler_head = free_chunk->next;
        }
    }
}
|
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
/* BLOCK/CHUNK HELPERS */

/* Takes a free chunk and checks the chunks to its immediate left and right in
 * the block. If they are also free, the contiguous free chunks are merged into
 * a single free chunk. The merged-in chunks are removed from the recycler if
 * they were in it, and the final merged chunk is added.
 */
static void
wmem_block_merge_free(wmem_block_allocator_t *allocator,
                      wmem_block_chunk_t *chunk)
{
    wmem_block_chunk_t *tmp;
    gboolean add_to_recycler = TRUE;

    g_assert(!chunk->used);

    /* check the chunk to our right */
    tmp = WMEM_CHUNK_NEXT(chunk);

    if (tmp && !tmp->used) {
        /* Remove it from the recycler since we're merging it (only chunks
         * large enough to hold free-list links were ever tracked), then add
         * its length to our length since the two free chunks are now one.
         * Also update our last flag, since we may now be last if tmp was.
         * Our 'chunk' pointer remains the surviving header. */
        if (WMEM_CHUNK_DATA_LEN(tmp) >= sizeof(wmem_block_free_t)) {
            wmem_block_unfree(allocator, tmp);
        }
        chunk->len += tmp->len;
        chunk->last = tmp->last;
    }

    /* check the chunk to our left */
    tmp = WMEM_CHUNK_PREV(chunk);

    if (tmp && !tmp->used) {
        /* If we're merging left, then the chunk to our left will already be
         * in a free list (if it was big enough to be tracked), so we no
         * longer need to add the merge result to the recycler. */
        if (WMEM_CHUNK_DATA_LEN(tmp) >= sizeof(wmem_block_free_t)) {
            add_to_recycler = FALSE;
        }

        /* Add our length to its length since the two free chunks
         * are now one. Also update its last flag, since it may now be the
         * last chunk in the block. */
        tmp->len += chunk->len;
        tmp->last = chunk->last;

        if (chunk == allocator->master_head) {
            /* If our current chunk is the head of the master list then we
             * need to replace it with the merged (left) chunk. The merged
             * chunk now lives in the master list, not the recycler. */
            wmem_block_pop_master(allocator);
            wmem_block_push_master(allocator, tmp);
            add_to_recycler = FALSE;
        }

        /* The chunk pointer passed in is no longer valid, it's been merged to
         * its left, so use the chunk to our left */
        chunk = tmp;
    }

    /* Now update the following chunk to have the correct 'prev' count */
    tmp = WMEM_CHUNK_NEXT(chunk);
    if (tmp) {
        tmp->prev = chunk->len;
    }

    if (add_to_recycler) {
        wmem_block_add_to_recycler(allocator, chunk);
    }
}
|
|
|
|
|
|
|
|
/* Takes an unused chunk and a size, and splits it into two chunks if possible.
 * The first chunk (at the same address as the input chunk) is guaranteed to
 * hold at least `size` bytes of data, and to not be in either the master or
 * recycler lists.
 *
 * The second chunk gets whatever data is left over. It is marked unused and
 * replaces the input chunk in whichever list it originally inhabited. */
static void
wmem_block_split_free_chunk(wmem_block_allocator_t *allocator,
                            wmem_block_chunk_t *chunk,
                            const size_t size)
{
    wmem_block_chunk_t *extra;           /* the leftover (second) chunk */
    size_t aligned_size, available;
    gboolean last;

    g_assert(!chunk->used);
    g_assert(WMEM_CHUNK_DATA_LEN(chunk) >= size);

    /* Total footprint of the first chunk: aligned payload plus its header. */
    aligned_size = WMEM_ALIGN_SIZE(size) + sizeof(wmem_block_chunk_t);

    if (WMEM_CHUNK_DATA_LEN(chunk) < aligned_size + sizeof(wmem_block_free_t)) {
        /* If the available space is not enough to store all of
         * (hdr + requested size + alignment padding + hdr + free-header) then
         * remove the current chunk from the free list. If the chunk does have
         * space we reuse its free-header later, so we don't have to do a full
         * remove/insert. */
        wmem_block_unfree(allocator, chunk);
        if (WMEM_CHUNK_DATA_LEN(chunk) < aligned_size) {
            /* If it doesn't even have room for the aligned request plus the
             * second chunk's header then we can't split it at all, so just
             * return. */
            return;
        }
    }

    /* preserve a few values from chunk that we'll need to manipulate */
    last      = chunk->last;
    available = chunk->len - aligned_size;

    /* set new values for chunk */
    chunk->len  = (guint32) aligned_size;
    chunk->last = FALSE;

    /* with chunk's values set, we can use the standard macro to calculate
     * the location and size of the new free chunk */
    extra = WMEM_CHUNK_NEXT(chunk);

    if (available >= sizeof(wmem_block_chunk_t) + sizeof(wmem_block_free_t)) {
        /* If the new block has room for the free header (in which case the old
         * bigger one must have as well) then we move the free chunk's address
         * without changing its location in either list.
         *
         * XXX: Note that we have not yet written to the new *chunk* header - it
         * may overlap the old *free* header, so we have to do all of our reads
         * here first!
         */
        wmem_block_chunk_t *prev, *next;
        wmem_block_free_t *old_blk, *new_blk;

        old_blk = WMEM_GET_FREE(chunk);
        new_blk = WMEM_GET_FREE(extra);

        prev = old_blk->prev;
        next = old_blk->next;

        /* Self-links (single-entry circular recycler) must keep pointing at
         * the relocated entry rather than the old address. */
        new_blk->prev = prev == chunk ? extra : prev;
        new_blk->next = next == chunk ? extra : next;

        if (prev) WMEM_GET_FREE(prev)->next = extra;
        if (next) WMEM_GET_FREE(next)->prev = extra;

        /* Whichever list head referenced the old chunk now references the
         * leftover chunk instead. */
        if (allocator->master_head == chunk) {
            allocator->master_head = extra;
        }
        else if (allocator->recycler_head == chunk) {
            allocator->recycler_head = extra;
        }
    }

    /* Now that we've copied over the free-list stuff (which may have overlapped
     * with our new chunk header) we can safely write our new chunk header. */
    extra->len  = (guint32) available;
    extra->last = last;
    extra->prev = chunk->len;
    extra->used = FALSE;

    /* Correctly update the following chunk's back-pointer */
    if (!last) {
        WMEM_CHUNK_NEXT(extra)->prev = extra->len;
    }
}
|
|
|
|
|
|
|
|
/* Takes a used chunk and a size, and splits it into two chunks if possible.
 * The first chunk can hold at least `size` bytes of data, while the second gets
 * whatever's left over. The second is marked as unused and is added to the
 * recycler.
 *
 * Used by realloc's shrink path to give trailing space back to the
 * allocator. If the leftover is too small to carry its own chunk header,
 * the function is a no-op and the caller's chunk is left untouched. */
static void
wmem_block_split_used_chunk(wmem_block_allocator_t *allocator,
                            wmem_block_chunk_t *chunk,
                            const size_t size)
{
    wmem_block_chunk_t *extra;
    size_t aligned_size, available;
    gboolean last;

    g_assert(chunk->used);
    g_assert(WMEM_CHUNK_DATA_LEN(chunk) >= size);

    /* Space the first chunk must keep: the (aligned) requested data plus
     * its own header. */
    aligned_size = WMEM_ALIGN_SIZE(size) + sizeof(wmem_block_chunk_t);

    if (aligned_size > WMEM_CHUNK_DATA_LEN(chunk)) {
        /* in this case we don't have enough space to really split it, so
         * it's basically a no-op */
        return;
    }
    /* otherwise, we have room to split it, though the remaining free chunk
     * may still not be usefully large */

    /* preserve a few values from chunk that we'll need to manipulate */
    last = chunk->last;
    available = chunk->len - aligned_size;

    /* set new values for chunk */
    chunk->len = (guint32) aligned_size;
    chunk->last = FALSE;

    /* with chunk's values set, we can use the standard macro to calculate
     * the location and size of the new free chunk
     * (WMEM_CHUNK_NEXT depends on chunk->len, so chunk->len must be
     * updated first — do not reorder) */
    extra = WMEM_CHUNK_NEXT(chunk);

    /* set the new values for the chunk */
    extra->len = (guint32) available;
    extra->last = last;
    extra->prev = chunk->len;
    extra->used = FALSE;

    /* Correctly update the following chunk's back-pointer.
     * NOTE(review): this presumes WMEM_CHUNK_NEXT returns NULL when
     * extra->last is set — confirm against the macro definition. */
    chunk = WMEM_CHUNK_NEXT(extra);
    if (chunk) {
        chunk->prev = extra->len;
    }

    /* Merge it to its right if possible (it can't be merged left, obviously).
     * This also adds it to the recycler. */
    wmem_block_merge_free(allocator, extra);
}
|
|
|
|
|
|
|
|
/* Initializes a single unused chunk at the beginning of the block, and
|
|
|
|
* adds that chunk to the free list. */
|
|
|
|
static void
|
|
|
|
wmem_block_init_block(wmem_block_allocator_t *allocator, void *block)
|
|
|
|
{
|
|
|
|
wmem_block_chunk_t *chunk;
|
|
|
|
|
|
|
|
/* a new block contains one chunk, right at the beginning */
|
|
|
|
chunk = (wmem_block_chunk_t*) block;
|
|
|
|
chunk->used = FALSE;
|
|
|
|
chunk->last = TRUE;
|
|
|
|
chunk->prev = 0;
|
|
|
|
chunk->len = WMEM_BLOCK_SIZE;
|
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
/* now push that chunk onto the master list */
|
|
|
|
wmem_block_push_master(allocator, chunk);
|
2013-02-08 01:38:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Creates a new block, and initializes it. */
|
|
|
|
static void
|
|
|
|
wmem_block_new_block(wmem_block_allocator_t *allocator)
|
|
|
|
{
|
|
|
|
void *block;
|
|
|
|
|
|
|
|
/* allocate the new block and add it to the block list */
|
|
|
|
block = g_malloc(WMEM_BLOCK_SIZE);
|
|
|
|
allocator->block_list = g_slist_prepend(allocator->block_list, block);
|
|
|
|
|
|
|
|
/* initialize it */
|
|
|
|
wmem_block_init_block(allocator, block);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* API */
|
2012-11-24 18:42:56 +00:00
|
|
|
static void *
|
|
|
|
wmem_block_alloc(void *private_data, const size_t size)
|
|
|
|
{
|
|
|
|
wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
|
2013-02-08 01:38:03 +00:00
|
|
|
wmem_block_chunk_t *chunk;
|
2012-11-24 18:42:56 +00:00
|
|
|
|
2013-02-08 01:38:03 +00:00
|
|
|
/* We can't allocate more than will fit in a block (less our header),
|
2013-07-27 22:27:28 +00:00
|
|
|
* which is still an awful lot. */
|
2013-03-27 02:28:45 +00:00
|
|
|
g_assert(size < WMEM_BLOCK_SIZE - sizeof(wmem_block_chunk_t));
|
2012-11-24 18:42:56 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
if (allocator->recycler_head &&
|
|
|
|
WMEM_CHUNK_DATA_LEN(allocator->recycler_head) >= size) {
|
|
|
|
|
|
|
|
/* If we can serve it from the recycler, do so. */
|
|
|
|
chunk = allocator->recycler_head;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
if (allocator->master_head &&
|
|
|
|
WMEM_CHUNK_DATA_LEN(allocator->master_head) < size) {
|
|
|
|
|
|
|
|
/* Recycle the head of the master list if necessary. */
|
|
|
|
chunk = allocator->master_head;
|
|
|
|
wmem_block_pop_master(allocator);
|
|
|
|
wmem_block_add_to_recycler(allocator, chunk);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!allocator->master_head) {
|
|
|
|
/* Allocate a new block if necessary. */
|
2013-02-08 01:38:03 +00:00
|
|
|
wmem_block_new_block(allocator);
|
|
|
|
}
|
2012-11-24 18:42:56 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
chunk = allocator->master_head;
|
|
|
|
}
|
2012-11-24 18:42:56 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
/* if our chunk is used, something is wrong */
|
|
|
|
g_assert(! chunk->used);
|
2013-02-08 01:38:03 +00:00
|
|
|
/* if we still don't have the space at this point, something is wrong */
|
2013-03-27 02:28:45 +00:00
|
|
|
g_assert(size <= WMEM_CHUNK_DATA_LEN(chunk));
|
2012-11-24 18:42:56 +00:00
|
|
|
|
2013-02-08 01:38:03 +00:00
|
|
|
/* Split our chunk into two to preserve any trailing free space */
|
|
|
|
wmem_block_split_free_chunk(allocator, chunk, size);
|
2012-11-24 18:42:56 +00:00
|
|
|
|
2013-02-08 01:38:03 +00:00
|
|
|
/* if our split reduced our size too much, something went wrong */
|
2013-03-27 02:28:45 +00:00
|
|
|
g_assert(size <= WMEM_CHUNK_DATA_LEN(chunk));
|
2013-07-27 22:27:28 +00:00
|
|
|
/* the resulting chunk should not be in either free list */
|
|
|
|
g_assert(chunk != allocator->master_head);
|
|
|
|
g_assert(chunk != allocator->recycler_head);
|
2012-11-24 18:42:56 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
/* Now cycle the recycler */
|
|
|
|
wmem_block_cycle_recycler(allocator);
|
2013-03-08 17:51:45 +00:00
|
|
|
|
2013-02-08 01:38:03 +00:00
|
|
|
/* mark it as used */
|
|
|
|
chunk->used = TRUE;
|
|
|
|
|
|
|
|
/* and return the user's pointer */
|
2013-07-27 22:27:28 +00:00
|
|
|
return WMEM_CHUNK_TO_DATA(chunk);
|
2013-02-08 01:38:03 +00:00
|
|
|
}
|
2012-11-24 18:42:56 +00:00
|
|
|
|
2013-02-08 01:38:03 +00:00
|
|
|
static void
|
|
|
|
wmem_block_free(void *private_data, void *ptr)
|
|
|
|
{
|
|
|
|
wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
|
|
|
|
wmem_block_chunk_t *chunk;
|
2012-11-24 18:42:56 +00:00
|
|
|
|
2013-02-08 01:38:03 +00:00
|
|
|
chunk = WMEM_DATA_TO_CHUNK(ptr);
|
2012-12-08 21:08:29 +00:00
|
|
|
|
2013-02-08 01:38:03 +00:00
|
|
|
g_assert(chunk->used);
|
2012-12-08 21:08:29 +00:00
|
|
|
|
2013-02-08 01:38:03 +00:00
|
|
|
/* mark it as unused */
|
|
|
|
chunk->used = FALSE;
|
2012-12-08 21:08:29 +00:00
|
|
|
|
2013-02-08 01:38:03 +00:00
|
|
|
/* merge it with any other free chunks adjacent to it, so that contiguous
|
|
|
|
* free space doesn't get fragmented */
|
2013-07-27 22:27:28 +00:00
|
|
|
wmem_block_merge_free(allocator, chunk);
|
2013-02-08 01:38:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Resizes an existing allocation in place when possible.
 * - Growing: if the next chunk is free and big enough, absorb (part of) it
 *   and return the same pointer; otherwise fall back to alloc/copy/free.
 * - Shrinking: split the used chunk and recycle the tail.
 * - Same size: no-op. */
static void *
wmem_block_realloc(void *private_data, void *ptr, const size_t size)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    wmem_block_chunk_t *chunk;

    chunk = WMEM_DATA_TO_CHUNK(ptr);

    g_assert(chunk->used);

    if (size > WMEM_CHUNK_DATA_LEN(chunk)) {
        /* grow */
        wmem_block_chunk_t *tmp;

        tmp = WMEM_CHUNK_NEXT(chunk);

        if (tmp && (!tmp->used) &&
            (size < WMEM_CHUNK_DATA_LEN(chunk) + tmp->len)) {
            /* the next chunk is free and has enough extra, so just grab
             * from that */
            size_t split_size;

            /* we ask for the next chunk to be split, but we don't end up
             * using the split chunk header (it just gets merged into this one),
             * so we want the split to be of (size - curdatalen - header_size).
             * However, this can underflow by header_size, so we do a quick
             * check here and floor the value to 0. */
            split_size = size - WMEM_CHUNK_DATA_LEN(chunk);

            if (split_size < sizeof(wmem_block_chunk_t)) {
                split_size = 0;
            }
            else {
                split_size -= sizeof(wmem_block_chunk_t);
            }

            wmem_block_split_free_chunk(allocator, tmp, split_size);

            /* Now do a 'quickie' merge between the current block and the left-
             * hand side of the split. Simply calling wmem_block_merge_free
             * might confuse things, since we may temporarily have two blocks
             * to our right that are both free (and it isn't guaranteed to
             * handle that case). Update our 'next' count and last flag, and
             * our (new) successor's 'prev' count */
            chunk->len += tmp->len;
            chunk->last = tmp->last;
            /* chunk->len changed above, so WMEM_CHUNK_NEXT now lands on the
             * chunk after the absorbed region — do not reorder these. */
            tmp = WMEM_CHUNK_NEXT(chunk);
            if (tmp) {
                tmp->prev = chunk->len;
            }

            /* And return the same old pointer */
            return ptr;
        }
        else {
            /* no room to grow, need to alloc, copy, free */
            void *newptr;

            newptr = wmem_block_alloc(private_data, size);
            memcpy(newptr, ptr, WMEM_CHUNK_DATA_LEN(chunk));
            wmem_block_free(private_data, ptr);

            return newptr;
        }
    }
    else if (size < WMEM_CHUNK_DATA_LEN(chunk)) {
        /* shrink */
        wmem_block_split_used_chunk(allocator, chunk, size);

        return ptr;
    }

    /* no-op */
    return ptr;
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
wmem_block_free_all(void *private_data)
|
|
|
|
{
|
|
|
|
wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
|
2013-02-08 01:38:03 +00:00
|
|
|
GSList *tmp;
|
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
/* the existing free lists are entirely irrelevant */
|
|
|
|
allocator->master_head = NULL;
|
|
|
|
allocator->recycler_head = NULL;
|
2013-02-08 01:38:03 +00:00
|
|
|
|
|
|
|
/* iterate through the blocks, reinitializing each one */
|
|
|
|
tmp = allocator->block_list;
|
2012-11-24 18:42:56 +00:00
|
|
|
|
|
|
|
while (tmp) {
|
2013-02-08 01:38:03 +00:00
|
|
|
wmem_block_init_block(allocator, tmp->data);
|
2012-11-24 18:42:56 +00:00
|
|
|
tmp = tmp->next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Garbage-collects the allocator: any block that is entirely unused is
 * unlinked from the free lists and returned to the OS; all other blocks
 * are kept. Rebuilds the block list in the process. */
static void
wmem_block_gc(void *private_data)
{
    wmem_block_allocator_t *allocator = (wmem_block_allocator_t*) private_data;
    GSList *tmp, *new_block_list = NULL;
    wmem_block_chunk_t *chunk;
    wmem_block_free_t *free_chunk;

    /* Walk through the blocks, adding used blocks to a new list and
     * completely destroying unused blocks. The newly built list is the new
     * block list. */
    tmp = allocator->block_list;

    while (tmp) {
        chunk = (wmem_block_chunk_t *) tmp->data;

        if (!chunk->used && chunk->last) {
            /* If the first chunk is also the last, and is unused, then
             * the block as a whole is entirely unused, so return it to
             * the OS and remove it from whatever lists it is in. */
            free_chunk = WMEM_GET_FREE(chunk);
            /* Unlink it from its doubly-linked free list neighbours. */
            if (free_chunk->next) {
                WMEM_GET_FREE(free_chunk->next)->prev = free_chunk->prev;
            }
            if (free_chunk->prev) {
                WMEM_GET_FREE(free_chunk->prev)->next = free_chunk->next;
            }
            if (allocator->recycler_head == chunk) {
                /* NOTE(review): the self-reference check implies the
                 * recycler is a circular list whose sole element points at
                 * itself — confirm against the recycler helpers. */
                if (free_chunk->next == chunk) {
                    allocator->recycler_head = NULL;
                }
                else {
                    allocator->recycler_head = free_chunk->next;
                }
            }
            else if (allocator->master_head == chunk) {
                allocator->master_head = free_chunk->next;
            }
            /* The chunk header is at the very start of the block, so this
             * frees the whole block allocated in wmem_block_new_block. */
            g_free(chunk);
        }
        else {
            /* part of this block is used, so add it to the new block list */
            new_block_list = g_slist_prepend(new_block_list, chunk);
        }

        tmp = tmp->next;
    }

    /* free the data structure for the old list */
    g_slist_free(allocator->block_list);
    /* and store the new list */
    allocator->block_list = new_block_list;
}
|
|
|
|
|
|
|
|
static void
|
2013-05-22 15:42:12 +00:00
|
|
|
wmem_block_allocator_cleanup(void *private_data)
|
2013-02-08 01:38:03 +00:00
|
|
|
{
|
|
|
|
/* wmem guarantees that free_all() is called directly before this, so
|
|
|
|
* calling gc will return all our blocks to the OS automatically */
|
2013-05-22 15:42:12 +00:00
|
|
|
wmem_block_gc(private_data);
|
2013-02-08 01:38:03 +00:00
|
|
|
|
|
|
|
/* then just free the allocator structs */
|
2013-05-22 15:42:12 +00:00
|
|
|
g_slice_free(wmem_block_allocator_t, private_data);
|
2012-11-24 18:42:56 +00:00
|
|
|
}
|
|
|
|
|
2013-05-22 15:42:12 +00:00
|
|
|
void
|
|
|
|
wmem_block_allocator_init(wmem_allocator_t *allocator)
|
2012-11-24 18:42:56 +00:00
|
|
|
{
|
|
|
|
wmem_block_allocator_t *block_allocator;
|
|
|
|
|
2013-03-10 15:04:04 +00:00
|
|
|
block_allocator = g_slice_new(wmem_block_allocator_t);
|
2012-11-24 18:42:56 +00:00
|
|
|
|
2013-02-08 01:38:03 +00:00
|
|
|
allocator->alloc = &wmem_block_alloc;
|
|
|
|
allocator->realloc = &wmem_block_realloc;
|
|
|
|
allocator->free = &wmem_block_free;
|
|
|
|
|
|
|
|
allocator->free_all = &wmem_block_free_all;
|
|
|
|
allocator->gc = &wmem_block_gc;
|
2013-05-22 15:42:12 +00:00
|
|
|
allocator->cleanup = &wmem_block_allocator_cleanup;
|
|
|
|
|
|
|
|
allocator->private_data = (void*) block_allocator;
|
2013-01-19 16:15:32 +00:00
|
|
|
|
2013-07-27 22:27:28 +00:00
|
|
|
block_allocator->block_list = NULL;
|
|
|
|
block_allocator->master_head = NULL;
|
|
|
|
block_allocator->recycler_head = NULL;
|
2012-11-24 18:42:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Editor modelines - http://www.wireshark.org/tools/modelines.html
|
|
|
|
*
|
|
|
|
* Local variables:
|
|
|
|
* c-basic-offset: 4
|
|
|
|
* tab-width: 8
|
|
|
|
* indent-tabs-mode: nil
|
|
|
|
* End:
|
|
|
|
*
|
|
|
|
* vi: set shiftwidth=4 tabstop=8 expandtab:
|
|
|
|
* :indentSize=4:tabSize=8:noTabs=true:
|
|
|
|
*/
|