Remove the emem slab feature (sl_* functions) completely, replacing it with
glib memory slices.

- We weren't doing anything with the emem slab that couldn't be done with glib
  slices (a minimal before/after sketch follows this list).
- Removes a fair bit of code as well as one debugging environment variable.
- Glib slices are much more cache-friendly and are multi-thread-friendly (if
  we ever go there).
- Allows glib to actually return slices to the OS on occasion. The emem slab
  would hold onto its memory forever, which resulted in a great deal of wasted
  memory after closing a large file.
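
As a rough illustration of the swap made at each call site (a minimal sketch
only; my_item_t is a hypothetical stand-in for the real types such as
field_info, proto_node, item_label_t and fvalue_t):

    #include <glib.h>

    /* my_item_t is a hypothetical stand-in for field_info, proto_node, etc. */
    typedef struct {
        int  id;
        char label[64];
    } my_item_t;

    int main(void)
    {
        /* Before this commit (sketch): a static per-type slab plus sl_* calls:
         *     static struct ws_memory_slab my_item_slab =
         *         WS_MEMORY_SLAB_INIT(my_item_t, 128);
         *     my_item_t *it = (my_item_t *)sl_alloc0(&my_item_slab);
         *     sl_free(&my_item_slab, it);
         *
         * After: GLib slices, with no per-type bookkeeping in Wireshark. */
        my_item_t *it = g_slice_new0(my_item_t);  /* zero-filled, like sl_alloc0() */
        it->id = 42;
        g_slice_free(my_item_t, it);              /* size derived from the type */
        return 0;
    }
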

svn path=/trunk/; revision=48218
Evan Huus 2013-03-09 20:02:19 +00:00
parent deefa09237
commit 122b7cb6df
6 changed files with 8 additions and 151 deletions


@@ -170,14 +170,6 @@ static emem_pool_t se_packet_mem;
*/
static gboolean debug_use_memory_scrubber = FALSE;
/*
* Use g_slices in the slab allocator; enabling this (by putting
* WIRESHARK_DEBUG_USE_SLICES in the environment) together with
* exporting G_SLICE=always-malloc makes it easier to debug memory problems
* in slab-allocated memory.
*/
static gboolean debug_use_slices = FALSE;
#if defined (_WIN32)
static SYSTEM_INFO sysinfo;
static OSVERSIONINFO versinfo;
@@ -356,9 +348,6 @@ emem_init(void)
if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
debug_use_memory_scrubber = TRUE;
if (getenv("WIRESHARK_DEBUG_USE_SLICES"))
debug_use_slices = TRUE;
#if defined (_WIN32)
/* Set up our guard page info for Win32 */
GetSystemInfo(&sysinfo);
@@ -686,28 +675,6 @@ emem_create_chunk(size_t size)
return npc;
}
static void
emem_destroy_chunk(emem_chunk_t *npc)
{
#if defined (_WIN32)
VirtualFree(npc->buf, 0, MEM_RELEASE);
#elif defined(USE_GUARD_PAGES)
/* we cannot recover from a munmap() failure, but we */
/* can print an informative error message to stderr */
if (munmap(npc->buf, npc->amount_free_init) != 0)
fprintf(stderr, "Warning: Unable to unmap memory chunk which has address %p and size %u\n",
npc->buf, npc->amount_free_init);
#else
g_free(npc->buf);
#endif
#ifdef SHOW_EMEM_STATS
total_no_chunks--;
#endif
g_free(npc);
}
static emem_chunk_t *
emem_create_chunk_gp(size_t size)
{
@@ -902,54 +869,6 @@ se_alloc(size_t size)
return emem_alloc(size, &se_packet_mem);
}
void *
sl_alloc(struct ws_memory_slab *mem_chunk)
{
emem_chunk_t *chunk;
void *ptr;
if (debug_use_slices)
return g_slice_alloc0(mem_chunk->item_size);
if ((mem_chunk->freed != NULL)) {
ptr = mem_chunk->freed;
memcpy(&mem_chunk->freed, ptr, sizeof(void *));
return ptr;
}
if (!(chunk = mem_chunk->chunk_list) || chunk->amount_free < (guint) mem_chunk->item_size) {
size_t alloc_size = mem_chunk->item_size * mem_chunk->count;
/* align to page-size */
#if defined (_WIN32) || defined(USE_GUARD_PAGES)
alloc_size = (alloc_size + (pagesize - 1)) & ~(pagesize - 1);
#endif
chunk = emem_create_chunk(alloc_size); /* NOTE: using version without guard pages! */
chunk->next = mem_chunk->chunk_list;
mem_chunk->chunk_list = chunk;
}
ptr = chunk->buf + chunk->free_offset;
chunk->free_offset += mem_chunk->item_size;
chunk->amount_free -= mem_chunk->item_size;
return ptr;
}
void
sl_free(struct ws_memory_slab *mem_chunk, gpointer ptr)
{
if (debug_use_slices) {
g_slice_free1(mem_chunk->item_size, ptr);
} else
/* XXX, abort if ptr not found in emem_verify_pointer_list()? */
if (ptr != NULL /* && emem_verify_pointer_list(mem_chunk->chunk_list, ptr) */) {
memcpy(ptr, &(mem_chunk->freed), sizeof(void *));
mem_chunk->freed = ptr;
}
}
void *
ep_alloc0(size_t size)
{
@@ -962,12 +881,6 @@ se_alloc0(size_t size)
return memset(se_alloc(size),'\0',size);
}
void *
sl_alloc0(struct ws_memory_slab *mem_chunk)
{
return memset(sl_alloc(mem_chunk), '\0', mem_chunk->item_size);
}
static gchar *
emem_strdup(const gchar *src, void *allocator(size_t))
{
@@ -1283,21 +1196,6 @@ se_free_all(void)
emem_free_all(&se_packet_mem);
}
void
sl_free_all(struct ws_memory_slab *mem_chunk)
{
emem_chunk_t *chunk_list = mem_chunk->chunk_list;
mem_chunk->chunk_list = NULL;
mem_chunk->freed = NULL;
while (chunk_list) {
emem_chunk_t *chunk = chunk_list;
chunk_list = chunk_list->next;
emem_destroy_chunk(chunk);
}
}
ep_stack_t
ep_stack_new(void) {
ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
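
For readers skimming the deletion above: sl_alloc()/sl_free() recycled
fixed-size items through an intrusive free list, storing the next-free pointer
inside the first sizeof(void *) bytes of each freed item. A standalone,
simplified sketch of that technique (single chunk, hypothetical names, no
guard pages or chunk list):

    #include <stdlib.h>
    #include <string.h>

    typedef struct {
        size_t item_size;   /* must be >= sizeof(void *) */
        size_t count;       /* capacity of the single backing chunk */
        char  *chunk;       /* backing storage, allocated on first use */
        size_t used;        /* items handed out from the chunk so far */
        void  *freed;       /* head of the intrusive free list */
    } mini_slab_t;

    static void *mini_slab_alloc(mini_slab_t *s)
    {
        void *ptr;

        if (s->freed != NULL) {                      /* reuse a freed item first */
            ptr = s->freed;
            memcpy(&s->freed, ptr, sizeof(void *));  /* pop: next pointer is stored in the item itself */
            return ptr;
        }
        if (s->chunk == NULL)
            s->chunk = (char *)malloc(s->item_size * s->count);
        if (s->chunk == NULL || s->used == s->count)
            return NULL;                             /* the real slab grew a chunk list here */

        ptr = s->chunk + s->used * s->item_size;
        s->used++;
        return ptr;
    }

    static void mini_slab_free(mini_slab_t *s, void *ptr)
    {
        if (ptr == NULL)
            return;
        memcpy(ptr, &s->freed, sizeof(void *));      /* push: old head goes inside the freed item */
        s->freed = ptr;
    }

    int main(void)
    {
        mini_slab_t slab = { sizeof(double), 128, NULL, 0, NULL };
        void *a = mini_slab_alloc(&slab);
        mini_slab_free(&slab, a);
        void *b = mini_slab_alloc(&slab);   /* returns the recycled item: b == a */
        mini_slab_free(&slab, b);
        free(slab.chunk);
        return 0;
    }

GLib's slice allocator performs the same constant-time recycling internally,
and can also hand unused pages back to the OS, which is what the commit
message relies on.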


@@ -201,29 +201,6 @@ struct _emem_chunk_t;
#define WS_MEM_ALIGN G_MEM_ALIGN
#endif
/* Macros to initialize ws_memory_slab */
#define WS_MEMORY_SLAB_INIT(type, count) { ((sizeof(type) + (WS_MEM_ALIGN - 1)) & ~(WS_MEM_ALIGN - 1)), count, NULL, NULL }
#define WS_MEMORY_SLAB_INIT_UNALIGNED(size, count) { size, count, NULL, NULL }
struct ws_memory_slab {
const gint item_size;
const gint count;
struct _emem_chunk_t *chunk_list;
void *freed;
};
WS_DLL_PUBLIC
void *sl_alloc(struct ws_memory_slab *mem_chunk);
WS_DLL_PUBLIC
void *sl_alloc0(struct ws_memory_slab *mem_chunk);
WS_DLL_PUBLIC
void sl_free(struct ws_memory_slab *mem_chunk, gpointer ptr);
/** release all memory allocated */
WS_DLL_PUBLIC
void sl_free_all(struct ws_memory_slab *mem_chunk);
/**************************************************************
* binary trees
**************************************************************/
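
The WS_MEMORY_SLAB_INIT macro deleted above rounds sizeof(type) up to the next
multiple of WS_MEM_ALIGN with a power-of-two mask. A small, self-contained
sketch of that computation (ALIGN_UP and MY_ALIGN are hypothetical names; 8 is
a typical alignment on 64-bit platforms):

    #include <stdio.h>

    /* Stand-in for WS_MEM_ALIGN / G_MEM_ALIGN. Must be a power of two. */
    #define MY_ALIGN 8

    /* Round size up to the next multiple of MY_ALIGN, exactly as
     * WS_MEMORY_SLAB_INIT did with sizeof(type). */
    #define ALIGN_UP(size) (((size) + (MY_ALIGN - 1)) & ~(size_t)(MY_ALIGN - 1))

    int main(void)
    {
        printf("%zu -> %zu\n", (size_t)13, ALIGN_UP((size_t)13)); /* 13 -> 16 */
        printf("%zu -> %zu\n", (size_t)16, ALIGN_UP((size_t)16)); /* 16 -> 16 */
        return 0;
    }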


@@ -31,10 +31,6 @@
/* Keep track of ftype_t's via their ftenum number */
static ftype_t* type_list[FT_NUM_TYPES];
/* Space for quickly allocating/de-allocating fvalue_t's */
struct ws_memory_slab fvalue_t_slab =
WS_MEMORY_SLAB_INIT(fvalue_t, 128);
/* Initialize the ftype module. */
void
ftypes_initialize(void)
@@ -203,7 +199,7 @@ fvalue_new(ftenum_t ftype)
ftype_t *ft;
FvalueNewFunc new_value;
fv = (fvalue_t *)sl_alloc(&fvalue_t_slab);
fv = g_slice_new(fvalue_t);
FTYPE_LOOKUP(ftype, ft);
fv->ftype = ft;


@@ -269,8 +269,6 @@ fvalue_init(fvalue_t *fv, ftenum_t ftype);
/* Free all memory used by an fvalue_t. With MSVC and a
* libwireshark.dll, we need a special declaration.
*/
WS_DLL_PUBLIC struct ws_memory_slab fvalue_t_slab;
#define FVALUE_CLEANUP(fv) \
{ \
@@ -284,7 +282,7 @@ WS_DLL_PUBLIC struct ws_memory_slab fvalue_t_slab;
#define FVALUE_FREE(fv) \
{ \
FVALUE_CLEANUP(fv) \
sl_free(&fvalue_t_slab, fv); \
g_slice_free(fvalue_t, fv); \
}
WS_DLL_PUBLIC
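
One detail of the replacement worth keeping in mind: g_slice_free(type, mem)
must name the same type (and therefore the same size) that was used to
allocate the block, because the slice allocator keys its bookkeeping on block
size. A short sketch with a hypothetical struct:

    #include <glib.h>

    typedef struct {        /* hypothetical small struct */
        guint32 a;
        guint32 b;
    } pair_t;

    int main(void)
    {
        pair_t *p = g_slice_new(pair_t);

        /* Correct: free with the same type used for the allocation, as
         * FVALUE_FREE and FIELD_INFO_FREE do after this commit. */
        g_slice_free(pair_t, p);

        /* Equivalent lower-level form, matching sizes explicitly: */
        void *q = g_slice_alloc(sizeof(pair_t));
        g_slice_free1(sizeof(pair_t), q);

        /* Freeing with a different size (e.g. g_slice_free1(1, q)) would
         * corrupt the slice allocator's bookkeeping, so the typed macro form
         * is the safer habit. */
        return 0;
    }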


@@ -263,35 +263,24 @@ static GList *protocols = NULL;
/* Contains information about a field when a dissector calls
* proto_tree_add_item. */
static struct ws_memory_slab field_info_slab =
WS_MEMORY_SLAB_INIT(field_info, 128);
#define FIELD_INFO_NEW(fi) \
fi = (field_info *)sl_alloc(&field_info_slab)
#define FIELD_INFO_FREE(fi) \
sl_free(&field_info_slab, fi)
#define FIELD_INFO_NEW(fi) fi = g_slice_new(field_info)
#define FIELD_INFO_FREE(fi) g_slice_free(field_info, fi)
/* Contains the space for proto_nodes. */
static struct ws_memory_slab proto_node_slab =
WS_MEMORY_SLAB_INIT(proto_node, 128);
#define PROTO_NODE_NEW(node) \
node = (proto_node *)sl_alloc(&proto_node_slab); \
node = g_slice_new(proto_node); \
node->first_child = NULL; \
node->last_child = NULL; \
node->next = NULL;
#define PROTO_NODE_FREE(node) \
sl_free(&proto_node_slab, node)
g_slice_free(proto_node, node)
/* String space for protocol and field items for the GUI */
static struct ws_memory_slab item_label_slab =
WS_MEMORY_SLAB_INIT(item_label_t, 128);
#define ITEM_LABEL_NEW(il) \
il = (item_label_t *)sl_alloc(&item_label_slab);
il = g_slice_new(item_label_t);
#define ITEM_LABEL_FREE(il) \
sl_free(&item_label_slab, il);
g_slice_free(item_label_t, il);
#define PROTO_REGISTRAR_GET_NTH(hfindex, hfinfo) \
if((guint)hfindex >= gpa_hfinfo.len && getenv("WIRESHARK_ABORT_ON_DISSECTOR_BUG")) \
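
Note that g_slice_new() does not zero the returned block, which is why the
rewritten PROTO_NODE_NEW above still sets first_child, last_child and next by
hand; g_slice_new0() is the zero-filling variant. A minimal sketch with a
hypothetical node type:

    #include <glib.h>

    typedef struct my_node {            /* hypothetical stand-in for proto_node */
        struct my_node *first_child;
        struct my_node *last_child;
        struct my_node *next;
        int             value;
    } my_node;

    int main(void)
    {
        /* g_slice_new() returns uninitialised memory, so fields must be set
         * explicitly (this mirrors what the PROTO_NODE_NEW macro does). */
        my_node *a = g_slice_new(my_node);
        a->first_child = NULL;
        a->last_child  = NULL;
        a->next        = NULL;
        a->value       = 0;

        /* g_slice_new0() zero-fills instead, trading a memset for convenience. */
        my_node *b = g_slice_new0(my_node);

        g_slice_free(my_node, a);
        g_slice_free(my_node, b);
        return 0;
    }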


@@ -81,7 +81,6 @@ export WIRESHARK_DEBUG_EP_NO_CHUNKS=
export WIRESHARK_DEBUG_SE_NO_CHUNKS=
export WIRESHARK_DEBUG_WMEM_OVERRIDE=simple
export WIRESHARK_DEBUG_WMEM_SLAB=
export WIRESHARK_DEBUG_USE_SLICES=
export G_SLICE=always-malloc # or debug-blocks
libtool --mode=execute valgrind $VERBOSE $LEAK_CHECK $REACHABLE $TRACK_ORIGINS $BIN_DIR/$COMMAND $COMMAND_ARGS $PCAP $COMMAND_ARGS2 > /dev/null