2005-07-22 07:46:58 +00:00
|
|
|
/* emem.c
|
2006-05-28 19:49:07 +00:00
|
|
|
* Wireshark memory management and garbage collection functions
|
2005-07-22 07:46:58 +00:00
|
|
|
* Ronnie Sahlberg 2005
|
|
|
|
*
|
2006-05-21 05:12:17 +00:00
|
|
|
* Wireshark - Network traffic analyzer
|
|
|
|
* By Gerald Combs <gerald@wireshark.org>
|
2005-07-22 07:46:58 +00:00
|
|
|
* Copyright 1998 Gerald Combs
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version 2
|
|
|
|
* of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software
|
2012-06-28 22:56:06 +00:00
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
2005-07-22 07:46:58 +00:00
|
|
|
*/
|
2005-07-24 01:21:38 +00:00
|
|
|
#include "config.h"
|
2005-07-22 07:46:58 +00:00
|
|
|
|
|
|
|
#include <stdio.h>
|
2005-07-22 23:38:51 +00:00
|
|
|
#include <stdlib.h>
|
2005-07-24 00:29:57 +00:00
|
|
|
#include <string.h>
|
|
|
|
#include <stdarg.h>
|
2006-01-09 23:11:40 +00:00
|
|
|
|
|
|
|
#include <time.h>
|
|
|
|
#ifdef HAVE_SYS_TIME_H
|
|
|
|
#include <sys/time.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef HAVE_UNISTD_H
|
|
|
|
#include <unistd.h>
|
|
|
|
#endif
|
|
|
|
|
2005-07-22 07:46:58 +00:00
|
|
|
#include <glib.h>
|
2008-08-05 02:23:35 +00:00
|
|
|
|
2013-07-31 18:26:14 +00:00
|
|
|
#include "app_mem_usage.h"
|
2009-09-04 11:48:04 +00:00
|
|
|
#include "proto.h"
|
2013-11-10 15:59:37 +00:00
|
|
|
#include "exceptions.h"
|
2005-07-22 07:46:58 +00:00
|
|
|
#include "emem.h"
|
2013-03-20 00:04:01 +00:00
|
|
|
#include "wmem/wmem.h"
|
2005-07-22 07:46:58 +00:00
|
|
|
|
2007-04-04 12:18:10 +00:00
|
|
|
#ifdef _WIN32
|
|
|
|
#include <windows.h> /* VirtualAlloc, VirtualProtect */
|
|
|
|
#include <process.h> /* getpid */
|
|
|
|
#endif
|
|
|
|
|
2009-10-06 16:20:26 +00:00
|
|
|
/* Print out statistics about our memory allocations? */
|
|
|
|
/*#define SHOW_EMEM_STATS*/
|
|
|
|
|
2009-08-10 16:36:44 +00:00
|
|
|
/* Do we want to use guardpages? if available */
|
|
|
|
#define WANT_GUARD_PAGES 1
|
2006-03-10 05:39:57 +00:00
|
|
|
|
2006-03-10 05:15:52 +00:00
|
|
|
#ifdef WANT_GUARD_PAGES
|
2006-02-27 20:51:53 +00:00
|
|
|
/* Add guard pages at each end of our allocated memory */
|
2012-10-08 19:43:03 +00:00
|
|
|
|
2006-03-03 20:44:53 +00:00
|
|
|
#if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
|
|
|
|
#include <stdint.h>
|
2012-10-08 19:43:03 +00:00
|
|
|
|
2009-11-23 18:47:52 +00:00
|
|
|
#ifdef HAVE_SYS_TYPES_H
|
2006-02-27 20:51:53 +00:00
|
|
|
#include <sys/types.h>
|
2012-10-08 19:43:03 +00:00
|
|
|
#endif /* HAVE_SYS_TYPES_H */
|
|
|
|
|
2006-02-27 20:51:53 +00:00
|
|
|
#include <sys/mman.h>
|
2012-10-08 19:43:03 +00:00
|
|
|
|
2006-06-17 02:31:56 +00:00
|
|
|
#if defined(MAP_ANONYMOUS)
|
|
|
|
#define ANON_PAGE_MODE (MAP_ANONYMOUS|MAP_PRIVATE)
|
|
|
|
#elif defined(MAP_ANON)
|
|
|
|
#define ANON_PAGE_MODE (MAP_ANON|MAP_PRIVATE)
|
|
|
|
#else
|
|
|
|
#define ANON_PAGE_MODE (MAP_PRIVATE) /* have to map /dev/zero */
|
|
|
|
#define NEED_DEV_ZERO
|
2012-10-08 19:43:03 +00:00
|
|
|
#endif /* defined(MAP_ANONYMOUS) */
|
|
|
|
|
2006-06-17 02:31:56 +00:00
|
|
|
#ifdef NEED_DEV_ZERO
|
|
|
|
#include <fcntl.h>
|
|
|
|
static int dev_zero_fd;
|
|
|
|
#define ANON_FD dev_zero_fd
|
|
|
|
#else
|
|
|
|
#define ANON_FD -1
|
2012-10-08 19:43:03 +00:00
|
|
|
#endif /* NEED_DEV_ZERO */
|
|
|
|
|
2006-02-27 20:51:53 +00:00
|
|
|
#define USE_GUARD_PAGES 1
|
2012-10-08 19:43:03 +00:00
|
|
|
#endif /* defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H) */
|
|
|
|
#endif /* WANT_GUARD_PAGES */
|
2006-02-27 20:51:53 +00:00
|
|
|
|
2005-07-22 07:46:58 +00:00
|
|
|
/* When required, allocate more memory from the OS in this size chunks */
|
2009-10-11 06:26:24 +00:00
|
|
|
#define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
|
2005-07-22 07:46:58 +00:00
|
|
|
|
2011-02-25 15:49:28 +00:00
|
|
|
/* The canary between allocations is at least 8 bytes and up to 16 bytes to
|
|
|
|
* allow future allocations to be 4- or 8-byte aligned.
|
|
|
|
* All but the last byte of the canary are randomly generated; the last byte is
|
|
|
|
* NULL to separate the canary and the pointer to the next canary.
|
|
|
|
*
|
|
|
|
* For example, if the allocation is a multiple of 8 bytes, the canary and
|
|
|
|
* pointer would look like:
|
2011-02-25 03:18:02 +00:00
|
|
|
* |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
|
|
|
|
* |c|c|c|c|c|c|c|0||p|p|p|p|p|p|p|p| (64-bit), or:
|
|
|
|
* |c|c|c|c|c|c|c|0||p|p|p|p| (32-bit)
|
|
|
|
*
|
2011-02-25 15:49:28 +00:00
|
|
|
* If the allocation was, for example, 12 bytes, the canary would look like:
|
|
|
|
* |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
|
|
|
|
* [...]|a|a|a|a|c|c|c|c||c|c|c|c|c|c|c|0| (followed by the pointer)
|
2011-02-25 03:18:02 +00:00
|
|
|
*/
|
2006-01-10 21:12:48 +00:00
|
|
|
#define EMEM_CANARY_SIZE 8
|
|
|
|
#define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
|
2008-08-05 02:23:35 +00:00
|
|
|
|
2005-07-22 07:46:58 +00:00
|
|
|
typedef struct _emem_chunk_t {
|
|
|
|
struct _emem_chunk_t *next;
|
2009-10-19 22:31:08 +00:00
|
|
|
char *buf;
|
2012-10-10 12:24:56 +00:00
|
|
|
size_t size;
|
2006-02-27 20:51:53 +00:00
|
|
|
unsigned int amount_free_init;
|
2005-07-22 07:46:58 +00:00
|
|
|
unsigned int amount_free;
|
2006-02-27 20:51:53 +00:00
|
|
|
unsigned int free_offset_init;
|
2005-07-22 07:46:58 +00:00
|
|
|
unsigned int free_offset;
|
2010-01-19 21:28:48 +00:00
|
|
|
void *canary_last;
|
2005-07-22 07:46:58 +00:00
|
|
|
} emem_chunk_t;
|
|
|
|
|
2012-10-10 19:05:30 +00:00
|
|
|
typedef struct _emem_pool_t {
|
2009-04-14 14:08:19 +00:00
|
|
|
emem_chunk_t *free_list;
|
|
|
|
emem_chunk_t *used_list;
|
2009-10-19 22:31:08 +00:00
|
|
|
|
|
|
|
guint8 canary[EMEM_CANARY_DATA_SIZE];
|
2012-10-08 18:33:00 +00:00
|
|
|
void *(*memory_alloc)(size_t size, struct _emem_pool_t *);
|
2009-10-19 22:31:08 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Tools like Valgrind and ElectricFence don't work well with memchunks.
|
|
|
|
* Export the following environment variables to make {ep|se}_alloc() allocate each
|
|
|
|
* object individually.
|
|
|
|
*
|
|
|
|
* WIRESHARK_DEBUG_EP_NO_CHUNKS
|
|
|
|
*/
|
|
|
|
gboolean debug_use_chunks;
|
|
|
|
|
|
|
|
/* Do we want to use canaries?
|
|
|
|
* Export the following environment variables to disable/enable canaries
|
|
|
|
*
|
|
|
|
* WIRESHARK_DEBUG_EP_NO_CANARY
|
|
|
|
*/
|
|
|
|
gboolean debug_use_canary;
|
|
|
|
|
2015-01-18 21:26:12 +00:00
|
|
|
/* Do we want to verify no one is using a pointer to an ep_
|
2010-09-02 18:02:06 +00:00
|
|
|
* allocated thing where they shouldn't be?
|
|
|
|
*
|
2015-01-18 22:16:55 +00:00
|
|
|
* Export WIRESHARK_EP_VERIFY_POINTERS to turn this on.
|
2010-09-02 18:02:06 +00:00
|
|
|
*/
|
|
|
|
gboolean debug_verify_pointers;
|
|
|
|
|
2012-10-10 19:05:30 +00:00
|
|
|
} emem_pool_t;
|
2005-07-22 07:46:58 +00:00
|
|
|
|
2012-10-10 19:05:30 +00:00
|
|
|
static emem_pool_t ep_packet_mem;
|
2005-07-22 07:46:58 +00:00
|
|
|
|
2009-10-19 22:31:08 +00:00
|
|
|
/*
|
|
|
|
* Memory scrubbing is expensive but can be useful to ensure we don't:
|
|
|
|
* - use memory before initializing it
|
|
|
|
* - use memory after freeing it
|
|
|
|
* Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
|
|
|
|
*/
|
|
|
|
static gboolean debug_use_memory_scrubber = FALSE;
|
|
|
|
|
2006-04-28 16:40:39 +00:00
|
|
|
#if defined (_WIN32)
|
|
|
|
static SYSTEM_INFO sysinfo;
|
2013-10-20 17:35:30 +00:00
|
|
|
static gboolean iswindowsplatform;
|
2006-04-28 16:40:39 +00:00
|
|
|
static int pagesize;
|
|
|
|
#elif defined(USE_GUARD_PAGES)
|
|
|
|
static intptr_t pagesize;
|
|
|
|
#endif /* _WIN32 / USE_GUARD_PAGES */
|
2006-03-10 05:39:57 +00:00
|
|
|
|
2012-10-08 18:33:00 +00:00
|
|
|
static void *emem_alloc_chunk(size_t size, emem_pool_t *mem);
|
|
|
|
static void *emem_alloc_glib(size_t size, emem_pool_t *mem);
|
2009-10-19 22:31:08 +00:00
|
|
|
|
2006-01-09 23:11:40 +00:00
|
|
|
/*
|
|
|
|
* Set a canary value to be placed between memchunks.
|
|
|
|
*/
|
2009-09-29 06:50:35 +00:00
|
|
|
static void
|
2009-10-19 22:31:08 +00:00
|
|
|
emem_canary_init(guint8 *canary)
|
|
|
|
{
|
2006-01-10 21:12:48 +00:00
|
|
|
int i;
|
2009-10-11 07:15:15 +00:00
|
|
|
static GRand *rand_state = NULL;
|
2006-01-09 23:11:40 +00:00
|
|
|
|
|
|
|
if (rand_state == NULL) {
|
|
|
|
rand_state = g_rand_new();
|
|
|
|
}
|
2006-01-10 21:12:48 +00:00
|
|
|
for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
|
2010-01-19 21:28:48 +00:00
|
|
|
canary[i] = (guint8) g_rand_int_range(rand_state, 1, 0x100);
|
2006-01-10 21:12:48 +00:00
|
|
|
}
|
|
|
|
return;
|
2006-01-09 23:11:40 +00:00
|
|
|
}
|
|
|
|
|
2010-01-19 21:28:48 +00:00
|
|
|
/*
 * Verify one canary in a chunk and step to the previous one.
 *
 * mem_canary is the pool's reference canary pattern (emem_pool_t.canary);
 * canary points at the canary bytes actually written into pool memory by
 * emem_alloc_chunk().  The in-memory canary is a prefix of the reference
 * pattern terminated by a NUL byte, immediately followed by a stored
 * pointer to the next (older) canary in the chain.
 *
 * Returns the stored pointer on success, or (void *) -1 if any canary
 * byte does not match the reference pattern (i.e. memory corruption was
 * detected).  If len is non-NULL it receives the total number of bytes
 * consumed: canary bytes, NUL terminator and the stored pointer.
 */
static void *
emem_canary_next(guint8 *mem_canary, guint8 *canary, int *len)
{
	void *ptr;
	int i;

	/* The first EMEM_CANARY_SIZE-1 bytes must always be present and
	 * match the reference pattern. */
	for (i = 0; i < EMEM_CANARY_SIZE-1; i++)
		if (mem_canary[i] != canary[i])
			return (void *) -1;

	/* Beyond that, the canary may extend (for alignment padding) up to
	 * EMEM_CANARY_DATA_SIZE bytes; its end is marked by a NUL byte,
	 * which is followed by the pointer to the previous canary. */
	for (; i < EMEM_CANARY_DATA_SIZE; i++) {
		if (canary[i] == '\0') {
			/* memcpy because the pointer after the canary is not
			 * necessarily aligned for a direct load. */
			memcpy(&ptr, &canary[i+1], sizeof(void *));

			if (len)
				*len = i + 1 + (int)sizeof(void *);
			return ptr;
		}

		if (mem_canary[i] != canary[i])
			return (void *) -1;
	}

	/* No NUL terminator found within the maximum canary size: the
	 * terminator itself was overwritten. */
	return (void *) -1;
}
|
|
|
|
|
2006-01-10 21:12:48 +00:00
|
|
|
/*
|
2011-02-25 15:49:28 +00:00
|
|
|
* Given an allocation size, return the amount of room needed for the canary
|
|
|
|
* (with a minimum of 8 bytes) while using the canary to pad to an 8-byte
|
|
|
|
* boundary.
|
2006-01-10 21:12:48 +00:00
|
|
|
*/
|
2006-01-11 02:05:05 +00:00
|
|
|
static guint8
|
2009-10-19 22:31:08 +00:00
|
|
|
emem_canary_pad (size_t allocation)
|
|
|
|
{
|
2006-01-10 21:12:48 +00:00
|
|
|
guint8 pad;
|
|
|
|
|
|
|
|
pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
|
|
|
|
if (pad < EMEM_CANARY_SIZE)
|
|
|
|
pad += EMEM_CANARY_SIZE;
|
|
|
|
|
|
|
|
return pad;
|
|
|
|
}
|
2006-03-10 05:39:57 +00:00
|
|
|
|
2008-08-05 02:23:35 +00:00
|
|
|
/* used for debugging canaries, will block */
|
|
|
|
#ifdef DEBUG_INTENSE_CANARY_CHECKS
|
|
|
|
gboolean intense_canary_checking = FALSE;
|
|
|
|
|
|
|
|
/* used to intensively check ep canaries
|
|
|
|
*/
|
2009-10-19 22:31:08 +00:00
|
|
|
/*
 * Walk every canary chain in the per-packet pool and abort (via g_error)
 * if any canary byte has been overwritten.
 *
 * The static 'there' buffer remembers the formatted label of the previous
 * call, so a detected corruption can be bracketed "between" two call
 * sites.  Does nothing unless intense_canary_checking was enabled at
 * emem init time (WIRESHARK_DEBUG_EP_INTENSE_CANARY).
 */
void
ep_check_canary_integrity(const char* fmt, ...)
{
	va_list ap;
	/* Initialized to "Launch" so the very first report has a sensible
	 * "previous location" label. */
	static gchar there[128] = {
		'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
		0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
	gchar here[128];
	emem_chunk_t* npc = NULL;

	if (! intense_canary_checking ) return;

	/* Format the caller-supplied label describing the current location. */
	va_start(ap,fmt);
	g_vsnprintf(here, sizeof(here), fmt, ap);
	va_end(ap);

	/* Each chunk keeps the head of its canary chain in canary_last;
	 * emem_canary_next() verifies one canary and returns the previous
	 * link, or (void *)-1 on a mismatch. */
	for (npc = ep_packet_mem.free_list; npc != NULL; npc = npc->next) {
		void *canary_next = npc->canary_last;

		while (canary_next != NULL) {
			canary_next = emem_canary_next(ep_packet_mem.canary, canary_next, NULL);
			/* XXX, check if canary_next is inside allocated memory? */

			if (canary_next == (void *) -1)
				g_error("Per-packet memory corrupted\nbetween: %s\nand: %s", there, here);
		}
	}

	/* Remember this location for the next call's "between" report. */
	g_strlcpy(there, here, sizeof(there));
}
|
|
|
|
#endif
|
|
|
|
|
2009-10-19 22:31:08 +00:00
|
|
|
static void
|
2012-10-08 18:33:00 +00:00
|
|
|
emem_init_chunk(emem_pool_t *mem)
|
2009-10-19 22:31:08 +00:00
|
|
|
{
|
|
|
|
if (mem->debug_use_canary)
|
|
|
|
emem_canary_init(mem->canary);
|
|
|
|
|
|
|
|
if (mem->debug_use_chunks)
|
|
|
|
mem->memory_alloc = emem_alloc_chunk;
|
|
|
|
else
|
|
|
|
mem->memory_alloc = emem_alloc_glib;
|
|
|
|
}
|
|
|
|
|
2013-07-31 18:26:14 +00:00
|
|
|
static gsize
|
|
|
|
emem_memory_usage(const emem_pool_t *pool)
|
|
|
|
{
|
|
|
|
gsize total_used = 0;
|
|
|
|
emem_chunk_t *chunk;
|
|
|
|
|
|
|
|
for (chunk = pool->used_list; chunk; chunk = chunk->next)
|
|
|
|
total_used += (chunk->amount_free_init - chunk->amount_free);
|
|
|
|
|
|
|
|
for (chunk = pool->free_list; chunk; chunk = chunk->next)
|
|
|
|
total_used += (chunk->amount_free_init - chunk->amount_free);
|
|
|
|
|
|
|
|
return total_used;
|
|
|
|
}
|
|
|
|
|
|
|
|
static gsize
|
|
|
|
ep_memory_usage(void)
|
|
|
|
{
|
|
|
|
return emem_memory_usage(&ep_packet_mem);
|
|
|
|
}
|
2006-01-10 21:12:48 +00:00
|
|
|
|
2005-07-22 07:46:58 +00:00
|
|
|
/* Initialize the packet-lifetime memory allocation pool.
|
2006-05-31 17:38:42 +00:00
|
|
|
* This function should be called only once when Wireshark or TShark starts
|
2005-07-22 07:46:58 +00:00
|
|
|
* up.
|
|
|
|
*/
|
2009-10-20 17:43:05 +00:00
|
|
|
static void
|
2012-10-10 19:05:30 +00:00
|
|
|
ep_init_chunk(void)
|
2005-07-22 07:46:58 +00:00
|
|
|
{
|
2013-07-31 18:26:14 +00:00
|
|
|
static const ws_mem_usage_t ep_stats = { "EP", ep_memory_usage, NULL };
|
|
|
|
|
2012-10-10 19:05:30 +00:00
|
|
|
ep_packet_mem.free_list=NULL;
|
|
|
|
ep_packet_mem.used_list=NULL;
|
2006-01-09 23:11:40 +00:00
|
|
|
|
2012-10-10 19:05:30 +00:00
|
|
|
ep_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL);
|
|
|
|
ep_packet_mem.debug_use_canary = ep_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL);
|
|
|
|
ep_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_EP_VERIFY_POINTERS") != NULL);
|
2009-08-11 09:24:56 +00:00
|
|
|
|
2008-08-05 02:23:35 +00:00
|
|
|
#ifdef DEBUG_INTENSE_CANARY_CHECKS
|
2010-01-19 21:28:48 +00:00
|
|
|
intense_canary_checking = (getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY") != NULL);
|
2008-08-05 02:23:35 +00:00
|
|
|
#endif
|
2009-03-30 19:38:47 +00:00
|
|
|
|
2012-10-10 19:05:30 +00:00
|
|
|
emem_init_chunk(&ep_packet_mem);
|
2013-07-31 18:26:14 +00:00
|
|
|
|
|
|
|
memory_usage_component_register(&ep_stats);
|
|
|
|
}
|
|
|
|
|
2009-10-20 17:43:05 +00:00
|
|
|
/* Initialize all the allocators here.
|
|
|
|
* This function should be called only once when Wireshark or TShark starts
|
|
|
|
* up.
|
|
|
|
*/
|
|
|
|
void
emem_init(void)
{
	/* Set up the per-packet pool first. */
	ep_init_chunk();

	if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
		debug_use_memory_scrubber = TRUE;

#if defined (_WIN32)
	/* Set up our guard page info for Win32 */
	GetSystemInfo(&sysinfo);
	pagesize = sysinfo.dwPageSize;

#if (_MSC_VER >= 1800)
	/*
	 * On VS2013, GetVersionEx is deprecated. Microsoft recommends to
	 * use VerifyVersionInfo instead
	 */
	{
		OSVERSIONINFOEX osvi;
		DWORDLONG dwlConditionMask = 0;
		int op = VER_EQUAL;

		SecureZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
		osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
		osvi.dwPlatformId = VER_PLATFORM_WIN32_WINDOWS;
		VER_SET_CONDITION(dwlConditionMask, VER_PLATFORMID, op);
		/* iswindowsplatform relaxes guard-page assertions on Win9x-family
		 * systems (see emem_create_chunk_gp()). */
		iswindowsplatform = VerifyVersionInfo(&osvi, VER_PLATFORMID, dwlConditionMask);
	}
#else
	/* calling GetVersionEx using the OSVERSIONINFO structure.
	 * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
	 * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
	 * See also:
	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
	 * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
	 */
	{
		OSVERSIONINFO versinfo;

		SecureZeroMemory(&versinfo, sizeof(OSVERSIONINFO));
		versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
		GetVersionEx(&versinfo);
		iswindowsplatform = (versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
	}
#endif

#elif defined(USE_GUARD_PAGES)
	/* Page size is needed to place the guard pages on page boundaries. */
	pagesize = sysconf(_SC_PAGESIZE);
	if (pagesize == -1)
		fprintf(stderr, "Warning: call to sysconf() for _SC_PAGESIZE has failed...\n");
#ifdef NEED_DEV_ZERO
	/* No anonymous mmap on this platform; map /dev/zero instead. */
	dev_zero_fd = ws_open("/dev/zero", O_RDWR);
	g_assert(dev_zero_fd != -1);
#endif
#endif /* _WIN32 / USE_GUARD_PAGES */
}
|
2009-08-11 09:24:56 +00:00
|
|
|
|
2009-09-20 12:02:40 +00:00
|
|
|
static gboolean
|
2011-04-22 12:25:01 +00:00
|
|
|
emem_verify_pointer_list(const emem_chunk_t *chunk_list, const void *ptr)
|
2009-09-20 12:02:40 +00:00
|
|
|
{
|
2013-12-14 22:07:26 +00:00
|
|
|
const gchar *cptr = (const gchar *)ptr;
|
2011-04-22 12:25:01 +00:00
|
|
|
const emem_chunk_t *chunk;
|
2009-09-24 13:37:02 +00:00
|
|
|
|
2011-04-22 12:25:01 +00:00
|
|
|
for (chunk = chunk_list; chunk; chunk = chunk->next) {
|
|
|
|
if (cptr >= (chunk->buf + chunk->free_offset_init) && cptr < (chunk->buf + chunk->free_offset))
|
|
|
|
return TRUE;
|
2009-09-20 12:02:40 +00:00
|
|
|
}
|
|
|
|
return FALSE;
|
|
|
|
}
|
|
|
|
|
2011-04-22 12:25:01 +00:00
|
|
|
static gboolean
|
2012-10-08 18:33:00 +00:00
|
|
|
emem_verify_pointer(const emem_pool_t *hdr, const void *ptr)
|
2011-04-22 12:25:01 +00:00
|
|
|
{
|
|
|
|
return emem_verify_pointer_list(hdr->free_list, ptr) || emem_verify_pointer_list(hdr->used_list, ptr);
|
|
|
|
}
|
|
|
|
|
2009-09-20 12:02:40 +00:00
|
|
|
gboolean
|
|
|
|
ep_verify_pointer(const void *ptr)
|
|
|
|
{
|
2012-10-10 19:05:30 +00:00
|
|
|
if (ep_packet_mem.debug_verify_pointers)
|
|
|
|
return emem_verify_pointer(&ep_packet_mem, ptr);
|
2010-09-02 18:02:06 +00:00
|
|
|
else
|
|
|
|
return FALSE;
|
2009-09-20 12:02:40 +00:00
|
|
|
}
|
|
|
|
|
2009-10-16 21:36:42 +00:00
|
|
|
static void
|
|
|
|
emem_scrub_memory(char *buf, size_t size, gboolean alloc)
|
|
|
|
{
|
|
|
|
guint scrubbed_value;
|
2012-12-26 05:57:06 +00:00
|
|
|
size_t offset;
|
2009-10-16 21:36:42 +00:00
|
|
|
|
|
|
|
if (!debug_use_memory_scrubber)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (alloc) /* this memory is being allocated */
|
2012-04-10 20:58:14 +00:00
|
|
|
scrubbed_value = 0xBADDCAFE;
|
2009-10-16 21:36:42 +00:00
|
|
|
else /* this memory is being freed */
|
2012-04-10 20:58:14 +00:00
|
|
|
scrubbed_value = 0xDEADBEEF;
|
2009-10-16 21:36:42 +00:00
|
|
|
|
|
|
|
/* We shouldn't need to check the alignment of the starting address
|
|
|
|
* since this is malloc'd memory (or 'pagesize' bytes into malloc'd
|
|
|
|
* memory).
|
|
|
|
*/
|
|
|
|
|
2011-04-22 04:01:30 +00:00
|
|
|
/* XXX - if the above is *NOT* true, we should use memcpy here,
|
|
|
|
* in order to avoid problems on alignment-sensitive platforms, e.g.
|
2009-10-23 21:07:28 +00:00
|
|
|
* http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
|
|
|
|
*/
|
|
|
|
|
2009-10-16 21:36:42 +00:00
|
|
|
for (offset = 0; offset + sizeof(guint) <= size; offset += sizeof(guint))
|
2011-04-22 04:01:30 +00:00
|
|
|
*(guint*)(void*)(buf+offset) = scrubbed_value;
|
2009-10-16 21:36:42 +00:00
|
|
|
|
|
|
|
/* Initialize the last bytes, if any */
|
|
|
|
if (offset < size) {
|
2012-04-10 20:58:14 +00:00
|
|
|
*(guint8*)(buf+offset) = scrubbed_value >> 24;
|
2009-10-16 21:36:42 +00:00
|
|
|
offset++;
|
|
|
|
if (offset < size) {
|
2012-04-10 20:58:14 +00:00
|
|
|
*(guint8*)(buf+offset) = (scrubbed_value >> 16) & 0xFF;
|
2009-10-16 21:36:42 +00:00
|
|
|
offset++;
|
|
|
|
if (offset < size) {
|
2012-04-10 20:58:14 +00:00
|
|
|
*(guint8*)(buf+offset) = (scrubbed_value >> 8) & 0xFF;
|
2009-10-16 21:36:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2010-01-19 20:54:27 +00:00
|
|
|
/*
 * Allocate a fresh chunk of 'size' bytes of backing memory from the OS.
 * Uses VirtualAlloc on Windows and mmap where guard pages are available,
 * so that emem_create_chunk_gp() can later revoke access to the first and
 * last page; falls back to g_malloc otherwise.  On allocation failure,
 * aborts if WIRESHARK_ABORT_ON_OUT_OF_MEMORY is set, otherwise throws
 * OutOfMemoryError.
 */
static emem_chunk_t *
emem_create_chunk(size_t size)
{
	emem_chunk_t *npc;

	npc = g_new(emem_chunk_t, 1);
	npc->next = NULL;
	npc->canary_last = NULL;

#if defined (_WIN32)
	/*
	 * MSDN documents VirtualAlloc/VirtualProtect at
	 * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
	 */

	/* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
	npc->buf = (char *)VirtualAlloc(NULL, size,
		MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);

	if (npc->buf == NULL) {
		/* Don't leak the tracking node on failure. */
		g_free(npc);
		if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
			abort();
		else
			THROW(OutOfMemoryError);
	}

#elif defined(USE_GUARD_PAGES)
	npc->buf = (char *)mmap(NULL, size,
		PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);

	if (npc->buf == MAP_FAILED) {
		/* Don't leak the tracking node on failure. */
		g_free(npc);
		if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
			abort();
		else
			THROW(OutOfMemoryError);
	}

#else /* Is there a draft in here? */
	npc->buf = g_malloc(size);
	/* g_malloc() can't fail */
#endif

	/* The whole buffer is initially free; guard-page adjustment (if any)
	 * happens in emem_create_chunk_gp(). */
	npc->amount_free = npc->amount_free_init = (unsigned int) size;
	npc->free_offset = npc->free_offset_init = 0;
	return npc;
}
|
|
|
|
|
|
|
|
/*
 * Create a chunk and, where the platform supports it, turn the first and
 * last page-aligned pages inside the buffer into inaccessible guard pages
 * so that over/underruns of pool memory fault immediately.  The chunk's
 * amount_free_init/free_offset_init are adjusted to describe only the
 * usable region between the two guard pages.
 */
static emem_chunk_t *
emem_create_chunk_gp(size_t size)
{
#if defined (_WIN32)
	BOOL ret;
	char *buf_end, *prot1, *prot2;
	DWORD oldprot;
#elif defined(USE_GUARD_PAGES)
	int ret;
	char *buf_end, *prot1, *prot2;
#endif /* _WIN32 / USE_GUARD_PAGES */
	emem_chunk_t *npc;

	npc = emem_create_chunk(size);

#if defined (_WIN32)
	buf_end = npc->buf + size;

	/* Align our guard pages on page-sized boundaries */
	/* prot1: first page boundary at or after buf; prot2: last page
	 * boundary at least one page before buf_end. */
	prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
	prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);

	/* VirtualProtect may legitimately fail on Win9x-family systems,
	 * hence the iswindowsplatform escape hatch in the assertions. */
	ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
	g_assert(ret != 0 || iswindowsplatform);
	ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
	g_assert(ret != 0 || iswindowsplatform);

	/* Usable region: between the end of the low guard page and the
	 * start of the high guard page. */
	npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
	npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
#elif defined(USE_GUARD_PAGES)
	buf_end = npc->buf + size;

	/* Align our guard pages on page-sized boundaries */
	prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
	prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);

	ret = mprotect(prot1, pagesize, PROT_NONE);
	g_assert(ret != -1);
	ret = mprotect(prot2, pagesize, PROT_NONE);
	g_assert(ret != -1);

	npc->amount_free_init = (unsigned int)(prot2 - prot1 - pagesize);
	npc->free_offset_init = (unsigned int)((prot1 - npc->buf) + pagesize);
#else
	/* No guard pages: the whole buffer is usable. */
	npc->amount_free_init = size;
	npc->free_offset_init = 0;
#endif /* USE_GUARD_PAGES */

	npc->amount_free = npc->amount_free_init;
	npc->free_offset = npc->free_offset_init;
	return npc;
}
|
2006-01-09 23:11:40 +00:00
|
|
|
|
2009-08-09 19:23:13 +00:00
|
|
|
/*
 * Chunked allocator: carve 'size' bytes (plus canary/alignment padding)
 * out of the pool's current free chunk, creating or advancing chunks as
 * needed.  When canaries are enabled, a random canary pattern, a NUL
 * terminator and a link to the chunk's previous canary are written right
 * after the returned region, forming a chain that
 * ep_check_canary_integrity()/emem_canary_next() can later verify.
 */
static void *
emem_alloc_chunk(size_t size, emem_pool_t *mem)
{
	void *buf;

	size_t asize = size;
	gboolean use_canary = mem->debug_use_canary;
	guint8 pad;
	emem_chunk_t *free_list;

	/* Allocate room for at least 8 bytes of canary plus some padding
	 * so the canary ends on an 8-byte boundary.
	 * But first add the room needed for the pointer to the next canary
	 * (so the entire allocation will end on an 8-byte boundary).
	 */
	if (use_canary) {
		asize += sizeof(void *);
		pad = emem_canary_pad(asize);
	} else
		pad = (WS_MEM_ALIGN - (asize & (WS_MEM_ALIGN-1))) & (WS_MEM_ALIGN-1);

	asize += pad;

	/* make sure we don't try to allocate too much (arbitrary limit) */
	DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));

	if (!mem->free_list)
		mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);

	/* oops, we need to allocate more memory to serve this request
	 * than we have free. move this node to the used list and try again
	 */
	if(asize > mem->free_list->amount_free) {
		emem_chunk_t *npc;
		npc=mem->free_list;
		mem->free_list=mem->free_list->next;
		npc->next=mem->used_list;
		mem->used_list=npc;

		if (!mem->free_list)
			mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
	}

	free_list = mem->free_list;

	/* Bump-pointer allocation from the head chunk. */
	buf = free_list->buf + free_list->free_offset;

	free_list->amount_free -= (unsigned int) asize;
	free_list->free_offset += (unsigned int) asize;

	if (use_canary) {
		/* Lay out, immediately after the returned region:
		 * pad-1 canary bytes, a NUL terminator, then (at an 8-byte
		 * boundary) the pointer to the chunk's previous canary. */
		char *cptr = (char *)buf + size;

		memcpy(cptr, mem->canary, pad-1);
		cptr[pad-1] = '\0';
		memcpy(cptr + pad, &free_list->canary_last, sizeof(void *));

		/* This allocation's canary becomes the new chain head. */
		free_list->canary_last = cptr;
	}

	return buf;
}
|
2006-01-09 23:11:40 +00:00
|
|
|
|
2009-10-19 22:31:08 +00:00
|
|
|
static void *
|
2012-10-08 18:33:00 +00:00
|
|
|
emem_alloc_glib(size_t size, emem_pool_t *mem)
|
2009-10-19 22:31:08 +00:00
|
|
|
{
|
|
|
|
emem_chunk_t *npc;
|
|
|
|
|
|
|
|
npc=g_new(emem_chunk_t, 1);
|
|
|
|
npc->next=mem->used_list;
|
2013-03-15 05:42:24 +00:00
|
|
|
npc->buf=(char *)g_malloc(size);
|
2010-01-19 21:28:48 +00:00
|
|
|
npc->canary_last = NULL;
|
2009-10-19 22:31:08 +00:00
|
|
|
mem->used_list=npc;
|
|
|
|
/* There's no padding/alignment involved (from our point of view) when
|
|
|
|
* we fetch the memory directly from the system pool, so WYSIWYG */
|
2013-03-10 17:49:53 +00:00
|
|
|
npc->amount_free = npc->free_offset_init = 0;
|
|
|
|
npc->free_offset = npc->amount_free_init = (unsigned int) size;
|
2009-10-19 22:31:08 +00:00
|
|
|
|
|
|
|
return npc->buf;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* allocate 'size' amount of memory from the given pool, via whichever
 * allocator emem_init_chunk() selected (chunked or per-object glib),
 * optionally scrubbing the new memory with a debug pattern. */
static void *
emem_alloc(size_t size, emem_pool_t *mem)
{
	void *buf;

#if 0
	/* For testing wmem, effectively redirects most emem memory to wmem.
	 * You will also have to comment out several assertions in wmem_core.c,
	 * specifically anything g_assert(allocator->in_scope), since it is much
	 * stricter about when it is permitted to be called. */
	if (mem == &ep_packet_mem) {
		return wmem_alloc(wmem_packet_scope(), size);
	}
#endif

	buf = mem->memory_alloc(size, mem);

	/* XXX - this is a waste of time if the allocator function is going to
	 * memset this straight back to 0.
	 */
	emem_scrub_memory((char *)buf, size, TRUE);

	return buf;
}
|
2009-08-09 19:23:13 +00:00
|
|
|
|
|
|
|
/* allocate 'size' amount of memory with an allocation lifetime until the
 * next packet.  The memory is released en masse when the per-packet pool
 * is freed; callers must never free it individually.
 */
void *
ep_alloc(size_t size)
{
	return emem_alloc(size, &ep_packet_mem);
}
|
|
|
|
|
2015-01-18 22:16:55 +00:00
|
|
|
/* Like ep_alloc(), but the returned packet-scope memory is zero-filled. */
void *
ep_alloc0(size_t size)
{
	void *ptr = ep_alloc(size);

	memset(ptr, 0, size);
	return ptr;
}
|
|
|
|
|
2011-03-26 02:11:42 +00:00
|
|
|
static gchar *
|
|
|
|
emem_strdup_vprintf(const gchar *fmt, va_list ap, void *allocator(size_t))
|
2009-10-19 22:31:08 +00:00
|
|
|
{
|
2005-10-10 08:24:56 +00:00
|
|
|
va_list ap2;
|
2009-04-07 16:36:52 +00:00
|
|
|
gsize len;
|
2005-10-10 08:24:56 +00:00
|
|
|
gchar* dst;
|
|
|
|
|
|
|
|
G_VA_COPY(ap2, ap);
|
|
|
|
|
|
|
|
len = g_printf_string_upper_bound(fmt, ap);
|
|
|
|
|
2013-03-15 05:42:24 +00:00
|
|
|
dst = (gchar *)allocator(len+1);
|
2009-04-07 16:36:52 +00:00
|
|
|
g_vsnprintf (dst, (gulong) len, fmt, ap2);
|
2005-10-10 08:24:56 +00:00
|
|
|
va_end(ap2);
|
|
|
|
|
|
|
|
return dst;
|
|
|
|
}
|
|
|
|
|
2015-01-16 16:09:51 +00:00
|
|
|
static gchar *
|
2011-03-26 02:11:42 +00:00
|
|
|
ep_strdup_vprintf(const gchar *fmt, va_list ap)
|
|
|
|
{
|
|
|
|
return emem_strdup_vprintf(fmt, ap, ep_alloc);
|
|
|
|
}
|
|
|
|
|
|
|
|
gchar *
|
|
|
|
ep_strdup_printf(const gchar *fmt, ...)
|
2009-10-19 22:31:08 +00:00
|
|
|
{
|
2005-07-24 00:29:57 +00:00
|
|
|
va_list ap;
|
2011-03-26 02:11:42 +00:00
|
|
|
gchar *dst;
|
2006-01-09 23:11:40 +00:00
|
|
|
|
2011-03-26 02:11:42 +00:00
|
|
|
va_start(ap, fmt);
|
2005-10-10 08:24:56 +00:00
|
|
|
dst = ep_strdup_vprintf(fmt, ap);
|
2005-07-24 00:29:57 +00:00
|
|
|
va_end(ap);
|
|
|
|
return dst;
|
|
|
|
}
|
|
|
|
|
2005-08-16 00:55:08 +00:00
|
|
|
|
2009-08-09 20:10:19 +00:00
|
|
|
/* release all allocated memory back to the pool. */
static void
emem_free_all(emem_pool_t *mem)
{
	/* Snapshot the mode flag: chunked pools recycle their chunks, while
	 * non-chunked (per-allocation) pools free each block outright. */
	gboolean use_chunks = mem->debug_use_chunks;

	emem_chunk_t *npc;

	/* move all used chunks over to the free list */
	while(mem->used_list){
		npc=mem->used_list;
		mem->used_list=mem->used_list->next;
		npc->next=mem->free_list;
		mem->free_list=npc;
	}

	/* clear them all out */
	npc = mem->free_list;
	while (npc != NULL) {
		if (use_chunks) {
			/* Walk the chain of canaries planted after each
			 * allocation in this chunk; emem_canary_next()
			 * returns (void *)-1 when a canary pattern has been
			 * overwritten, i.e. a buffer overrun occurred. */
			while (npc->canary_last != NULL) {
				npc->canary_last = emem_canary_next(mem->canary, (guint8 *)npc->canary_last, NULL);
				/* XXX, check if canary_last is inside allocated memory? */

				if (npc->canary_last == (void *) -1)
					g_error("Memory corrupted");
			}

			/* Scrub only the region of the chunk that was
			 * actually handed out since the last reset. */
			emem_scrub_memory((npc->buf + npc->free_offset_init),
					  (npc->free_offset - npc->free_offset_init),
					  FALSE);

			/* Reset the chunk's bookkeeping so it can be reused
			 * without being reallocated. */
			npc->amount_free = npc->amount_free_init;
			npc->free_offset = npc->free_offset_init;
			npc = npc->next;
		} else {
			/* Non-chunked mode: each list node owns exactly one
			 * allocation; scrub it and release it to glib. */
			emem_chunk_t *next = npc->next;

			emem_scrub_memory(npc->buf, npc->amount_free_init, FALSE);

			g_free(npc->buf);
			g_free(npc);
			npc = next;
		}
	}

	if (!use_chunks) {
		/* We've freed all this memory already */
		mem->free_list = NULL;
	}
}
|
|
|
|
|
2012-10-10 19:05:30 +00:00
|
|
|
/* release all allocated memory back to the pool. */
|
2012-10-08 16:42:54 +00:00
|
|
|
void
|
2012-10-10 19:05:30 +00:00
|
|
|
ep_free_all(void)
|
2009-08-09 20:10:19 +00:00
|
|
|
{
|
2012-10-10 19:05:30 +00:00
|
|
|
emem_free_all(&ep_packet_mem);
|
2005-08-12 08:51:08 +00:00
|
|
|
}
|
2009-08-09 20:10:19 +00:00
|
|
|
|
2009-04-14 14:08:19 +00:00
|
|
|
/*
|
|
|
|
* Editor modelines
|
|
|
|
*
|
|
|
|
* Local Variables:
|
|
|
|
* c-basic-offset: 8
|
|
|
|
* tab-width: 8
|
|
|
|
* indent-tabs-mode: t
|
|
|
|
* End:
|
|
|
|
*
|
2011-09-21 17:49:11 +00:00
|
|
|
* ex: set shiftwidth=8 tabstop=8 noexpandtab:
|
2009-04-14 14:08:19 +00:00
|
|
|
* :indentSize=8:tabSize=8:noTabs=false:
|
|
|
|
*/
|