Diffstat (limited to 'compat/nedmalloc')
-rw-r--r-- | compat/nedmalloc/Readme.txt  |  2
-rw-r--r-- | compat/nedmalloc/malloc.c.h  | 37
-rw-r--r-- | compat/nedmalloc/nedmalloc.c | 30
3 files changed, 32 insertions, 37 deletions
diff --git a/compat/nedmalloc/Readme.txt b/compat/nedmalloc/Readme.txt
index e46d8f112c..07cbf50c0f 100644
--- a/compat/nedmalloc/Readme.txt
+++ b/compat/nedmalloc/Readme.txt
@@ -97,7 +97,7 @@ Chew for reporting this.
 
 v1.04alpha_svn915 7th October 2006:
  * Fixed failure to unlock thread cache list if allocating a new list failed.
-Thanks to Dmitry Chichkov for reporting this. Futher thanks to Aleksey Sanin.
+Thanks to Dmitry Chichkov for reporting this. Further thanks to Aleksey Sanin.
  * Fixed realloc(0, <size>) segfaulting. Thanks to Dmitry Chichkov for
 reporting this.
  * Made config defines #ifndef so they can be overridden by the build system.
diff --git a/compat/nedmalloc/malloc.c.h b/compat/nedmalloc/malloc.c.h
index ff7c2c4fd8..814845d4b3 100644
--- a/compat/nedmalloc/malloc.c.h
+++ b/compat/nedmalloc/malloc.c.h
@@ -484,6 +484,10 @@ MAX_RELEASE_CHECK_RATE   default: 4095 unless not HAVE_MMAP
 #define DLMALLOC_VERSION 20804
 #endif /* DLMALLOC_VERSION */
 
+#if defined(linux)
+#define _GNU_SOURCE 1
+#endif
+
 #ifndef WIN32
 #ifdef _WIN32
 #define WIN32 1
@@ -495,7 +499,9 @@ MAX_RELEASE_CHECK_RATE   default: 4095 unless not HAVE_MMAP
 #endif  /* WIN32 */
 #ifdef WIN32
 #define WIN32_LEAN_AND_MEAN
+#ifndef _WIN32_WINNT
 #define _WIN32_WINNT 0x403
+#endif
 #include <windows.h>
 #define HAVE_MMAP 1
 #define HAVE_MORECORE 0
@@ -714,6 +720,9 @@ struct mallinfo {
   inlining are defined as macros, so these aren't used for them.
 */
 
+#ifdef __MINGW64_VERSION_MAJOR
+#undef FORCEINLINE
+#endif
 #ifndef FORCEINLINE
   #if defined(__GNUC__)
 #define FORCEINLINE __inline __attribute__ ((always_inline))
@@ -1376,6 +1385,7 @@ LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
 
 /*** Atomic operations ***/
 #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
+  #undef _ReadWriteBarrier
   #define _ReadWriteBarrier() __sync_synchronize()
 #else
   static __inline__ __attribute__((always_inline)) long __sync_lock_test_and_set(volatile long * const Target, const long Value)
@@ -1554,7 +1564,7 @@ static FORCEINLINE void* win32direct_mmap(size_t size) {
   return (ptr != 0)? ptr: MFAIL;
 }
 
-/* This function supports releasing coalesed segments */
+/* This function supports releasing coalesced segments */
 static FORCEINLINE int win32munmap(void* ptr, size_t size) {
   MEMORY_BASIC_INFORMATION minfo;
   char* cptr = (char*)ptr;
@@ -1645,7 +1655,7 @@ static FORCEINLINE int win32munmap(void* ptr, size_t size) {
 #define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
 #endif /* HAVE_MMAP && HAVE_MREMAP */
 
-/* mstate bit set if continguous morecore disabled or failed */
+/* mstate bit set if contiguous morecore disabled or failed */
 #define USE_NONCONTIGUOUS_BIT (4U)
 
 /* segment bit set in create_mspace_with_base */
@@ -1745,10 +1755,10 @@ static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) {
   assert(sl->l != 0);
   assert(sl->threadid == CURRENT_THREAD);
   if (--sl->c == 0) {
-    sl->threadid = 0;
     volatile unsigned int* lp = &sl->l;
     int prev = 0;
     int ret;
+    sl->threadid = 0;
     __asm__ __volatile__ ("lock; xchgl %0, %1"
                           : "=r" (ret)
                           : "m" (*(lp)), "0"(prev)
@@ -1792,9 +1802,10 @@ struct win32_mlock_t
   volatile long threadid;
 };
 
+static inline int return_0(int i) { return 0; }
 #define MLOCK_T               struct win32_mlock_t
 #define CURRENT_THREAD        win32_getcurrentthreadid()
-#define INITIAL_LOCK(sl)      (memset(sl, 0, sizeof(MLOCK_T)), 0)
+#define INITIAL_LOCK(sl)      (memset(sl, 0, sizeof(MLOCK_T)), return_0(0))
 #define ACQUIRE_LOCK(sl)      win32_acquire_lock(sl)
 #define RELEASE_LOCK(sl)      win32_release_lock(sl)
 #define TRY_LOCK(sl)          win32_try_lock(sl)
@@ -1802,7 +1813,7 @@ struct win32_mlock_t
 
 static MLOCK_T malloc_global_mutex = { 0, 0, 0};
 
-static FORCEINLINE long win32_getcurrentthreadid() {
+static FORCEINLINE long win32_getcurrentthreadid(void) {
 #ifdef _MSC_VER
 #if defined(_M_IX86)
   long *threadstruct=(long *)__readfsdword(0x18);
@@ -2474,7 +2485,7 @@ typedef struct malloc_segment* msegmentptr;
 
   Trim support
   Fields holding the amount of unused topmost memory that should trigger
-  timming, and a counter to force periodic scanning to release unused
+  timing, and a counter to force periodic scanning to release unused
   non-topmost segments.
 
   Locking
@@ -3055,7 +3066,7 @@ static int init_mparams(void) {
 #if !ONLY_MSPACES
     /* Set up lock for main malloc area */
     gm->mflags = mparams.default_mflags;
-    INITIAL_LOCK(&gm->mutex);
+    (void)INITIAL_LOCK(&gm->mutex);
 #endif
 
 #if (FOOTERS && !INSECURE)
@@ -3598,8 +3609,8 @@ static void internal_malloc_stats(mstate m) {
       and choose its bk node as its replacement.
    2. If x was the last node of its size, but not a leaf node, it must
       be replaced with a leaf node (not merely one with an open left or
-      right), to make sure that lefts and rights of descendents
-      correspond properly to bit masks.  We use the rightmost descendent
+      right), to make sure that lefts and rights of descendants
+      correspond properly to bit masks.  We use the rightmost descendant
       of x.  We could use any other leaf, but this is easy to locate and
       tends to counteract removal of leftmosts elsewhere, and so keeps
       paths shorter than minimally guaranteed.  This doesn't loop much
@@ -4778,7 +4789,7 @@ void* dlmalloc(size_t bytes) {
 
 void dlfree(void* mem) {
   /*
-     Consolidate freed chunks with preceeding or succeeding bordering
+     Consolidate freed chunks with preceding or succeeding bordering
      free chunks, if they exist, and then place in a bin.  Intermixed
     with special cases for top, dv, mmapped chunks, and usage errors.
   */
@@ -5006,7 +5017,7 @@ static mstate init_user_mstate(char* tbase, size_t tsize) {
   mchunkptr msp = align_as_chunk(tbase);
   mstate m = (mstate)(chunk2mem(msp));
   memset(m, 0, msize);
-  INITIAL_LOCK(&m->mutex);
+  (void)INITIAL_LOCK(&m->mutex);
   msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
   m->seg.base = m->least_addr = tbase;
   m->seg.size = m->footprint = m->max_footprint = tsize;
@@ -5680,10 +5691,10 @@ History:
          Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
       * Use last_remainder in more cases.
       * Pack bins using idea from  colin@nyx10.cs.du.edu
-      * Use ordered bins instead of best-fit threshhold
+      * Use ordered bins instead of best-fit threshold
       * Eliminate block-local decls to simplify tracing and debugging.
       * Support another case of realloc via move into top
-      * Fix error occuring when initial sbrk_base not word-aligned.
+      * Fix error occurring when initial sbrk_base not word-aligned.
       * Rely on page size for units instead of SBRK_UNIT to
         avoid surprises about sbrk alignment conventions.
       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
diff --git a/compat/nedmalloc/nedmalloc.c b/compat/nedmalloc/nedmalloc.c
index 91c4e7f27b..edb438a777 100644
--- a/compat/nedmalloc/nedmalloc.c
+++ b/compat/nedmalloc/nedmalloc.c
@@ -159,8 +159,8 @@ struct mallinfo nedmallinfo(void) THROWSPEC { return nedpmallinfo(0); }
 #endif
 int    nedmallopt(int parno, int value) THROWSPEC { return nedpmallopt(0, parno, value); }
 int    nedmalloc_trim(size_t pad) THROWSPEC       { return nedpmalloc_trim(0, pad); }
-void   nedmalloc_stats() THROWSPEC                { nedpmalloc_stats(0); }
-size_t nedmalloc_footprint() THROWSPEC            { return nedpmalloc_footprint(0); }
+void   nedmalloc_stats(void) THROWSPEC            { nedpmalloc_stats(0); }
+size_t nedmalloc_footprint(void) THROWSPEC        { return nedpmalloc_footprint(0); }
 void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { return nedpindependent_calloc(0, elemsno, elemsize, chunks); }
 void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC { return nedpindependent_comalloc(0, elems, sizes, chunks); }
 
@@ -510,7 +510,7 @@ static void threadcache_free(nedpool *p, threadcache *tc, int mymspace, void *me
 	assert(idx<=THREADCACHEMAXBINS);
 	if(tck==*binsptr)
 	{
-		fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", tck);
+		fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", (void *)tck);
 		abort();
 	}
 #ifdef FULLSANITYCHECKS
@@ -938,32 +938,16 @@ void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **
 	void **ret;
 	threadcache *tc;
 	int mymspace;
-    size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t));
-    if(!adjustedsizes) return 0;
-    for(i=0; i<elems; i++)
-        adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? sizeof(threadcacheblk) : sizes[i];
+	size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t));
+	if(!adjustedsizes) return 0;
+	for(i=0; i<elems; i++)
+		adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? sizeof(threadcacheblk) : sizes[i];
 	GetThreadCache(&p, &tc, &mymspace, 0);
 	GETMSPACE(m, p, tc, mymspace, 0, ret=mspace_independent_comalloc(m, elems, adjustedsizes, chunks));
 	return ret;
 }
 
-#ifdef OVERRIDE_STRDUP
-/*
- * This implementation is purely there to override the libc version, to
- * avoid a crash due to allocation and free on different 'heaps'.
- */
-char *strdup(const char *s1)
-{
-	char *s2 = 0;
-	if (s1) {
-		s2 = malloc(strlen(s1) + 1);
-		strcpy(s2, s1);
-	}
-	return s2;
-}
-#endif
-
 #if defined(__cplusplus)
 }
 #endif
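
Note (not part of the patch): the reordering in the pthread_release_lock()
hunk moves "sl->threadid = 0;" below the local declarations because mixing
statements and declarations is invalid C89, which this codebase still
targets. A minimal sketch of the rule, under a hypothetical file name:

/* decl_after_stmt.c - compile with: gcc -std=c89 -pedantic -c decl_after_stmt.c */
int main(void)
{
	int a = 1;
	a = 2;         /* a statement first ...                            */
	int b = a + 1; /* ... then a declaration: "ISO C90 forbids mixed
	                  declarations and code" - the warning that hunk avoids */
	return b;
}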
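
Note (not part of the patch): the return_0() helper and the (void) casts on
INITIAL_LOCK() both quiet gcc's -Wunused-value. A comma expression whose
right-hand operand is a bare constant warns when the value is discarded;
routing the constant through a call keeps the macro's value testable while
staying silent as a statement. A small sketch with hypothetical macro names:

/* comma_warn.c - compile with: gcc -Wall -c comma_warn.c */
#include <string.h>

static inline int return_0(int i) { return 0; }

#define INIT_OLD(p) (memset(p, 0, sizeof(*(p))), 0)           /* warns when unused */
#define INIT_NEW(p) (memset(p, 0, sizeof(*(p))), return_0(0)) /* stays quiet       */

int main(void)
{
	int x;
	INIT_OLD(&x); /* gcc -Wall: right-hand operand of comma expression has no effect */
	INIT_NEW(&x); /* no warning: -Wunused-value does not fire on a call expression   */
	return 0;
}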