Diffstat (limited to 'compat/nedmalloc/malloc.c.h')
-rw-r--r-- | compat/nedmalloc/malloc.c.h | 37
1 files changed, 23 insertions, 14 deletions
diff --git a/compat/nedmalloc/malloc.c.h b/compat/nedmalloc/malloc.c.h
index 74c42e3162..b833ff9225 100644
--- a/compat/nedmalloc/malloc.c.h
+++ b/compat/nedmalloc/malloc.c.h
@@ -100,7 +100,7 @@
   If you don't like either of these options, you can define
   CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
-  else. And if if you are sure that your program using malloc has
+  else. And if you are sure that your program using malloc has
   no errors or vulnerabilities, you can define INSECURE to 1,
   which might (or might not) provide a small performance improvement.
@@ -484,6 +484,10 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
 #define DLMALLOC_VERSION 20804
 #endif /* DLMALLOC_VERSION */
 
+#if defined(linux)
+#define _GNU_SOURCE 1
+#endif
+
 #ifndef WIN32
 #ifdef _WIN32
 #define WIN32 1
@@ -495,7 +499,9 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
 #endif  /* WIN32 */
 #ifdef WIN32
 #define WIN32_LEAN_AND_MEAN
+#ifndef _WIN32_WINNT
 #define _WIN32_WINNT 0x403
+#endif
 #include <windows.h>
 #define HAVE_MMAP 1
 #define HAVE_MORECORE 0
@@ -714,6 +720,9 @@ struct mallinfo {
   inlining are defined as macros, so these aren't used for them.
 */
 
+#ifdef __MINGW64_VERSION_MAJOR
+#undef FORCEINLINE
+#endif
 #ifndef FORCEINLINE
   #if defined(__GNUC__)
 #define FORCEINLINE __inline __attribute__ ((always_inline))
@@ -1376,6 +1385,7 @@ LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
 
 /*** Atomic operations ***/
 #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
+  #undef _ReadWriteBarrier
   #define _ReadWriteBarrier() __sync_synchronize()
 #else
 static __inline__ __attribute__((always_inline)) long __sync_lock_test_and_set(volatile long * const Target, const long Value)
@@ -1792,9 +1802,10 @@ struct win32_mlock_t
   volatile long threadid;
 };
 
+static inline int return_0(int i) { return 0; }
 #define MLOCK_T struct win32_mlock_t
 #define CURRENT_THREAD win32_getcurrentthreadid()
-#define INITIAL_LOCK(sl) (memset(sl, 0, sizeof(MLOCK_T)), 0)
+#define INITIAL_LOCK(sl) (memset(sl, 0, sizeof(MLOCK_T)), return_0(0))
 #define ACQUIRE_LOCK(sl) win32_acquire_lock(sl)
 #define RELEASE_LOCK(sl) win32_release_lock(sl)
 #define TRY_LOCK(sl) win32_try_lock(sl)
@@ -1802,7 +1813,7 @@ struct win32_mlock_t
 
 static MLOCK_T malloc_global_mutex = { 0, 0, 0};
 
-static FORCEINLINE long win32_getcurrentthreadid() {
+static FORCEINLINE long win32_getcurrentthreadid(void) {
 #ifdef _MSC_VER
 #if defined(_M_IX86)
     long *threadstruct=(long *)__readfsdword(0x18);
@@ -2069,7 +2080,7 @@ static void init_malloc_global_mutex() {
   Each freshly allocated chunk must have both cinuse and pinuse set.
   That is, each allocated chunk borders either a previously allocated
   and still in-use chunk, or the base of its memory arena. This is
-  ensured by making all allocations from the the `lowest' part of any
+  ensured by making all allocations from the `lowest' part of any
   found chunk. Further, no free chunk physically borders another one,
   so each free chunk is known to be preceded and followed by either
   inuse chunks or the ends of memory.
@@ -2279,12 +2290,12 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   of the same size are arranged in a circularly-linked list, with only
   the oldest chunk (the next to be used, in our FIFO ordering)
   actually in the tree. (Tree members are distinguished by a non-null
-  parent pointer.) If a chunk with the same size an an existing node
+  parent pointer.) If a chunk with the same size as an existing node
   is inserted, it is linked off the existing node using pointers that
   work in the same way as fd/bk pointers of small chunks.
 
   Each tree contains a power of 2 sized range of chunk sizes (the
-  smallest is 0x100 <= x < 0x180), which is is divided in half at each
+  smallest is 0x100 <= x < 0x180), which is divided in half at each
   tree level, with the chunks in the smaller half of the range (0x100
   <= x < 0x140 for the top nose) in the left subtree and the larger
   half (0x140 <= x < 0x180) in the right subtree. This is, of course,
@@ -3598,8 +3609,8 @@ static void internal_malloc_stats(mstate m) {
        and choose its bk node as its replacement.
     2. If x was the last node of its size, but not a leaf node, it must
        be replaced with a leaf node (not merely one with an open left or
-       right), to make sure that lefts and rights of descendents
-       correspond properly to bit masks. We use the rightmost descendent
+       right), to make sure that lefts and rights of descendants
+       correspond properly to bit masks. We use the rightmost descendant
        of x. We could use any other leaf, but this is easy to locate and
        tends to counteract removal of leftmosts elsewhere, and so keeps
        paths shorter than minimally guaranteed. This doesn't loop much
@@ -3943,7 +3954,7 @@ static void* sys_alloc(mstate m, size_t nb) {
     least-preferred order):
     1. A call to MORECORE that can normally contiguously extend memory.
        (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
-       or main space is mmapped or a previous contiguous call failed)
+       main space is mmapped or a previous contiguous call failed)
     2. A call to MMAP new space (disabled if not HAVE_MMAP). Note that
        under the default settings, if MORECORE is unable to fulfill a
        request, and HAVE_MMAP is true, then mmap is
@@ -4778,7 +4789,7 @@ void* dlmalloc(size_t bytes) {
 
 void dlfree(void* mem) {
   /*
-     Consolidate freed chunks with preceeding or succeeding bordering
+     Consolidate freed chunks with preceding or succeeding bordering
      free chunks, if they exist, and then place in a bin. Intermixed
      with special cases for top, dv, mmapped chunks, and usage errors.
   */
@@ -5680,10 +5691,10 @@ History:
          Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
       * Use last_remainder in more cases.
       * Pack bins using idea from colin@nyx10.cs.du.edu
-      * Use ordered bins instead of best-fit threshhold
+      * Use ordered bins instead of best-fit threshold
       * Eliminate block-local decls to simplify tracing and debugging.
       * Support another case of realloc via move into top
-      * Fix error occuring when initial sbrk_base not word-aligned.
+      * Fix error occurring when initial sbrk_base not word-aligned.
       * Rely on page size for units instead of SBRK_UNIT to avoid
        surprises about sbrk alignment conventions.
       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
@@ -5748,5 +5759,3 @@ History:
          structure of old version,  but most details differ.)
 
 */
-
-
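
Editor's note on the INITIAL_LOCK hunk above: the patch replaces the bare
trailing 0 in the comma expression with a call to a trivial helper,
return_0(). The diff itself gives no rationale, but the usual reason for
this pattern is to quiet GCC's -Wunused-value warning ("right-hand operand
of comma expression has no effect") at call sites that discard the macro's
result, since an ignored function-call result is not flagged. A minimal,
self-contained sketch of the pattern under that assumption (my_lock and
INIT_LOCK are hypothetical stand-ins, not names from the patch):

    #include <string.h>

    /* Stand-in for win32_mlock_t. */
    struct my_lock { volatile long l, c, threadid; };

    /* Funnel the constant through a function call: a bare `, 0)` at the
       end of a comma expression can trigger -Wunused-value when the
       result is discarded, while an ignored function-call result does
       not. The unused parameter mirrors the helper in the patch. */
    static inline int return_0(int i) { return 0; }

    #define INIT_LOCK(sl) (memset((sl), 0, sizeof(struct my_lock)), return_0(0))

    int main(void)
    {
        struct my_lock lk;
        INIT_LOCK(&lk);          /* result discarded: compiles cleanly   */
        return INIT_LOCK(&lk);   /* result used: macro still yields 0    */
    }

Either way the macro keeps its documented contract (evaluate to 0 on
success), so callers that do check the result are unaffected.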