Diffstat (limited to 'Src/mem.c')
-rw-r--r--  Src/mem.c  89
1 file changed, 75 insertions(+), 14 deletions(-)
diff --git a/Src/mem.c b/Src/mem.c
index b9569ea0c..e31145e1e 100644
--- a/Src/mem.c
+++ b/Src/mem.c
@@ -79,6 +79,12 @@
#include <sys/mman.h>
+/*
+ * This definition is designed to enable use of memory mapping on MacOS.
+ * However, performance tests indicate that MacOS mapped regions are
+ * somewhat slower to allocate than memory from malloc(), so whether
+ * using this improves performance depends on details of zhalloc().
+ */
#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
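
The hunk above maps MAP_ANON to MAP_ANONYMOUS so that the mmap-based arena
allocation further down compiles on systems (notably MacOS) that only define
the older name. A minimal sketch of how an arena allocator obtains anonymous
memory this way; arena_map() is a hypothetical helper, not zsh's actual
mmap_heap_alloc():

    #include <stddef.h>
    #include <sys/mman.h>

    #if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
    #define MAP_ANONYMOUS MAP_ANON
    #endif

    /* Request len bytes of zero-filled anonymous memory from the
     * kernel, as an arena allocator would; NULL on failure. */
    static void *
    arena_map(size_t len)
    {
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;
    }
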
@@ -294,7 +300,7 @@ pushheap(void)
#endif
for (h = heaps; h; h = h->next) {
- DPUTS(!h->used, "BUG: empty heap");
+ DPUTS(!h->used && h->next, "BUG: empty heap");
hs = (Heapstack) zalloc(sizeof(*hs));
hs->next = h->sp;
h->sp = hs;
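
DPUTS() is zsh's debug-build assertion: it emits the message when its
condition is true. The relaxed condition above encodes the new invariant
introduced by the freeheap() changes below, which deliberately leave one
empty arena at the tail of the list, so emptiness is only a bug for a
non-tail arena. A minimal model of the check, assuming a simplified
stand-in for the real DPUTS in zsh.h:

    #include <stddef.h>
    #include <stdio.h>

    #define DPUTS(cond, msg) \
        do { if (cond) fprintf(stderr, "%s\n", msg); } while (0)

    struct arena { size_t used; struct arena *next; };

    /* An empty arena is legitimate only at the end of the chain. */
    static void
    check_arena(struct arena *h)
    {
        DPUTS(!h->used && h->next, "BUG: empty heap");
    }
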
@@ -334,16 +340,20 @@ freeheap(void)
*
* Whenever fheap is NULL here, the loop below sweeps back over the
* entire heap list again, resetting the free space in every arena to
- * the amount stashed by pushheap() and finding the first arena with
+ * the amount stashed by pushheap() and finding the arena with the most
* free space to optimize zhalloc()'s next search. When there's a lot
* of stuff already on the heap, this is an enormous amount of work,
* and performance goes to hell.
*
+ * Therefore, we defer freeing the most recently allocated arena until
+ * we reach popheap().
+ *
* However, if the arena to which fheap points is unused, we want to
- * free it, so we have no choice but to do the sweep for a new fheap.
+ * reclaim space in earlier arenas, so we have no choice but to do the
+ * sweep for a new fheap.
*/
if (fheap && !fheap->sp)
- fheap = NULL; /* We used to do this unconditionally */
+ fheap = NULL; /* We used to do this unconditionally */
/*
* In other cases, either fheap is already correct, or it has never
* been set and this loop will do it, or it'll be reset from scratch
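
The sweep this comment describes now keeps the arena with the most free
space rather than the first one with any. A standalone sketch of that
selection rule, with simplified fields standing in for ARENA_SIZEOF(h)
and h->used (hypothetical types, not zsh's Heap):

    #include <stddef.h>

    struct arena { size_t size, used; struct arena *next; };

    /* Return the arena with the most free space, or NULL if all are
     * full -- the rule the freeheap()/popheap() loops now apply. */
    static struct arena *
    pick_fheap(struct arena *heaps)
    {
        struct arena *h, *fheap = NULL;

        for (h = heaps; h; h = h->next) {
            if (!fheap) {
                if (h->used < h->size)
                    fheap = h;
            } else if (h->size - h->used > fheap->size - fheap->used)
                fheap = h;
        }
        return fheap;
    }
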
@@ -361,7 +371,11 @@ freeheap(void)
memset(arena(h) + h->sp->used, 0xff, h->used - h->sp->used);
#endif
h->used = h->sp->used;
- if (!fheap && h->used < ARENA_SIZEOF(h))
+ if (!fheap) {
+ if (h->used < ARENA_SIZEOF(h))
+ fheap = h;
+ } else if (ARENA_SIZEOF(h) - h->used >
+ ARENA_SIZEOF(fheap) - fheap->used)
fheap = h;
hl = h;
#ifdef ZSH_HEAP_DEBUG
@@ -384,6 +398,26 @@ freeheap(void)
VALGRIND_MEMPOOL_TRIM((char *)h, (char *)arena(h), h->used);
#endif
} else {
+ if (fheap == h)
+ fheap = NULL;
+ if (h->next) {
+ /* We want to cut this out of the arena list if we can */
+ if (h == heaps)
+ hl = heaps = h->next;
+ else if (hl && hl->next == h)
+ hl->next = h->next;
+ else {
+ DPUTS(hl, "hl->next != h when freeing");
+ hl = h;
+ continue;
+ }
+ h->next = NULL;
+ } else {
+ /* Leave an empty arena at the end until popped */
+ h->used = 0;
+ fheap = hl = h;
+ break;
+ }
#ifdef USE_MMAP
munmap((void *) h, h->size);
#else
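
The new else-branch cuts a fully freed arena out of the singly linked list
before unmapping it, relying on hl trailing one node behind h during the
walk; if the links are not in the expected state, the entry is skipped and
the walk continues. The same splice, reduced to a standalone sketch with
hypothetical names:

    struct arena { struct arena *next; };

    /* Remove h from the chain: either a head update or one pointer
     * swing in its predecessor prev.  Returns the (possibly new) head. */
    static struct arena *
    unlink_arena(struct arena *head, struct arena *prev, struct arena *h)
    {
        if (h == head)
            head = h->next;
        else if (prev && prev->next == h)
            prev->next = h->next;
        h->next = NULL;     /* h can now be unmapped or freed */
        return head;
    }
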
@@ -441,12 +475,29 @@ popheap(void)
#ifdef ZSH_VALGRIND
VALGRIND_MEMPOOL_TRIM((char *)h, (char *)arena(h), h->used);
#endif
- if (!fheap && h->used < ARENA_SIZEOF(h))
+ if (!fheap) {
+ if (h->used < ARENA_SIZEOF(h))
+ fheap = h;
+ } else if (ARENA_SIZEOF(h) - h->used >
+ ARENA_SIZEOF(fheap) - fheap->used)
fheap = h;
zfree(hs, sizeof(*hs));
hl = h;
} else {
+ if (h->next) {
+ /* We want to cut this out of the arena list if we can */
+ if (h == heaps)
+ hl = heaps = h->next;
+ else if (hl && hl->next == h)
+ hl->next = h->next;
+ else {
+ DPUTS(hl, "hl->next != h when popping");
+ hl = h;
+ continue;
+ }
+ h->next = NULL;
+ }
#ifdef USE_MMAP
munmap((void *) h, h->size);
#else
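
popheap() gets the same unlinking treatment as freeheap(), except that no
empty tail arena needs to be preserved once the pushed state is gone. For
context, these calls are used in matched pairs; a typical calling pattern
(pushheap(), zhalloc(), freeheap(), and popheap() are zsh's real API, the
body is illustrative, and the prototypes come from zsh's own headers):

    void
    example(void)
    {
        pushheap();                 /* stash the current heap state  */
        char *tmp = zhalloc(64);    /* scratch space on the zsh heap */
        /* ... use tmp ... */
        freeheap();                 /* roll back to the pushed state */
        /* ... further zhalloc()/freeheap() rounds as needed ... */
        popheap();                  /* drop the state pushheap saved */
    }
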
@@ -524,7 +575,7 @@ zheapptr(void *p)
mod_export void *
zhalloc(size_t size)
{
- Heap h;
+ Heap h, hp = NULL;
size_t n;
#ifdef ZSH_VALGRIND
size_t req_size = size;
@@ -543,9 +594,15 @@ zhalloc(size_t size)
/* find a heap with enough free space */
- for (h = ((fheap && ARENA_SIZEOF(fheap) >= (size + fheap->used))
- ? fheap : heaps);
- h; h = h->next) {
+ /*
+ * This previously assigned:
+ * h = ((fheap && ARENA_SIZEOF(fheap) >= (size + fheap->used))
+ * ? fheap : heaps);
+ * but we think that nothing upstream of fheap has more free space,
+ * so why start over at heaps just because fheap has too little?
+ */
+ for (h = (fheap ? fheap : heaps); h; h = h->next) {
+ hp = h;
if (ARENA_SIZEOF(h) >= (n = size + h->used)) {
void *ret;
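
Starting the search at fheap also lets the loop record the end of the chain
as a side effect: hp ends up pointing at the last arena visited, which is
why the separate tail-finding loop is deleted further down and the new
DPUTS checks that hp really is the tail. A sketch of the revised search
with simplified types (hypothetical, mirroring the loop above):

    #include <stddef.h>

    struct arena { size_t size, used; struct arena *next; };

    /* First fit starting from the hint; *tail is left pointing at the
     * last arena so a new one can be appended without rescanning. */
    static struct arena *
    find_space(struct arena *heaps, struct arena *fheap,
               size_t want, struct arena **tail)
    {
        struct arena *h;

        *tail = NULL;
        for (h = fheap ? fheap : heaps; h; h = h->next) {
            *tail = h;
            if (h->size - h->used >= want)
                return h;
        }
        return NULL;    /* caller must allocate a fresh arena */
    }
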
@@ -566,7 +623,6 @@ zhalloc(size_t size)
}
}
{
- Heap hp;
/* not found, allocate new heap */
#if defined(ZSH_MEM) && !defined(USE_MMAP)
static int called = 0;
@@ -575,7 +631,6 @@ zhalloc(size_t size)
#endif
n = HEAP_ARENA_SIZE > size ? HEAPSIZE : size + sizeof(*h);
- for (hp = NULL, h = heaps; h; hp = h, h = h->next);
#ifdef USE_MMAP
h = mmap_heap_alloc(&n);
@@ -607,6 +662,7 @@ zhalloc(size_t size)
VALGRIND_MEMPOOL_ALLOC((char *)h, (char *)arena(h), req_size);
#endif
+ DPUTS(hp && hp->next, "failed to find end of chain in zhalloc");
if (hp)
hp->next = h;
else
@@ -1611,8 +1667,13 @@ realloc(MALLOC_RET_T p, MALLOC_ARG_T size)
int i, l = 0;
/* some system..., see above */
- if (!p && size)
- return (MALLOC_RET_T) malloc(size);
+ if (!p && size) {
+ queue_signals();
+ r = malloc(size);
+ unqueue_signals();
+ return (MALLOC_RET_T) r;
+ }
+
/* and some systems even do this... */
if (!p || !size)
return (MALLOC_RET_T) p;
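
The realloc() fix wraps the fallback malloc() in
queue_signals()/unqueue_signals(), the guard zsh uses around non-reentrant
updates to allocator state: a signal handler that runs mid-allocation could
reenter the allocator, so delivery is deferred and any queued signals are
replayed afterwards. The general shape of the guard (the two queueing calls
are zsh's real API, declared in zsh's own headers; the wrapper itself is
illustrative):

    #include <stdlib.h>

    void *
    guarded_alloc(size_t size)
    {
        void *r;

        queue_signals();    /* defer handlers that might allocate   */
        r = malloc(size);
        unqueue_signals();  /* replay anything queued in the window */
        return r;
    }
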