/* mmap.c -- Memory allocation with mmap.
   Copyright (C) 2012-2021 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Google.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    (1) Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    (2) Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    (3) The name of the author may not be used to
    endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.  */

#include "config.h"

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "backtrace.h"
#include "internal.h"

#ifndef HAVE_DECL_GETPAGESIZE
extern int getpagesize (void);
#endif

/* Memory allocation on systems that provide anonymous mmap.  This
   permits the backtrace functions to be invoked from a signal
   handler, assuming that mmap is async-signal safe.  */

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
#endif

/* A list of free memory blocks.  */

struct backtrace_freelist_struct
{
  /* Next on list.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block, including this structure.  */
  size_t size;
};
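
/* A freed block doubles as its own list node: backtrace_free_locked
   below overlays this struct on the first bytes of the freed region,
   which is why blocks smaller than the struct are simply leaked.
   Illustrative layout for a freed 64-byte block at ADDR (offsets
   assume 8-byte pointers and size_t):

     ADDR +  0: next pointer
     ADDR +  8: size (64)
     ADDR + 16: unused until the block is handed out again  */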

/* Free memory allocated by backtrace_alloc.  */

static void
backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
{
  /* Just leak small blocks.  We don't have to be perfect.  Don't put
     more than 16 entries on the free list, to avoid wasting time
     searching when allocating a block.  If we have more than 16
     entries, leak the smallest entry.  */

  if (size >= sizeof (struct backtrace_freelist_struct))
    {
      size_t c;
      struct backtrace_freelist_struct **ppsmall;
      struct backtrace_freelist_struct **pp;
      struct backtrace_freelist_struct *p;

      c = 0;
      ppsmall = NULL;
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
        {
          if (ppsmall == NULL || (*pp)->size < (*ppsmall)->size)
            ppsmall = pp;
          ++c;
        }
      if (c >= 16)
        {
          if (size <= (*ppsmall)->size)
            return;
          *ppsmall = (*ppsmall)->next;
        }

      p = (struct backtrace_freelist_struct *) addr;
      p->next = state->freelist;
      p->size = size;
      state->freelist = p;
    }
}

/* Allocate memory like malloc.  If ERROR_CALLBACK is NULL, don't
   report an error.  */

void *
backtrace_alloc (struct backtrace_state *state,
                 size_t size, backtrace_error_callback error_callback,
                 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
        {
          if ((*pp)->size >= size)
            {
              struct backtrace_freelist_struct *p;

              p = *pp;
              *pp = p->next;

              /* Round for alignment; we assume that no type we care about
                 is more than 8 bytes.  */
              size = (size + 7) & ~ (size_t) 7;
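              /* For example, a 13-byte request rounds to
                 (13 + 7) & ~7 == 16, so the tail of the freelist
                 block can go back on the list at an 8-byte-aligned
                 address.  */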
              if (size < p->size)
                backtrace_free_locked (state, (char *) p + size,
                                       p->size - size);

              ret = (void *) p;

              break;
            }
        }

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate a new page.  */

      pagesize = getpagesize ();
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
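      /* This rounding assumes the page size is a power of two: with
         4096-byte pages, a 5000-byte request asks the kernel for
         8192 bytes, and the unused tail goes back on the free list
         below.  */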
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED)
        {
          if (error_callback)
            error_callback (data, "mmap", errno);
        }
      else
        {
          size = (size + 7) & ~ (size_t) 7;
          if (size < asksize)
            backtrace_free (state, (char *) page + size, asksize - size,
                            error_callback, data);

          ret = page;
        }
    }

  return ret;
}

/* Free memory allocated by backtrace_alloc.  */

void
backtrace_free (struct backtrace_state *state, void *addr, size_t size,
                backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
                void *data ATTRIBUTE_UNUSED)
{
  int locked;

  /* If we are freeing a large aligned block, just release it back to
     the system.  This case arises when growing a vector for a large
     binary with lots of debug info.  Calling munmap here may cause us
     to call mmap again if there is also a large shared library; we
     just live with that.  */
  if (size >= 16 * 4096)
    {
      size_t pagesize;

      pagesize = getpagesize ();
      if (((uintptr_t) addr & (pagesize - 1)) == 0
          && (size & (pagesize - 1)) == 0)
        {
          /* If munmap fails for some reason, just add the block to
             the freelist.  */
          if (munmap (addr, size) == 0)
            return;
        }
    }

  /* If we can acquire the lock, add the new space to the free list.
     If we can't acquire the lock, just leak the memory.
     __sync_lock_test_and_set returns the old state of the lock, so we
     have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      backtrace_free_locked (state, addr, size);

      if (state->threaded)
        __sync_lock_release (&state->lock_alloc);
    }
}

/* Grow VEC by SIZE bytes.  */

void *
backtrace_vector_grow (struct backtrace_state *state, size_t size,
                       backtrace_error_callback error_callback,
                       void *data, struct backtrace_vector *vec)
{
  void *ret;

  if (size > vec->alc)
    {
      size_t pagesize;
      size_t alc;
      void *base;

      pagesize = getpagesize ();
      alc = vec->size + size;
      if (vec->size == 0)
        alc = 16 * size;
      else if (alc < pagesize)
        {
          alc *= 2;
          if (alc > pagesize)
            alc = pagesize;
        }
      else
        {
          alc *= 2;
          alc = (alc + pagesize - 1) & ~ (pagesize - 1);
        }
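      /* Growth policy, in brief: the first grow reserves room for 16
         entries; below one page the capacity doubles, capped at a
         page; past one page it doubles and rounds up to whole pages,
         keeping the number of reallocations logarithmic in the final
         size.  */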
      base = backtrace_alloc (state, alc, error_callback, data);
      if (base == NULL)
        return NULL;
      if (vec->base != NULL)
        {
          memcpy (base, vec->base, vec->size);
          backtrace_free (state, vec->base, vec->size + vec->alc,
                          error_callback, data);
        }
      vec->base = base;
      vec->alc = alc - vec->size;
    }

  ret = (char *) vec->base + vec->size;
  vec->size += size;
  vec->alc -= size;
  return ret;
}

/* Finish the current allocation on VEC.  */

void *
backtrace_vector_finish (struct backtrace_state *state ATTRIBUTE_UNUSED,
                         struct backtrace_vector *vec,
                         backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
                         void *data ATTRIBUTE_UNUSED)
{
  void *ret;

  ret = vec->base;
  vec->base = (char *) vec->base + vec->size;
  vec->size = 0;
  return ret;
}
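
/* Note that finishing copies and frees nothing: the bytes
   accumulated so far become the returned object, and VEC keeps
   allocating from the space that remains, so several finished
   objects can share one underlying mmap'd page.  */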

/* Release any extra space allocated for VEC.  */

int
backtrace_vector_release (struct backtrace_state *state,
                          struct backtrace_vector *vec,
                          backtrace_error_callback error_callback,
                          void *data)
{
  size_t size;
  size_t alc;
  size_t aligned;

  /* Make sure that the block that we free is aligned on an 8-byte
     boundary.  */
  size = vec->size;
  alc = vec->alc;
  aligned = (size + 7) & ~ (size_t) 7;
  alc -= aligned - size;

  backtrace_free (state, (char *) vec->base + aligned, alc,
                  error_callback, data);
  vec->alc = 0;
  if (vec->size == 0)
    vec->base = NULL;
  return 1;
}
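
/* Usage sketch for the vector interface (illustrative only; STATE,
   ERROR_CALLBACK, and DATA are assumed to come from the caller):

     struct backtrace_vector vec;
     char *p;

     vec.base = NULL;
     vec.size = 0;
     vec.alc = 0;
     p = (char *) backtrace_vector_grow (state, 6, error_callback,
                                         data, &vec);
     if (p != NULL)
       memcpy (p, "hello", 6);
     ...
     backtrace_vector_release (state, &vec, error_callback, data);  */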