update jemalloc to 3.4.0

Daniel Micay 2013-06-12 00:24:36 -04:00
parent 8761b1fb16
commit 0685c657f0
34 changed files with 2676 additions and 2628 deletions

View File

@ -6,6 +6,19 @@ found in the git revision history:
http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
git://canonware.com/jemalloc.git
* 3.4.0 (June 2, 2013)
This version is essentially a small bugfix release, but the addition of
aarch64 support requires that the minor version be incremented.
Bug fixes:
- Fix race-triggered deadlocks in chunk_record(). These deadlocks were
typically triggered by multiple threads concurrently deallocating huge
objects.
New features:
- Add support for the aarch64 architecture.
* 3.3.1 (March 6, 2013)
This version fixes bugs that are typically encountered only when utilizing
@ -15,7 +28,7 @@ found in the git revision history:
- Fix a locking order bug that could cause deadlock during fork if heap
profiling were enabled.
- Fix a chunk recycling bug that could cause the allocator to lose track of
whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause
corruption if allocating via sbrk(2) (unlikely unless running with the
"dss:primary" option specified). This was completely harmless on Linux
unless using mlockall(2) (and unlikely even then, unless the

View File

@ -1 +1 @@
-3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784
+3.4.0-0-g0ed518e5dab789ad2171bb38977a8927e2a26775

View File

@ -2,11 +2,11 @@
# Copyright (c) 1998-2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
@ -16,7 +16,7 @@
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ -1683,23 +1683,23 @@ sub PrintSource {
HtmlPrintNumber($c2),
UnparseAddress($offset, $e->[0]),
CleanDisassembly($e->[3]));
# Append the most specific source line associated with this instruction
if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) };
$dis = HtmlEscape($dis);
my $f = $e->[5];
my $l = $e->[6];
if ($f ne $last_dis_filename) {
$dis .= sprintf("<span class=disasmloc>%s:%d</span>",
HtmlEscape(CleanFileName($f)), $l);
} elsif ($l ne $last_dis_linenum) {
# De-emphasize the unchanged file name portion
$dis .= sprintf("<span class=unimportant>%s</span>" .
"<span class=disasmloc>:%d</span>",
"<span class=disasmloc>:%d</span>",
HtmlEscape(CleanFileName($f)), $l);
} else {
# De-emphasize the entire location
$dis .= sprintf("<span class=unimportant>%s:%d</span>",
HtmlEscape(CleanFileName($f)), $l);
}
$last_dis_filename = $f;
@ -1788,8 +1788,8 @@ sub PrintSource {
if (defined($dis) && $dis ne '') {
$asm = "<span class=\"asm\">" . $dis . "</span>";
}
my $source_class = (($n1 + $n2 > 0)
? "livesrc"
: (($asm ne "") ? "deadsrc" : "nop"));
printf $output (
"<span class=\"line\">%5d</span> " .
@ -4723,7 +4723,7 @@ sub MapToSymbols {
}
}
}
# Prepend to accumulated symbols for pcstr
# (so that caller comes before callee)
my $sym = $symbols->{$pcstr};
@ -4917,7 +4917,7 @@ sub ConfigureTool {
my $dirname = $`; # this is everything up to and including the last slash
if (-x "$dirname$tool") {
$path = "$dirname$tool";
} else {
$path = $tool;
}
}

View File

@ -26,7 +26,7 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
])
dnl JE_COMPILABLE(label, hcode, mcode, rvar)
dnl
dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors
dnl cause failure.
AC_DEFUN([JE_COMPILABLE],
@ -232,7 +232,7 @@ CC_MM=1
dnl Platform-specific settings. abi and RPATH can probably be determined
dnl programmatically, but doing so is error-prone, which makes it generally
dnl not worth the trouble.
dnl
dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
dnl definitions need to be seen before any headers are included, which is a pain
dnl to make happen otherwise.
@ -965,7 +965,7 @@ fi
dnl ============================================================================
dnl jemalloc configuration.
dnl
dnl Set VERSION if source directory has an embedded git repository.
if test -d "${srcroot}.git" ; then

View File

@ -2,12 +2,12 @@
.\" Title: JEMALLOC
.\" Author: Jason Evans
.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
.\" Date: 03/06/2013
.\" Date: 06/02/2013
.\" Manual: User Manual
.\" Source: jemalloc 3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784
.\" Source: jemalloc 3.4.0-0-g0ed518e5dab789ad2171bb38977a8927e2a26775
.\" Language: English
.\"
.TH "JEMALLOC" "3" "03/06/2013" "jemalloc 3.3.1-0-g9ef9d9e8c271" "User Manual"
.TH "JEMALLOC" "3" "06/02/2013" "jemalloc 3.4.0-0-g0ed518e5dab7" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@ -31,7 +31,7 @@
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
-This manual describes jemalloc 3\&.3\&.1\-0\-g9ef9d9e8c271cdf14f664b871a8f98c827714784\&. More information can be found at the
+This manual describes jemalloc 3\&.4\&.0\-0\-g0ed518e5dab789ad2171bb38977a8927e2a26775\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.SH "SYNOPSIS"
.sp
@ -376,7 +376,19 @@ Once, when the first call is made to one of the memory allocation routines, the
The string pointed to by the global variable
\fImalloc_conf\fR, the \(lqname\(rq of the file referenced by the symbolic link named
/etc/malloc\&.conf, and the value of the environment variable
-\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&.
+\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&. Note that
\fImalloc_conf\fR
may be read before
\fBmain\fR\fB\fR
is entered, so the declaration of
\fImalloc_conf\fR
should specify an initializer that contains the final value to be read by jemalloc\&.
\fImalloc_conf\fR
is a compile\-time setting, whereas
/etc/malloc\&.conf
and
\fBMALLOC_CONF\fR
can be safely set any time prior to program invocation\&.
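In practice this means the compile-time option string must be a static initializer. A minimal sketch (the option chosen is just an illustrative value):

#include <stdlib.h>

/* Read by jemalloc at first allocation, possibly before main(); must be
 * fully initialized here, not assigned at runtime. */
const char *malloc_conf = "xmalloc:true";

int
main(void)
{
	void *p = malloc(64);	/* the option above is already in effect */

	free(p);
	return (0);
}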
.PP
An options string is a comma\-separated list of option:value pairs\&. There is one key corresponding to each
"opt\&.*"

View File

@ -432,7 +432,14 @@ for (i = 0; i < nbins; i++) {
referenced by the symbolic link named <filename
class="symlink">/etc/malloc.conf</filename>, and the value of the
environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in
-that order, from left to right as options.</para>
+that order, from left to right as options. Note that
<varname>malloc_conf</varname> may be read before
<function>main<parameter/></function> is entered, so the declaration of
<varname>malloc_conf</varname> should specify an initializer that contains
the final value to be read by jemalloc. <varname>malloc_conf</varname> is
a compile-time setting, whereas <filename
class="symlink">/etc/malloc.conf</filename> and <envar>MALLOC_CONF</envar>
can be safely set any time prior to program invocation.</para>
<para>An options string is a comma-separated list of option:value pairs.
There is one key corresponding to each <link

File diff suppressed because it is too large.

View File

@ -38,105 +38,105 @@ JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (__sync_sub_and_fetch(p, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, -((int64_t)x)));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
# elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
asm volatile (
"lock; xaddq %0, %1;"
: "+r" (x), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
x = (uint64_t)(-(int64_t)x);
asm volatile (
"lock; xaddq %0, %1;"
: "+r" (x), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (x);
}
# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
/*
* atomic_fetchadd_64() doesn't exist, but we only ever use this
* function on LP64 systems, so atomic_fetchadd_long() will do.
*/
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}
# elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
return (__sync_sub_and_fetch(p, x));
}
# else
# error "Missing implementation for 64-bit atomic operations"
@ -150,97 +150,97 @@ JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (__sync_sub_and_fetch(p, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, -((int32_t)x)));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
asm volatile (
"lock; xaddl %0, %1;"
: "+r" (x), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
x = (uint32_t)(-(int32_t)x);
asm volatile (
"lock; xaddl %0, %1;"
: "+r" (x), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
return (x);
}
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (atomic_fetchadd_32(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}
#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
return (__sync_sub_and_fetch(p, x));
}
#else
# error "Missing implementation for 32-bit atomic operations"
@ -253,9 +253,9 @@ atomic_add_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
@ -264,11 +264,11 @@ atomic_sub_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
return ((size_t)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
@ -279,9 +279,9 @@ atomic_add_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
@ -290,11 +290,11 @@ atomic_sub_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_uint64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_INT == 2)
return ((unsigned)atomic_add_uint32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
#endif
}
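As a usage sketch (the counter and function names here are hypothetical, not jemalloc's), note that all of these primitives return the post-update value rather than the pre-update one:

/* Hypothetical module-local statistic. */
static size_t npages_mapped;

static size_t
pages_note_mapped(size_t n)
{

	/* atomic_add_z() yields the value after the addition. */
	return (atomic_add_z(&npages_mapped, n));
}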
/******************************************************************************/

View File

@ -24,22 +24,22 @@ typedef unsigned long bitmap_t;
#ifdef JEMALLOC_H_STRUCTS
struct bitmap_level_s {
/* Offset of this level's groups within the array of groups. */
size_t group_offset;
};
struct bitmap_info_s {
/* Logical number of bits in bitmap (stored at bottom level). */
size_t nbits;
/* Number of levels necessary for nbits. */
unsigned nlevels;
/*
* Only the first (nlevels+1) elements are used, and levels are ordered
* bottom to top (e.g. the bottom level is stored in levels[0]).
*/
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
};
#endif /* JEMALLOC_H_STRUCTS */
@ -67,115 +67,115 @@ void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
bitmap_t rg = bitmap[rgoff];
/* The bitmap is full iff the root group is 0. */
return (rg == 0);
}
JEMALLOC_INLINE bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
size_t goff;
bitmap_t g;
assert(bit < binfo->nbits);
goff = bit >> LG_BITMAP_GROUP_NBITS;
g = bitmap[goff];
return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))));
}
JEMALLOC_INLINE void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
size_t goff;
bitmap_t *gp;
bitmap_t g;
assert(bit < binfo->nbits);
assert(bitmap_get(bitmap, binfo, bit) == false);
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(bitmap_get(bitmap, binfo, bit));
/* Propagate group state transitions up the tree. */
if (g == 0) {
unsigned i;
for (i = 1; i < binfo->nlevels; i++) {
bit = goff;
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (g != 0)
break;
}
}
}
/* sfu: set first unset. */
JEMALLOC_INLINE size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
size_t bit;
bitmap_t g;
unsigned i;
assert(bitmap_full(bitmap, binfo) == false);
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
bit = ffsl(g) - 1;
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1);
}
bitmap_set(bitmap, binfo, bit);
return (bit);
}
JEMALLOC_INLINE void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
size_t goff;
bitmap_t *gp;
bitmap_t g;
bool propagate;
assert(bit < binfo->nbits);
assert(bitmap_get(bitmap, binfo, bit));
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[goff];
g = *gp;
propagate = (g == 0);
assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
assert(bitmap_get(bitmap, binfo, bit) == false);
/* Propagate group state transitions up the tree. */
if (propagate) {
unsigned i;
for (i = 1; i < binfo->nlevels; i++) {
bit = goff;
goff = bit >> LG_BITMAP_GROUP_NBITS;
gp = &bitmap[binfo->levels[i].group_offset + goff];
g = *gp;
propagate = (g == 0);
assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
== 0);
g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
*gp = g;
if (propagate == false)
break;
}
}
}
#endif
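A minimal usage sketch, assuming the bitmap_info_init()/bitmap_init() initializers declared in this header's externs section (the group array below is sized by hand for the example):

bitmap_info_t binfo;
bitmap_t groups[16];	/* ample for 512 bits plus summary levels on LP64 */

bitmap_info_init(&binfo, 512);	/* logical size: 512 bits */
bitmap_init(groups, &binfo);	/* all bits start unset */
size_t bit = bitmap_sfu(groups, &binfo);	/* returns 0 and sets it */
assert(bitmap_get(groups, &binfo, bit));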

View File

@ -9,15 +9,15 @@
/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~chunksize_mask))
/* Return the chunk offset of address a. */
#define CHUNK_ADDR2OFFSET(a) \
((size_t)((uintptr_t)(a) & chunksize_mask))
/* Return the smallest chunk multiple that is >= s. */
#define CHUNK_CEILING(s) \
(((s) + chunksize_mask) & ~chunksize_mask)
#endif /* JEMALLOC_H_TYPES */
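A worked instance of these macros, assuming the common 4 MiB chunk size (chunksize_mask == 0x3fffff); the address is arbitrary:

void *a = (void *)0x7f3a00412345;
void *base = CHUNK_ADDR2BASE(a);	/* 0x7f3a00400000 */
size_t off = CHUNK_ADDR2OFFSET(a);	/* 0x12345 */
size_t need = CHUNK_CEILING(5);		/* 0x400000: rounds up to one chunk */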
/******************************************************************************/

View File

@ -2,11 +2,11 @@
#ifdef JEMALLOC_H_TYPES
typedef enum {
dss_prec_disabled = 0,
dss_prec_primary = 1,
dss_prec_secondary = 2,
dss_prec_limit = 3
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"
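These values correspond to the "dss" option strings ("disabled", "primary", "secondary"). A sketch of reading the compiled-in default through the documented opt.dss mallctl (error handling omitted):

const char *dss;
size_t sz = sizeof(dss);

je_mallctl("opt.dss", &dss, &sz, NULL, 0);	/* "secondary" unless overridden */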

View File

@ -25,41 +25,41 @@ typedef bool ckh_keycomp_t (const void *, const void *);
/* Hash table cell. */
struct ckhc_s {
const void *key;
const void *data;
};
struct ckh_s {
#ifdef CKH_COUNT
/* Counters used to get an idea of performance. */
uint64_t ngrows;
uint64_t nshrinks;
uint64_t nshrinkfails;
uint64_t ninserts;
uint64_t nrelocs;
#endif
/* Used for pseudo-random number generation. */
#define CKH_A 1103515241
#define CKH_C 12347
uint32_t prng_state;
/* Total number of items. */
size_t count;
/*
* Minimum and current number of hash table buckets. There are
* 2^LG_CKH_BUCKET_CELLS cells per bucket.
*/
unsigned lg_minbuckets;
unsigned lg_curbuckets;
/* Hash and comparison functions. */
ckh_hash_t *hash;
ckh_keycomp_t *keycomp;
/* Hash table with 2^lg_curbuckets buckets. */
ckhc_t *tab;
};
#endif /* JEMALLOC_H_STRUCTS */
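As orientation, a hypothetical round trip through the cuckoo hash; ckh_new()/ckh_insert()/ckh_search() are declared in this header's externs (all return true on failure), while the hash and comparison callbacks named here are assumptions:

ckh_t ckh;
void *k, *v;

if (ckh_new(&ckh, 16, my_hash, my_keycomp))	/* 16: initial item hint */
	abort();
if (ckh_insert(&ckh, "key", "value"))
	abort();
if (ckh_search(&ckh, "key", &k, &v) == false) {
	/* Found: k and v now point at the stored pair. */
}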

View File

@ -12,58 +12,58 @@ typedef struct ctl_stats_s ctl_stats_t;
#ifdef JEMALLOC_H_STRUCTS
struct ctl_node_s {
bool named;
};
struct ctl_named_node_s {
struct ctl_node_s node;
const char *name;
/* If (nchildren == 0), this is a terminal node. */
unsigned nchildren;
const ctl_node_t *children;
int (*ctl)(const size_t *, size_t, void *, size_t *,
void *, size_t);
};
struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
};
struct ctl_arena_stats_s {
bool initialized;
unsigned nthreads;
const char *dss;
size_t pactive;
size_t pdirty;
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
size_t allocated_small;
uint64_t nmalloc_small;
uint64_t ndalloc_small;
uint64_t nrequests_small;
malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t *lstats; /* nlclasses elements. */
};
struct ctl_stats_s {
size_t allocated;
size_t active;
size_t mapped;
struct {
size_t current; /* stats_chunks.curchunks */
uint64_t total; /* stats_chunks.nchunks */
size_t high; /* stats_chunks.highchunks */
} chunks;
struct {
size_t allocated; /* huge_allocated */
uint64_t nmalloc; /* huge_nmalloc */
uint64_t ndalloc; /* huge_ndalloc */
} huge;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
#endif /* JEMALLOC_H_STRUCTS */
@ -82,30 +82,30 @@ void ctl_postfork_parent(void);
void ctl_postfork_child(void);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
!= 0) { \
malloc_printf( \
"<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
name); \
abort(); \
} \
} while (0)
#define xmallctlnametomib(name, mibp, miblenp) do { \
if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
malloc_printf("<jemalloc>: Failure in " \
"xmallctlnametomib(\"%s\", ...)\n", name); \
abort(); \
} \
} while (0)
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
newlen) != 0) { \
malloc_write( \
"<jemalloc>: Failure in xmallctlbymib()\n"); \
abort(); \
} \
} while (0)
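A sketch of the intended usage: the x-prefixed wrappers serve internal callers for which failure is fatal ("stats.allocated" is one of the documented mallctl names):

size_t allocated;
size_t sz = sizeof(allocated);

xmallctl("stats.allocated", &allocated, &sz, NULL, 0);	/* aborts on error */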
#endif /* JEMALLOC_H_EXTERNS */
@ -114,3 +114,4 @@ void ctl_postfork_child(void);
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

View File

@ -9,23 +9,23 @@ typedef struct extent_node_s extent_node_t;
/* Tree of extents. */
struct extent_node_s {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) link_szad;
/* Linkage for the address-ordered tree. */
rb_node(extent_node_t) link_ad;
/* Profile counters, used for huge objects. */
prof_ctx_t *prof_ctx;
/* Pointer to the extent that this tree node is responsible for. */
void *addr;
/* Total region size. */
size_t size;
/* True if zero-filled; used by chunk recycling code. */
bool zeroed;
};
typedef rb_tree(extent_node_t) extent_tree_t;
@ -43,3 +43,4 @@ rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

View File

@ -30,284 +30,284 @@ JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{
return (x << r) | (x >> (32 - r));
}
JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
return (x << r) | (x >> (64 - r));
}
JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{
return p[i];
}
JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{
return p[i];
}
JEMALLOC_INLINE uint32_t
hash_fmix_32(uint32_t h)
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
JEMALLOC_INLINE uint64_t
hash_fmix_64(uint64_t k)
{
k ^= k >> 33;
k *= QU(0xff51afd7ed558ccdLLU);
k ^= k >> 33;
k *= QU(0xc4ceb9fe1a85ec53LLU);
k ^= k >> 33;
return k;
}
JEMALLOC_INLINE uint32_t
hash_x86_32(const void *key, int len, uint32_t seed)
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 4;
uint32_t h1 = seed;
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i);
k1 *= c1;
k1 = hash_rotl_32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = hash_rotl_32(h1, 13);
h1 = h1*5 + 0xe6546b64;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
uint32_t k1 = 0;
switch (len & 3) {
case 3: k1 ^= tail[2] << 16;
case 2: k1 ^= tail[1] << 8;
case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len;
h1 = hash_fmix_32(h1);
return h1;
}
UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2])
{
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
uint32_t h1 = seed;
uint32_t h2 = seed;
uint32_t h3 = seed;
uint32_t h4 = seed;
const uint32_t c1 = 0x239b961b;
const uint32_t c2 = 0xab0e9789;
const uint32_t c3 = 0x38b34ae5;
const uint32_t c4 = 0xa1e38b93;
/* body */
{
const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
int i;
for (i = -nblocks; i; i++) {
uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_32(h1, 19); h1 += h2;
h1 = h1*5 + 0x561ccd1b;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
h2 = hash_rotl_32(h2, 17); h2 += h3;
h2 = h2*5 + 0x0bcaa747;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
h3 = hash_rotl_32(h3, 15); h3 += h4;
h3 = h3*5 + 0x96cd1c35;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
h4 = hash_rotl_32(h4, 13); h4 += h1;
h4 = h4*5 + 0x32ac3b17;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
uint32_t k1 = 0;
uint32_t k2 = 0;
uint32_t k3 = 0;
uint32_t k4 = 0;
switch (len & 15) {
case 15: k4 ^= tail[14] << 16;
case 14: k4 ^= tail[13] << 8;
case 13: k4 ^= tail[12] << 0;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
case 12: k3 ^= tail[11] << 24;
case 11: k3 ^= tail[10] << 16;
case 10: k3 ^= tail[ 9] << 8;
case 9: k3 ^= tail[ 8] << 0;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
case 8: k2 ^= tail[ 7] << 24;
case 7: k2 ^= tail[ 6] << 16;
case 6: k2 ^= tail[ 5] << 8;
case 5: k2 ^= tail[ 4] << 0;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
case 4: k1 ^= tail[ 3] << 24;
case 3: k1 ^= tail[ 2] << 16;
case 2: k1 ^= tail[ 1] << 8;
case 1: k1 ^= tail[ 0] << 0;
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
h1 = hash_fmix_32(h1);
h2 = hash_fmix_32(h2);
h3 = hash_fmix_32(h3);
h4 = hash_fmix_32(h4);
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
r_out[0] = (((uint64_t) h2) << 32) | h1;
r_out[1] = (((uint64_t) h4) << 32) | h3;
}
UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2])
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
uint64_t h1 = seed;
uint64_t h2 = seed;
const uint64_t c1 = QU(0x87c37b91114253d5LLU);
const uint64_t c2 = QU(0x4cf5ad432745937fLLU);
/* body */
{
const uint64_t *blocks = (const uint64_t *) (data);
int i;
for (i = 0; i < nblocks; i++) {
uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
h1 = hash_rotl_64(h1, 27); h1 += h2;
h1 = h1*5 + 0x52dce729;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
h2 = hash_rotl_64(h2, 31); h2 += h1;
h2 = h2*5 + 0x38495ab5;
}
}
/* tail */
{
const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
uint64_t k1 = 0;
uint64_t k2 = 0;
switch (len & 15) {
case 15: k2 ^= ((uint64_t)(tail[14])) << 48;
case 14: k2 ^= ((uint64_t)(tail[13])) << 40;
case 13: k2 ^= ((uint64_t)(tail[12])) << 32;
case 12: k2 ^= ((uint64_t)(tail[11])) << 24;
case 11: k2 ^= ((uint64_t)(tail[10])) << 16;
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56;
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48;
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40;
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32;
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24;
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16;
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8;
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
}
}
/* finalization */
h1 ^= len; h2 ^= len;
h1 += h2;
h2 += h1;
h1 = hash_fmix_64(h1);
h2 = hash_fmix_64(h2);
h1 += h2;
h2 += h1;
r_out[0] = h1;
r_out[1] = h2;
}
@ -317,12 +317,12 @@ JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
#if (LG_SIZEOF_PTR == 3)
hash_x64_128(key, len, seed, (uint64_t *)r_hash);
#else
uint64_t hashes[2];
hash_x86_128(key, len, seed, hashes);
r_hash[0] = (size_t)hashes[0];
r_hash[1] = (size_t)hashes[1];
#endif
}
#endif
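A short usage sketch of the MurmurHash3-derived entry point above (key and seed are arbitrary example values):

const char key[] = "example key";
size_t h[2];

hash(key, sizeof(key), 0x5eed, h);
/* h[0] and h[1] now hold 128 bits of hash output as size_t words. */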

View File

@ -278,6 +278,9 @@ static const bool config_ivsalloc =
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# ifdef __aarch64__
# define LG_QUANTUM 4
# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
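LG_QUANTUM feeds the QUANTUM definition later in this header, so the new aarch64 entry yields a 16-byte minimum allocation alignment:

#define QUANTUM ((size_t)(1U << LG_QUANTUM))	/* 1 << 4 == 16 on aarch64 */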

View File

@ -33,25 +33,25 @@ mb_write(void)
{
# if 0
/* This is a true memory barrier. */
asm volatile ("pusha;"
"xor %%eax,%%eax;"
"cpuid;"
"popa;"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
#else
/*
* This is hopefully enough to keep the compiler from reordering
* instructions around this one.
*/
asm volatile ("nop;"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
#endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
@ -59,40 +59,40 @@ JEMALLOC_INLINE void
mb_write(void)
{
asm volatile ("sfence"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
asm volatile ("sfence"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif defined(__powerpc__)
JEMALLOC_INLINE void
mb_write(void)
{
asm volatile ("eieio"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
asm volatile ("eieio"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif defined(__sparc64__)
JEMALLOC_INLINE void
mb_write(void)
{
asm volatile ("membar #StoreStore"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
asm volatile ("membar #StoreStore"
: /* Outputs. */
: /* Inputs. */
: "memory" /* Clobbers. */
);
}
#elif defined(__tile__)
JEMALLOC_INLINE void
mb_write(void)
{
__sync_synchronize();
}
#else
/*
@ -102,11 +102,11 @@ mb_write(void)
JEMALLOC_INLINE void
mb_write(void)
{
malloc_mutex_t mtx;
malloc_mutex_init(&mtx);
malloc_mutex_lock(&mtx);
malloc_mutex_unlock(&mtx);
}
#endif
#endif

View File

@ -26,14 +26,14 @@ typedef struct malloc_mutex_s malloc_mutex_t;
struct malloc_mutex_s {
#ifdef _WIN32
CRITICAL_SECTION lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
pthread_mutex_t lock;
malloc_mutex_t *postponed_next;
#else
pthread_mutex_t lock;
#endif
};
@ -68,30 +68,30 @@ JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex)
{
if (isthreaded) {
#ifdef _WIN32
EnterCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockLock(&mutex->lock);
#else
pthread_mutex_lock(&mutex->lock);
#endif
}
}
JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{
if (isthreaded) {
#ifdef _WIN32
LeaveCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLockUnlock(&mutex->lock);
#else
pthread_mutex_unlock(&mutex->lock);
#endif
}
}
#endif

View File

@ -26,22 +26,22 @@
* const uint32_t a, c : See above discussion.
*/
#define prng32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 32); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (32 - lg_range); \
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
#define prng64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 64); \
\
r = (state * (a)) + (c); \
state = r; \
r >>= (64 - lg_range); \
} while (false)
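An illustrative draw (the seed and a/c constants are example values, not constants jemalloc ships for this purpose):

uint32_t r;
static uint32_t state = 42;	/* arbitrary nonzero seed */

/* r gets the 16 high bits of the LCG state: uniform over [0, 2^16). */
prng32(r, 16, state, 1103515241, 12347);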
#endif /* JEMALLOC_H_TYPES */

View File

@ -50,131 +50,131 @@ typedef struct prof_tdata_s prof_tdata_t;
#ifdef JEMALLOC_H_STRUCTS
struct prof_bt_s {
/* Backtrace, stored as len program counters. */
void **vec;
unsigned len;
};
#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
prof_bt_t *bt;
unsigned nignore;
unsigned max;
} prof_unwind_data_t;
#endif
struct prof_cnt_s {
/*
* Profiling counters. An allocation/deallocation pair can operate on
* different prof_thr_cnt_t objects that are linked into the same
* prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
* negative. In principle it is possible for the *bytes counters to
* overflow/underflow, but a general solution would require something
* like 128-bit counters; this implementation doesn't bother to solve
* that problem.
*/
int64_t curobjs;
int64_t curbytes;
uint64_t accumobjs;
uint64_t accumbytes;
};
struct prof_thr_cnt_s {
/* Linkage into prof_ctx_t's cnts_ql. */
ql_elm(prof_thr_cnt_t) cnts_link;
/* Linkage into thread's LRU. */
ql_elm(prof_thr_cnt_t) lru_link;
/*
* Associated context. If a thread frees an object that it did not
* allocate, it is possible that the context is not cached in the
* thread's hash table, in which case it must be able to look up the
* context, insert a new prof_thr_cnt_t into the thread's hash table,
* and link it into the prof_ctx_t's cnts_ql.
*/
prof_ctx_t *ctx;
/*
* Threads use memory barriers to update the counters. Since there is
* only ever one writer, the only challenge is for the reader to get a
* consistent read of the counters.
*
* The writer uses this series of operations:
*
* 1) Increment epoch to an odd number.
* 2) Update counters.
* 3) Increment epoch to an even number.
*
* The reader must assure 1) that the epoch is even while it reads the
* counters, and 2) that the epoch doesn't change between the time it
* starts and finishes reading the counters.
*/
unsigned epoch;
/* Profiling counters. */
prof_cnt_t cnts;
};
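A sketch of the reader side of the epoch protocol described above (loop structure assumed; memory-ordering details elided for brevity):

prof_cnt_t snap;
unsigned e0, e1;

do {
	e0 = cnt->epoch;
	snap = cnt->cnts;
	e1 = cnt->epoch;
} while ((e0 & 1) != 0 || e0 != e1);	/* odd: mid-update; mismatch: torn */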
struct prof_ctx_s {
/* Associated backtrace. */
prof_bt_t *bt;
/* Protects nlimbo, cnt_merged, and cnts_ql. */
malloc_mutex_t *lock;
/*
* Number of threads that currently cause this ctx to be in a state of
* limbo due to one of:
* - Initializing per thread counters associated with this ctx.
* - Preparing to destroy this ctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* ctx.
*/
unsigned nlimbo;
/* Temporary storage for summation during dump. */
prof_cnt_t cnt_summed;
/* When threads exit, they merge their stats into cnt_merged. */
prof_cnt_t cnt_merged;
/*
* List of profile counters, one for each thread that has allocated in
* this context.
*/
ql_head(prof_thr_cnt_t) cnts_ql;
};
struct prof_tdata_s {
/*
* Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a
* cache of backtraces, with associated thread-specific prof_thr_cnt_t
* objects. Other threads may read the prof_thr_cnt_t contents, but no
* others will ever write them.
*
* Upon thread exit, the thread must merge all the prof_thr_cnt_t
* counter data into the associated prof_ctx_t objects, and unlink/free
* the prof_thr_cnt_t objects.
*/
ckh_t bt2cnt;
/* LRU for contents of bt2cnt. */
ql_head(prof_thr_cnt_t) lru_ql;
/* Backtrace vector, used for calls to prof_backtrace(). */
void **vec;
/* Sampling state. */
uint64_t prng_state;
uint64_t threshold;
uint64_t accum;
/* State used to avoid dumping while operating on prof internals. */
bool enq;
bool enq_idump;
bool enq_gdump;
};
#endif /* JEMALLOC_H_STRUCTS */
@ -232,55 +232,55 @@ void prof_postfork_child(void);
#ifdef JEMALLOC_H_INLINES
#define PROF_ALLOC_PREP(nignore, size, ret) do { \
prof_tdata_t *prof_tdata; \
prof_bt_t bt; \
\
assert(size == s2u(size)); \
\
prof_tdata = prof_tdata_get(true); \
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
if (prof_tdata != NULL) \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
else \
ret = NULL; \
break; \
} \
\
if (opt_prof_active == false) { \
/* Sampling is currently inactive, so avoid sampling. */\
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
} else if (opt_lg_prof_sample == 0) { \
/* Don't bother with sampling logic, since sampling */\
/* interval is 1. */\
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else { \
if (prof_tdata->threshold == 0) { \
/* Initialize. Seed the prng differently for */\
/* each thread. */\
prof_tdata->prng_state = \
(uint64_t)(uintptr_t)&size; \
prof_sample_threshold_update(prof_tdata); \
} \
\
/* Determine whether to capture a backtrace based on */\
/* whether size is enough for prof_accum to reach */\
/* prof_tdata->threshold. However, delay updating */\
/* these variables until prof_{m,re}alloc(), because */\
/* we don't know for sure that the allocation will */\
/* succeed. */\
/* */\
/* Use subtraction rather than addition to avoid */\
/* potential integer overflow. */\
if (size >= prof_tdata->threshold - \
prof_tdata->accum) { \
bt_init(&bt, prof_tdata->vec); \
prof_backtrace(&bt, nignore); \
ret = prof_lookup(&bt); \
} else \
ret = (prof_thr_cnt_t *)(uintptr_t)1U; \
} \
} while (0)
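A sketch of the calling convention (the variable names are assumed, not quoted from a caller): ret receives NULL on failure, (uintptr_t)1U for "allocate but do not sample", or a counter to update in prof_malloc():

prof_thr_cnt_t *cnt;

PROF_ALLOC_PREP(1, usize, cnt);	/* usize must already satisfy s2u() */
if (cnt == NULL)
	return (NULL);	/* profiling state unavailable */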
#ifndef JEMALLOC_ENABLE_INLINE
@ -306,272 +306,272 @@ malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
JEMALLOC_INLINE prof_tdata_t *
prof_tdata_get(bool create)
{
prof_tdata_t *prof_tdata;
cassert(config_prof);
prof_tdata = *prof_tdata_tsd_get();
if (create && prof_tdata == NULL)
prof_tdata = prof_tdata_init();
return (prof_tdata);
}
JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
{
uint64_t r;
double u;
cassert(config_prof);
/*
* Compute sample threshold as a geometrically distributed random
* variable with mean (2^opt_lg_prof_sample).
*
* __ __
* | log(u) | 1
* prof_tdata->threshold = | -------- |, where p = -------------------
* | log(1-p) | opt_lg_prof_sample
* 2
*
* For more information on the math, see:
*
* Non-Uniform Random Variate Generation
* Luc Devroye
* Springer-Verlag, New York, 1986
* pp 500
* (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
*/
prng64(r, 53, prof_tdata->prng_state,
UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
u = (double)r * (1.0/9007199254740992.0L);
prof_tdata->threshold = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
+ (uint64_t)1U;
}
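For intuition, one worked instance of the formula (values illustrative):

/* With opt_lg_prof_sample == 10, p = 1/1024. For a draw u == 0.5:
 *   threshold = (uint64_t)(log(0.5) / log(1 - 1/1024)) + 1 == 710
 * and the mean over many draws is 2^10 = 1024 bytes between samples. */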
JEMALLOC_INLINE prof_ctx_t *
prof_ctx_get(const void *ptr)
{
prof_ctx_t *ret;
arena_chunk_t *chunk;
cassert(config_prof);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
ret = arena_prof_ctx_get(ptr);
} else
ret = huge_prof_ctx_get(ptr);
return (ret);
}
JEMALLOC_INLINE void
prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
arena_chunk_t *chunk;
cassert(config_prof);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
arena_prof_ctx_set(ptr, ctx);
} else
huge_prof_ctx_set(ptr, ctx);
}
JEMALLOC_INLINE bool
prof_sample_accum_update(size_t size)
{
    prof_tdata_t *prof_tdata;

    cassert(config_prof);
    /* Sampling logic is unnecessary if the interval is 1. */
    assert(opt_lg_prof_sample != 0);

    prof_tdata = prof_tdata_get(false);
    if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
        return (true);

    /* Take care to avoid integer overflow. */
    if (size >= prof_tdata->threshold - prof_tdata->accum) {
        prof_tdata->accum -= (prof_tdata->threshold - size);
        /* Compute new sample threshold. */
        prof_sample_threshold_update(prof_tdata);
        while (prof_tdata->accum >= prof_tdata->threshold) {
            prof_tdata->accum -= prof_tdata->threshold;
            prof_sample_threshold_update(prof_tdata);
        }
        return (false);
    } else {
        prof_tdata->accum += size;
        return (true);
    }
}
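/*
 * Why the subtraction form above is overflow-safe (illustrative check, not
 * part of jemalloc): accum never exceeds threshold, so threshold - accum
 * always fits in a uint64_t, whereas the equivalent-looking
 * "accum + size >= threshold" can wrap for a huge size.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
    uint64_t threshold = UINT64_C(1) << 20;
    uint64_t accum = threshold - 1;
    uint64_t size = UINT64_MAX;

    assert(size >= threshold - accum);  /* Correct: sample is due. */
    assert(accum + size < threshold);   /* Wrapped: the naive test lies. */
    return (0);
}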
JEMALLOC_INLINE void
prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
{
    cassert(config_prof);
    assert(ptr != NULL);
    assert(size == isalloc(ptr, true));

    if (opt_lg_prof_sample != 0) {
        if (prof_sample_accum_update(size)) {
            /*
             * Don't sample.  For malloc()-like allocation, it is
             * always possible to tell in advance how large an
             * object's usable size will be, so there should never
             * be a difference between the size passed to
             * PROF_ALLOC_PREP() and prof_malloc().
             */
            assert((uintptr_t)cnt == (uintptr_t)1U);
        }
    }

    if ((uintptr_t)cnt > (uintptr_t)1U) {
        prof_ctx_set(ptr, cnt->ctx);

        cnt->epoch++;
        /*********/
        mb_write();
        /*********/
        cnt->cnts.curobjs++;
        cnt->cnts.curbytes += size;
        if (opt_prof_accum) {
            cnt->cnts.accumobjs++;
            cnt->cnts.accumbytes += size;
        }
        /*********/
        mb_write();
        /*********/
        cnt->epoch++;
        /*********/
        mb_write();
        /*********/
    } else
        prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
}
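/*
 * Reader-side sketch of the epoch protocol used above (an illustrative
 * assumption, not jemalloc's verbatim dump code; the prof_cnt_t type and
 * the barrier placement are assumed): cnt->epoch is bumped before and
 * after each counter update, with mb_write() ordering the stores, so a
 * lock-free reader can retry until it observes the same even epoch on
 * both sides of its copy and thus an untorn snapshot.
 */
JEMALLOC_INLINE prof_cnt_t
prof_cnt_snapshot_sketch(const prof_thr_cnt_t *cnt)
{
    prof_cnt_t snap;
    uint64_t e0, e1;

    do {
        e0 = cnt->epoch;
        /* A read barrier would pair with mb_write() here. */
        snap = cnt->cnts;
        /* And here. */
        e1 = cnt->epoch;
    } while (e0 != e1 || (e0 & 1) != 0);

    return (snap);
}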
JEMALLOC_INLINE void
prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
size_t old_size, prof_ctx_t *old_ctx)
{
    prof_thr_cnt_t *told_cnt;

    cassert(config_prof);
    assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);

    if (ptr != NULL) {
        assert(size == isalloc(ptr, true));
        if (opt_lg_prof_sample != 0) {
            if (prof_sample_accum_update(size)) {
                /*
                 * Don't sample.  The size passed to
                 * PROF_ALLOC_PREP() was larger than what
                 * actually got allocated, so a backtrace was
                 * captured for this allocation, even though
                 * its actual size was insufficient to cross
                 * the sample threshold.
                 */
                cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
            }
        }
    }

    if ((uintptr_t)old_ctx > (uintptr_t)1U) {
        told_cnt = prof_lookup(old_ctx->bt);
        if (told_cnt == NULL) {
            /*
             * It's too late to propagate OOM for this realloc(),
             * so operate directly on old_cnt->ctx->cnt_merged.
             */
            malloc_mutex_lock(old_ctx->lock);
            old_ctx->cnt_merged.curobjs--;
            old_ctx->cnt_merged.curbytes -= old_size;
            malloc_mutex_unlock(old_ctx->lock);
            told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
        }
    } else
        told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;

    if ((uintptr_t)told_cnt > (uintptr_t)1U)
        told_cnt->epoch++;
    if ((uintptr_t)cnt > (uintptr_t)1U) {
        prof_ctx_set(ptr, cnt->ctx);
        cnt->epoch++;
    } else if (ptr != NULL)
        prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
    /*********/
    mb_write();
    /*********/
    if ((uintptr_t)told_cnt > (uintptr_t)1U) {
        told_cnt->cnts.curobjs--;
        told_cnt->cnts.curbytes -= old_size;
    }
    if ((uintptr_t)cnt > (uintptr_t)1U) {
        cnt->cnts.curobjs++;
        cnt->cnts.curbytes += size;
        if (opt_prof_accum) {
            cnt->cnts.accumobjs++;
            cnt->cnts.accumbytes += size;
        }
    }
    /*********/
    mb_write();
    /*********/
    if ((uintptr_t)told_cnt > (uintptr_t)1U)
        told_cnt->epoch++;
    if ((uintptr_t)cnt > (uintptr_t)1U)
        cnt->epoch++;
    /*********/
    mb_write(); /* Not strictly necessary. */
}
JEMALLOC_INLINE void
prof_free(const void *ptr, size_t size)
{
    prof_ctx_t *ctx = prof_ctx_get(ptr);

    cassert(config_prof);

    if ((uintptr_t)ctx > (uintptr_t)1) {
        prof_thr_cnt_t *tcnt;
        assert(size == isalloc(ptr, true));
        tcnt = prof_lookup(ctx->bt);

        if (tcnt != NULL) {
            tcnt->epoch++;
            /*********/
            mb_write();
            /*********/
            tcnt->cnts.curobjs--;
            tcnt->cnts.curbytes -= size;
            /*********/
            mb_write();
            /*********/
            tcnt->epoch++;
            /*********/
            mb_write();
            /*********/
        } else {
            /*
             * OOM during free() cannot be propagated, so operate
             * directly on cnt->ctx->cnt_merged.
             */
            malloc_mutex_lock(ctx->lock);
            ctx->cnt_merged.curobjs--;
            ctx->cnt_merged.curbytes -= size;
            malloc_mutex_unlock(ctx->lock);
        }
    }
}
#endif


@ -3,7 +3,7 @@
*/
#define ql_head(a_type) \
struct { \
    a_type *qlh_first; \
}

#define ql_head_initializer(a_head) {NULL}

@ -12,7 +12,7 @@ struct { \
/* List functions. */
#define ql_new(a_head) do { \
    (a_head)->qlh_first = NULL; \
} while (0)

#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)

@ -20,64 +20,64 @@ struct { \
#define ql_first(a_head) ((a_head)->qlh_first)

#define ql_last(a_head, a_field) \
    ((ql_first(a_head) != NULL) \
        ? qr_prev(ql_first(a_head), a_field) : NULL)

#define ql_next(a_head, a_elm, a_field) \
    ((ql_last(a_head, a_field) != (a_elm)) \
        ? qr_next((a_elm), a_field) : NULL)

#define ql_prev(a_head, a_elm, a_field) \
    ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
        : NULL)

#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
    qr_before_insert((a_qlelm), (a_elm), a_field); \
    if (ql_first(a_head) == (a_qlelm)) { \
        ql_first(a_head) = (a_elm); \
    } \
} while (0)

#define ql_after_insert(a_qlelm, a_elm, a_field) \
    qr_after_insert((a_qlelm), (a_elm), a_field)

#define ql_head_insert(a_head, a_elm, a_field) do { \
    if (ql_first(a_head) != NULL) { \
        qr_before_insert(ql_first(a_head), (a_elm), a_field); \
    } \
    ql_first(a_head) = (a_elm); \
} while (0)

#define ql_tail_insert(a_head, a_elm, a_field) do { \
    if (ql_first(a_head) != NULL) { \
        qr_before_insert(ql_first(a_head), (a_elm), a_field); \
    } \
    ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)

#define ql_remove(a_head, a_elm, a_field) do { \
    if (ql_first(a_head) == (a_elm)) { \
        ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
    } \
    if (ql_first(a_head) != (a_elm)) { \
        qr_remove((a_elm), a_field); \
    } else { \
        ql_first(a_head) = NULL; \
    } \
} while (0)

#define ql_head_remove(a_head, a_type, a_field) do { \
    a_type *t = ql_first(a_head); \
    ql_remove((a_head), t, a_field); \
} while (0)

#define ql_tail_remove(a_head, a_type, a_field) do { \
    a_type *t = ql_last(a_head, a_field); \
    ql_remove((a_head), t, a_field); \
} while (0)

#define ql_foreach(a_var, a_head, a_field) \
    qr_foreach((a_var), ql_first(a_head), a_field)

#define ql_reverse_foreach(a_var, a_head, a_field) \
    qr_reverse_foreach((a_var), ql_first(a_head), a_field)
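/*
 * Minimal usage sketch (illustrative, not part of jemalloc; widget_t and
 * ql_demo() are hypothetical names): an intrusive list built from the
 * ql_/qr_ macros above.
 */
typedef struct widget_s widget_t;
struct widget_s {
    int id;
    ql_elm(widget_t) link;  /* Embedded ring linkage. */
};

static void
ql_demo(void)
{
    ql_head(widget_t) head;
    widget_t a, b, *w;

    ql_new(&head);
    ql_elm_new(&a, link);
    ql_elm_new(&b, link);
    a.id = 1;
    b.id = 2;
    ql_tail_insert(&head, &a, link);
    ql_tail_insert(&head, &b, link);
    ql_foreach(w, &head, link) {
        /* Visits a, then b. */
    }
    ql_remove(&head, &a, link);
}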


@ -1,14 +1,14 @@
/* Ring definitions. */
#define qr(a_type) \
struct { \
    a_type *qre_next; \
    a_type *qre_prev; \
}

/* Ring functions. */
#define qr_new(a_qr, a_field) do { \
    (a_qr)->a_field.qre_next = (a_qr); \
    (a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)

@ -16,52 +16,52 @@ struct { \
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)

#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
    (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
    (a_qr)->a_field.qre_next = (a_qrelm); \
    (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
    (a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
    (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
    (a_qr)->a_field.qre_prev = (a_qrelm); \
    (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
    (a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)

#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
    void *t; \
    (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
    (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
    t = (a_qr_a)->a_field.qre_prev; \
    (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
    (a_qr_b)->a_field.qre_prev = t; \
} while (0)

/*
 * qr_meld() and qr_split() are functionally equivalent, so there's no need to
 * have two copies of the code.
 */
#define qr_split(a_qr_a, a_qr_b, a_field) \
    qr_meld((a_qr_a), (a_qr_b), a_field)

#define qr_remove(a_qr, a_field) do { \
    (a_qr)->a_field.qre_prev->a_field.qre_next \
        = (a_qr)->a_field.qre_next; \
    (a_qr)->a_field.qre_next->a_field.qre_prev \
        = (a_qr)->a_field.qre_prev; \
    (a_qr)->a_field.qre_next = (a_qr); \
    (a_qr)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_foreach(var, a_qr, a_field) \
    for ((var) = (a_qr); \
        (var) != NULL; \
        (var) = (((var)->a_field.qre_next != (a_qr)) \
        ? (var)->a_field.qre_next : NULL))

#define qr_reverse_foreach(var, a_qr, a_field) \
    for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
        (var) != NULL; \
        (var) = (((var) != (a_qr)) \
        ? (var)->a_field.qre_prev : NULL))
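/*
 * Why qr_meld() doubles as qr_split() (illustrative sketch; ring_elm_t and
 * the demo function are hypothetical): the same prev-pointer exchange that
 * joins two disjoint rings, applied to one ring, cuts it back apart.
 */
typedef struct ring_elm_s ring_elm_t;
struct ring_elm_s {
    qr(ring_elm_t) link;
};

static void
qr_meld_split_demo(void)
{
    ring_elm_t a, b;

    qr_new(&a, link);
    qr_new(&b, link);
    qr_meld(&a, &b, link);  /* Ring is now a <-> b. */
    qr_split(&a, &b, link); /* Back to two singleton rings. */
}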


@ -12,17 +12,17 @@ typedef struct quarantine_s quarantine_t;
#ifdef JEMALLOC_H_STRUCTS
struct quarantine_obj_s {
    void    *ptr;
    size_t  usize;
};

struct quarantine_s {
    size_t  curbytes;
    size_t  curobjs;
    size_t  first;
#define LG_MAXOBJS_INIT 10
    size_t  lg_maxobjs;
    quarantine_obj_t    objs[1]; /* Dynamically sized ring buffer. */
};
#endif /* JEMALLOC_H_STRUCTS */
@ -52,15 +52,16 @@ malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL,
JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
    quarantine_t *quarantine;

    assert(config_fill && opt_quarantine);

    quarantine = *quarantine_tsd_get();
    if (quarantine == NULL)
        quarantine_init(LG_MAXOBJS_INIT);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/



@ -25,10 +25,10 @@ typedef struct rtree_s rtree_t;
#ifdef JEMALLOC_H_STRUCTS
struct rtree_s {
    malloc_mutex_t  mutex;
    void            **root;
    unsigned        height;
    unsigned        level2bits[1]; /* Dynamically sized. */
};
#endif /* JEMALLOC_H_STRUCTS */
@ -58,37 +58,37 @@ bool rtree_set(rtree_t *rtree, uintptr_t key, void *val);
JEMALLOC_INLINE void * \
f(rtree_t *rtree, uintptr_t key) \
{ \
    void *ret; \
    uintptr_t subkey; \
    unsigned i, lshift, height, bits; \
    void **node, **child; \
\
    RTREE_LOCK(&rtree->mutex); \
    for (i = lshift = 0, height = rtree->height, node = rtree->root; \
        i < height - 1; \
        i++, lshift += bits, node = child) { \
        bits = rtree->level2bits[i]; \
        subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
            3)) - bits); \
        child = (void**)node[subkey]; \
        if (child == NULL) { \
            RTREE_UNLOCK(&rtree->mutex); \
            return (NULL); \
        } \
    } \
\
    /* \
     * node is a leaf, so it contains values rather than node \
     * pointers. \
     */ \
    bits = rtree->level2bits[i]; \
    subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
        bits); \
    ret = node[subkey]; \
    RTREE_UNLOCK(&rtree->mutex); \
\
    RTREE_GET_VALIDATE \
    return (ret); \
}
#ifdef JEMALLOC_DEBUG
@ -113,7 +113,7 @@ RTREE_GET_GENERATE(rtree_get_locked)
* seems impossible, but the following assertion is a prudent sanity check.
*/
# define RTREE_GET_VALIDATE \
    assert(rtree_get_locked(rtree, key) == ret);
#else
# define RTREE_GET_VALIDATE
#endif
@ -125,38 +125,38 @@ RTREE_GET_GENERATE(rtree_get)
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, void *val)
{
    uintptr_t subkey;
    unsigned i, lshift, height, bits;
    void **node, **child;

    malloc_mutex_lock(&rtree->mutex);
    for (i = lshift = 0, height = rtree->height, node = rtree->root;
        i < height - 1;
        i++, lshift += bits, node = child) {
        bits = rtree->level2bits[i];
        subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
            bits);
        child = (void**)node[subkey];
        if (child == NULL) {
            child = (void**)base_alloc(sizeof(void *) <<
                rtree->level2bits[i+1]);
            if (child == NULL) {
                malloc_mutex_unlock(&rtree->mutex);
                return (true);
            }
            memset(child, 0, sizeof(void *) <<
                rtree->level2bits[i+1]);
            node[subkey] = child;
        }
    }

    /* node is a leaf, so it contains values rather than node pointers. */
    bits = rtree->level2bits[i];
    subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
    node[subkey] = val;
    malloc_mutex_unlock(&rtree->mutex);

    return (false);
}
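/*
 * Usage sketch (illustrative; the demo function is hypothetical): an rtree
 * maps the bits of an address-sized key to an opaque value, e.g. chunk
 * address -> per-chunk metadata.  Per the code above, rtree_set() returns
 * true only on internal allocation failure.
 */
JEMALLOC_INLINE bool
rtree_demo(rtree_t *rtree, void *chunk, void *meta)
{
    if (rtree_set(rtree, (uintptr_t)chunk, meta))
        return (true);  /* OOM while growing the tree. */
    assert(rtree_get(rtree, (uintptr_t)chunk) == meta);
    return (false);
}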
#endif


@ -12,117 +12,117 @@ typedef struct chunk_stats_s chunk_stats_t;
#ifdef JEMALLOC_H_STRUCTS
struct tcache_bin_stats_s {
    /*
     * Number of allocation requests that corresponded to the size of this
     * bin.
     */
    uint64_t    nrequests;
};

struct malloc_bin_stats_s {
    /*
     * Current number of bytes allocated, including objects currently
     * cached by tcache.
     */
    size_t      allocated;

    /*
     * Total number of allocation/deallocation requests served directly by
     * the bin.  Note that tcache may allocate an object, then recycle it
     * many times, resulting in many increments to nrequests, but only one
     * each to nmalloc and ndalloc.
     */
    uint64_t    nmalloc;
    uint64_t    ndalloc;

    /*
     * Number of allocation requests that correspond to the size of this
     * bin.  This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
    uint64_t    nrequests;

    /* Number of tcache fills from this bin. */
    uint64_t    nfills;

    /* Number of tcache flushes to this bin. */
    uint64_t    nflushes;

    /* Total number of runs created for this bin's size class. */
    uint64_t    nruns;

    /*
     * Total number of runs reused by extracting them from the runs tree
     * for this bin's size class.
     */
    uint64_t    reruns;

    /* Current number of runs in this bin. */
    size_t      curruns;
};

struct malloc_large_stats_s {
    /*
     * Total number of allocation/deallocation requests served directly by
     * the arena.  Note that tcache may allocate an object, then recycle
     * it many times, resulting in many increments to nrequests, but only
     * one each to nmalloc and ndalloc.
     */
    uint64_t    nmalloc;
    uint64_t    ndalloc;

    /*
     * Number of allocation requests that correspond to this size class.
     * This includes requests served by tcache, though tcache only
     * periodically merges into this counter.
     */
    uint64_t    nrequests;

    /* Current number of runs of this size class. */
    size_t      curruns;
};

struct arena_stats_s {
    /* Number of bytes currently mapped. */
    size_t      mapped;

    /*
     * Total number of purge sweeps, total number of madvise calls made,
     * and total pages purged in order to keep dirty unused memory under
     * control.
     */
    uint64_t    npurge;
    uint64_t    nmadvise;
    uint64_t    purged;

    /* Per-size-category statistics. */
    size_t      allocated_large;
    uint64_t    nmalloc_large;
    uint64_t    ndalloc_large;
    uint64_t    nrequests_large;

    /*
     * One element for each possible size class, including sizes that
     * overlap with bin size classes.  This is necessary because ipalloc()
     * sometimes has to use such large objects in order to assure proper
     * alignment.
     */
    malloc_large_stats_t    *lstats;
};

struct chunk_stats_s {
    /* Number of chunks that were allocated. */
    uint64_t    nchunks;

    /* High-water mark for number of chunks allocated. */
    size_t      highchunks;

    /*
     * Current number of chunks allocated.  This value isn't maintained
     * for any other purpose, so keep track of it in order to be able to
     * set highchunks.
     */
    size_t      curchunks;
};
#endif /* JEMALLOC_H_STRUCTS */
@ -151,21 +151,21 @@ JEMALLOC_INLINE size_t
stats_cactive_get(void)
{
    return (atomic_read_z(&stats_cactive));
}
JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{
    atomic_add_z(&stats_cactive, size);
}
JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{
    atomic_sub_z(&stats_cactive, size);
}
#endif


@ -46,9 +46,9 @@ typedef struct tcache_s tcache_t;
#ifdef JEMALLOC_H_STRUCTS
typedef enum {
    tcache_enabled_false   = 0, /* Enable cast to/from bool. */
    tcache_enabled_true    = 1,
    tcache_enabled_default = 2
} tcache_enabled_t;
/*
@ -56,30 +56,30 @@ typedef enum {
* is stored separately, mainly to reduce memory usage.
*/
struct tcache_bin_info_s {
    unsigned    ncached_max;    /* Upper limit on ncached. */
};
struct tcache_bin_s {
    tcache_bin_stats_t tstats;
    int         low_water;      /* Min # cached since last GC. */
    unsigned    lg_fill_div;    /* Fill (ncached_max >> lg_fill_div). */
    unsigned    ncached;        /* # of cached objects. */
    void        **avail;        /* Stack of available objects. */
};
struct tcache_s {
    ql_elm(tcache_t) link;      /* Used for aggregating stats. */
    uint64_t    prof_accumbytes;/* Cleared after arena_prof_accum() */
    arena_t     *arena;         /* This thread's arena. */
    unsigned    ev_cnt;         /* Event count since incremental GC. */
    unsigned    next_gc_bin;    /* Next bin to GC. */
    tcache_bin_t tbins[1];      /* Dynamically sized. */
    /*
     * The pointer stacks associated with tbins follow as a contiguous
     * array.  During tcache initialization, the avail pointer in each
     * element of tbins is initialized to point to the proper offset within
     * this array.
     */
};
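/*
 * Layout sketch for the comment above (an illustrative assumption, not
 * jemalloc's exact arithmetic): a tcache is one contiguous allocation
 * holding the header, nhbins bin slots through the flexible tbins[1]
 * member, and then every bin's avail pointer stack.
 */
#include <stddef.h>

static size_t
tcache_footprint_sketch(unsigned nhbins, unsigned total_stack_elms)
{
    return (offsetof(tcache_t, tbins) +
        nhbins * sizeof(tcache_bin_t) +
        total_stack_elms * sizeof(void *));
}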
#endif /* JEMALLOC_H_STRUCTS */
@ -150,291 +150,291 @@ malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
JEMALLOC_INLINE void
tcache_flush(void)
{
    tcache_t *tcache;

    cassert(config_tcache);

    tcache = *tcache_tsd_get();
    if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
        return;
    tcache_destroy(tcache);
    tcache = NULL;
    tcache_tsd_set(&tcache);
}
JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
    tcache_enabled_t tcache_enabled;

    cassert(config_tcache);

    tcache_enabled = *tcache_enabled_tsd_get();
    if (tcache_enabled == tcache_enabled_default) {
        tcache_enabled = (tcache_enabled_t)opt_tcache;
        tcache_enabled_tsd_set(&tcache_enabled);
    }

    return ((bool)tcache_enabled);
}
JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
    tcache_enabled_t tcache_enabled;
    tcache_t *tcache;

    cassert(config_tcache);

    tcache_enabled = (tcache_enabled_t)enabled;
    tcache_enabled_tsd_set(&tcache_enabled);
    tcache = *tcache_tsd_get();
    if (enabled) {
        if (tcache == TCACHE_STATE_DISABLED) {
            tcache = NULL;
            tcache_tsd_set(&tcache);
        }
    } else /* disabled */ {
        if (tcache > TCACHE_STATE_MAX) {
            tcache_destroy(tcache);
            tcache = NULL;
        }
        if (tcache == NULL) {
            tcache = TCACHE_STATE_DISABLED;
            tcache_tsd_set(&tcache);
        }
    }
}
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(bool create)
{
    tcache_t *tcache;

    if (config_tcache == false)
        return (NULL);
    if (config_lazy_lock && isthreaded == false)
        return (NULL);

    tcache = *tcache_tsd_get();
    if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) {
        if (tcache == TCACHE_STATE_DISABLED)
            return (NULL);
        if (tcache == NULL) {
            if (create == false) {
                /*
                 * Creating a tcache here would cause
                 * allocation as a side effect of free().
                 * Ordinarily that would be okay since
                 * tcache_create() failure is a soft failure
                 * that doesn't propagate.  However, if TLS
                 * data are freed via free() as in glibc,
                 * subtle corruption could result from setting
                 * a TLS variable after its backing memory is
                 * freed.
                 */
                return (NULL);
            }
            if (tcache_enabled_get() == false) {
                tcache_enabled_set(false); /* Memoize. */
                return (NULL);
            }
            return (tcache_create(choose_arena(NULL)));
        }
        if (tcache == TCACHE_STATE_PURGATORY) {
            /*
             * Make a note that an allocator function was called
             * after tcache_thread_cleanup() was called.
             */
            tcache = TCACHE_STATE_REINCARNATED;
            tcache_tsd_set(&tcache);
            return (NULL);
        }
        if (tcache == TCACHE_STATE_REINCARNATED)
            return (NULL);
        not_reached();
    }

    return (tcache);
}
JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{
    if (TCACHE_GC_INCR == 0)
        return;

    tcache->ev_cnt++;
    assert(tcache->ev_cnt <= TCACHE_GC_INCR);
    if (tcache->ev_cnt == TCACHE_GC_INCR)
        tcache_event_hard(tcache);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
    void *ret;

    if (tbin->ncached == 0) {
        tbin->low_water = -1;
        return (NULL);
    }
    tbin->ncached--;
    if ((int)tbin->ncached < tbin->low_water)
        tbin->low_water = tbin->ncached;
    ret = tbin->avail[tbin->ncached];
    return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
    void *ret;
    size_t binind;
    tcache_bin_t *tbin;

    binind = SMALL_SIZE2BIN(size);
    assert(binind < NBINS);
    tbin = &tcache->tbins[binind];
    ret = tcache_alloc_easy(tbin);
    if (ret == NULL) {
        ret = tcache_alloc_small_hard(tcache, tbin, binind);
        if (ret == NULL)
            return (NULL);
    }
    assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size);

    if (zero == false) {
        if (config_fill) {
            if (opt_junk) {
                arena_alloc_junk_small(ret,
                    &arena_bin_info[binind], false);
            } else if (opt_zero)
                memset(ret, 0, size);
        }
    } else {
        if (config_fill && opt_junk) {
            arena_alloc_junk_small(ret, &arena_bin_info[binind],
                true);
        }
        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
        memset(ret, 0, size);
    }
    VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    if (config_stats)
        tbin->tstats.nrequests++;
    if (config_prof)
        tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
    tcache_event(tcache);
    return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
    void *ret;
    size_t binind;
    tcache_bin_t *tbin;

    size = PAGE_CEILING(size);
    assert(size <= tcache_maxclass);
    binind = NBINS + (size >> LG_PAGE) - 1;
    assert(binind < nhbins);
    tbin = &tcache->tbins[binind];
    ret = tcache_alloc_easy(tbin);
    if (ret == NULL) {
        /*
         * Only allocate one large object at a time, because it's quite
         * expensive to create one and not use it.
         */
        ret = arena_malloc_large(tcache->arena, size, zero);
        if (ret == NULL)
            return (NULL);
    } else {
        if (config_prof && prof_promote && size == PAGE) {
            arena_chunk_t *chunk =
                (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
            size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
                LG_PAGE);
            arena_mapbits_large_binind_set(chunk, pageind,
                BININD_INVALID);
        }
        if (zero == false) {
            if (config_fill) {
                if (opt_junk)
                    memset(ret, 0xa5, size);
                else if (opt_zero)
                    memset(ret, 0, size);
            }
        } else {
            VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
            memset(ret, 0, size);
        }
        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

        if (config_stats)
            tbin->tstats.nrequests++;
        if (config_prof)
            tcache->prof_accumbytes += size;
    }

    tcache_event(tcache);
    return (ret);
}
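/*
 * Worked example for the bin index arithmetic above (illustrative; assumes
 * LG_PAGE == 12, i.e. 4 KiB pages): a 4 KiB request maps to
 * binind == NBINS, 8 KiB to NBINS + 1, and so on, so large bins are simply
 * indexed by page count just past the small bins.
 */
static size_t
large_binind_sketch(size_t size)
{
    return (NBINS + (PAGE_CEILING(size) >> LG_PAGE) - 1);
}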
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

    if (config_fill && opt_junk)
        arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (tbin->ncached == tbin_info->ncached_max) {
        tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
            1), tcache);
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->avail[tbin->ncached] = ptr;
    tbin->ncached++;

    tcache_event(tcache);
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
    size_t binind;
    tcache_bin_t *tbin;
    tcache_bin_info_t *tbin_info;

    assert((size & PAGE_MASK) == 0);
    assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
    assert(tcache_salloc(ptr) <= tcache_maxclass);

    binind = NBINS + (size >> LG_PAGE) - 1;

    if (config_fill && opt_junk)
        memset(ptr, 0x5a, size);

    tbin = &tcache->tbins[binind];
    tbin_info = &tcache_bin_info[binind];
    if (tbin->ncached == tbin_info->ncached_max) {
        tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
            1), tcache);
    }
    assert(tbin->ncached < tbin_info->ncached_max);
    tbin->avail[tbin->ncached] = ptr;
    tbin->ncached++;

    tcache_event(tcache);
}
#endif


@ -116,40 +116,40 @@ a_attr bool a_name##_booted = false;
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
{ \
\
    if (a_name##_initialized) { \
        a_name##_initialized = false; \
        a_cleanup(&a_name##_tls); \
    } \
    return (a_name##_initialized); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
    if (a_cleanup != malloc_tsd_no_cleanup) { \
        malloc_tsd_cleanup_register( \
            &a_name##_tsd_cleanup_wrapper); \
    } \
    a_name##_booted = true; \
    return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
\
    assert(a_name##_booted); \
    return (&a_name##_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
\
    assert(a_name##_booted); \
    a_name##_tls = (*val); \
    if (a_cleanup != malloc_tsd_no_cleanup) \
        a_name##_initialized = true; \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
@ -158,220 +158,220 @@ a_name##_tsd_set(a_type *val) \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
    if (a_cleanup != malloc_tsd_no_cleanup) { \
        if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \
            return (true); \
    } \
    a_name##_booted = true; \
    return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
\
    assert(a_name##_booted); \
    return (&a_name##_tls); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
\
    assert(a_name##_booted); \
    a_name##_tls = (*val); \
    if (a_cleanup != malloc_tsd_no_cleanup) { \
        if (pthread_setspecific(a_name##_tsd, \
            (void *)(&a_name##_tls))) { \
            malloc_write("<jemalloc>: Error" \
                " setting TSD for "#a_name"\n"); \
            if (opt_abort) \
                abort(); \
        } \
    } \
}
#elif (defined(_WIN32))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Data structure. */ \
typedef struct { \
    bool initialized; \
    a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##_tsd_cleanup_wrapper(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \
    if (wrapper == NULL) \
        return (false); \
    if (a_cleanup != malloc_tsd_no_cleanup && \
        wrapper->initialized) { \
        a_type val = wrapper->val; \
        a_type tsd_static_data = a_initializer; \
        wrapper->initialized = false; \
        wrapper->val = tsd_static_data; \
        a_cleanup(&val); \
        if (wrapper->initialized) { \
            /* Trigger another cleanup round. */ \
            return (true); \
        } \
    } \
    malloc_tsd_dalloc(wrapper); \
    return (false); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
    a_name##_tsd = TlsAlloc(); \
    if (a_name##_tsd == TLS_OUT_OF_INDEXES) \
        return (true); \
    if (a_cleanup != malloc_tsd_no_cleanup) { \
        malloc_tsd_cleanup_register( \
            &a_name##_tsd_cleanup_wrapper); \
    } \
    a_name##_booted = true; \
    return (false); \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
        TlsGetValue(a_name##_tsd); \
\
    if (wrapper == NULL) { \
        wrapper = (a_name##_tsd_wrapper_t *) \
            malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
        if (wrapper == NULL) { \
            malloc_write("<jemalloc>: Error allocating" \
                " TSD for "#a_name"\n"); \
            abort(); \
        } else { \
            static a_type tsd_static_data = a_initializer; \
            wrapper->initialized = false; \
            wrapper->val = tsd_static_data; \
        } \
        if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \
            malloc_write("<jemalloc>: Error setting" \
                " TSD for "#a_name"\n"); \
            abort(); \
        } \
    } \
    return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    assert(a_name##_booted); \
    wrapper = a_name##_tsd_get_wrapper(); \
    return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    assert(a_name##_booted); \
    wrapper = a_name##_tsd_get_wrapper(); \
    wrapper->val = *(val); \
    if (a_cleanup != malloc_tsd_no_cleanup) \
        wrapper->initialized = true; \
}
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
a_cleanup) \
/* Data structure. */ \
typedef struct { \
    bool initialized; \
    a_type val; \
} a_name##_tsd_wrapper_t; \
/* Initialization/cleanup. */ \
a_attr void \
a_name##_tsd_cleanup_wrapper(void *arg) \
{ \
    a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\
\
    if (a_cleanup != malloc_tsd_no_cleanup && \
        wrapper->initialized) { \
        wrapper->initialized = false; \
        a_cleanup(&wrapper->val); \
        if (wrapper->initialized) { \
            /* Trigger another cleanup round. */ \
            if (pthread_setspecific(a_name##_tsd, \
                (void *)wrapper)) { \
                malloc_write("<jemalloc>: Error" \
                    " setting TSD for "#a_name"\n"); \
                if (opt_abort) \
                    abort(); \
            } \
            return; \
        } \
    } \
    malloc_tsd_dalloc(wrapper); \
} \
a_attr bool \
a_name##_tsd_boot(void) \
{ \
\
    if (pthread_key_create(&a_name##_tsd, \
        a_name##_tsd_cleanup_wrapper) != 0) \
        return (true); \
    a_name##_booted = true; \
    return (false); \
} \
/* Get/set. */ \
a_attr a_name##_tsd_wrapper_t * \
a_name##_tsd_get_wrapper(void) \
{ \
    a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \
        pthread_getspecific(a_name##_tsd); \
\
    if (wrapper == NULL) { \
        wrapper = (a_name##_tsd_wrapper_t *) \
            malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
        if (wrapper == NULL) { \
            malloc_write("<jemalloc>: Error allocating" \
                " TSD for "#a_name"\n"); \
            abort(); \
        } else { \
            static a_type tsd_static_data = a_initializer; \
            wrapper->initialized = false; \
            wrapper->val = tsd_static_data; \
        } \
        if (pthread_setspecific(a_name##_tsd, \
            (void *)wrapper)) { \
            malloc_write("<jemalloc>: Error setting" \
                " TSD for "#a_name"\n"); \
            abort(); \
        } \
    } \
    return (wrapper); \
} \
a_attr a_type * \
a_name##_tsd_get(void) \
{ \
a_name##_tsd_wrapper_t *wrapper; \
\
assert(a_name##_booted); \
wrapper = a_name##_tsd_get_wrapper(); \
return (&wrapper->val); \
a_name##_tsd_wrapper_t *wrapper; \
\
assert(a_name##_booted); \
wrapper = a_name##_tsd_get_wrapper(); \
return (&wrapper->val); \
} \
a_attr void \
a_name##_tsd_set(a_type *val) \
{ \
    a_name##_tsd_wrapper_t *wrapper; \
\
    assert(a_name##_booted); \
    wrapper = a_name##_tsd_get_wrapper(); \
    wrapper->val = *(val); \
    if (a_cleanup != malloc_tsd_no_cleanup) \
        wrapper->initialized = true; \
}
#endif
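/*
 * Usage sketch (illustrative; "example" is a hypothetical name, and the
 * matching malloc_tsd_protos/malloc_tsd_data declarations are assumed to
 * exist): malloc_tsd_funcs() generates example_tsd_get()/example_tsd_set(),
 * giving per-thread state with a uniform API across all four backends above.
 */
malloc_tsd_funcs(JEMALLOC_INLINE, example, unsigned, 0,
    malloc_tsd_no_cleanup)

JEMALLOC_INLINE void
example_bump(void)
{
    unsigned v = *example_tsd_get();

    v++;
    example_tsd_set(&v);
}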


@ -33,45 +33,45 @@
*/
#ifndef assert
#define assert(e) do { \
    if (config_debug && !(e)) { \
        malloc_printf( \
            "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
            __FILE__, __LINE__, #e); \
        abort(); \
    } \
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
    if ((c) == false) \
        assert(false); \
} while (0)
#ifndef not_reached
#define not_reached() do { \
    if (config_debug) { \
        malloc_printf( \
            "<jemalloc>: %s:%d: Unreachable code reached\n", \
            __FILE__, __LINE__); \
        abort(); \
    } \
} while (0)
#endif
#ifndef not_implemented
#define not_implemented() do { \
    if (config_debug) { \
        malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
            __FILE__, __LINE__); \
        abort(); \
    } \
} while (0)
#endif
#define assert_not_implemented(e) do { \
    if (config_debug && !(e)) \
        not_implemented(); \
} while (0)
#endif /* JEMALLOC_H_TYPES */
@ -118,17 +118,17 @@ JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{
    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
    x |= x >> 32;
#endif
    x++;
    return (x);
}
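/*
 * Quick sanity sketch of the bit-smearing above (illustrative): after x--,
 * the shift cascade propagates the highest set bit into every lower
 * position, so x++ lands on the next power of two.
 */
JEMALLOC_INLINE void
pow2_ceil_demo(void)
{
    assert(pow2_ceil(5) == 8);  /* 4 -> 0b100 -> 0b111 -> 8. */
    assert(pow2_ceil(8) == 8);  /* Powers of two are fixed points. */
    assert(pow2_ceil(1) == 1);
}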
/* Sets error code */
@ -137,9 +137,9 @@ set_errno(int errnum)
{
#ifdef _WIN32
    SetLastError(errnum);
#else
    errno = errnum;
#endif
}
@ -149,9 +149,9 @@ get_errno(void)
{
#ifdef _WIN32
    return (GetLastError());
#else
    return (errno);
#endif
}
#endif


@ -1,32 +1,32 @@
// ISO C9x compliant inttypes.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   1. Redistributions of source code must retain the above copyright notice,
//      this list of conditions and the following disclaimer.
//
//   2. Redistributions in binary form must reproduce the above copyright
//      notice, this list of conditions and the following disclaimer in the
//      documentation and/or other materials provided with the distribution.
//
//   3. The name of the author may be used to endorse or promote products
//      derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [


@ -1,32 +1,32 @@
// ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   1. Redistributions of source code must retain the above copyright notice,
//      this list of conditions and the following disclaimer.
//
//   2. Redistributions in binary form must reproduce the above copyright
//      notice, this list of conditions and the following disclaimer in the
//      documentation and/or other materials provided with the distribution.
//
//   3. The name of the author may be used to endorse or promote products
//      derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [


@ -7,17 +7,17 @@
#pragma intrinsic(_BitScanForward)
static __forceinline int ffsl(long x)
{
    unsigned long i;

    if (_BitScanForward(&i, x))
        return (i + 1);
    return (0);
}
static __forceinline int ffs(int x)
{
    return (ffsl(x));
}
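/*
 * Quick check of the 1-based result convention above (illustrative; the
 * demo function is hypothetical).
 */
static __forceinline void
ffs_demo(void)
{
    int r0 = ffs(0);    /* 0: no bit set. */
    int r1 = ffs(1);    /* 1: positions are 1-based. */
    int r8 = ffs(8);    /* 4: 0b1000 -> fourth position. */
    (void)r0; (void)r1; (void)r8;
}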
#endif


@ -115,7 +115,7 @@ fi
if [ x"$dir_arg" != x ]; then
dst=$src
src=""
if [ -d $dst ]; then
instcmd=:
else
@ -124,7 +124,7 @@ if [ x"$dir_arg" != x ]; then
else
# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
# might cause directories to be created, which would be especially bad
# if $src (and thus $dsttmp) contains '*'.
if [ -f $src -o -d $src ]
@ -134,7 +134,7 @@ else
echo "install: $src does not exist"
exit 1
fi
if [ x"$dst" = x ]
then
echo "install: no destination specified"
@ -162,7 +162,7 @@ dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
# Skip lots of stat calls in the usual case.
if [ ! -d "$dstdir" ]; then
defaultIFS='
'
IFS="${IFS-${defaultIFS}}"
@ -201,17 +201,17 @@ else
# If we're going to rename the final executable, determine the name now.
if [ x"$transformarg" = x ]
if [ x"$transformarg" = x ]
then
dstfile=`basename $dst`
else
dstfile=`basename $dst $transformbasename |
sed $transformarg`$transformbasename
fi
# don't allow the sed command to completely eliminate the filename
if [ x"$dstfile" = x ]
if [ x"$dstfile" = x ]
then
dstfile=`basename $dst`
else
@ -242,7 +242,7 @@ else
# Now rename the file to the real destination.
$doit $rmcmd -f $dstdir/$dstfile &&
$doit $mvcmd $dsttmp $dstdir/$dstfile
fi &&

View File

@ -214,7 +214,7 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
size_t size)
{
bool unzeroed;
extent_node_t *xnode, *node, *prev, *xprev, key;
unzeroed = pages_purge(chunk, size);
VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
@ -226,6 +226,8 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
* held.
*/
xnode = base_node_alloc();
/* Use xprev to implement conditional deferred deallocation of prev. */
xprev = NULL;
malloc_mutex_lock(&chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
@ -242,8 +244,6 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node->size += size;
node->zeroed = (node->zeroed && (unzeroed == false));
extent_tree_szad_insert(chunks_szad, node);
} else {
/* Coalescing forward failed, so insert a new node. */
if (xnode == NULL) {
@ -253,10 +253,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
* already been purged, so this is only a virtual
* memory leak.
*/
malloc_mutex_unlock(&chunks_mtx);
return;
goto label_return;
}
node = xnode;
xnode = NULL; /* Prevent deallocation below. */
node->addr = chunk;
node->size = size;
node->zeroed = (unzeroed == false);
@ -282,9 +282,19 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node->zeroed = (node->zeroed && prev->zeroed);
extent_tree_szad_insert(chunks_szad, node);
xprev = prev;
}
label_return:
malloc_mutex_unlock(&chunks_mtx);
/*
* Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
* avoid potential deadlock.
*/
if (xnode != NULL)
base_node_dealloc(xnode);
if (xprev != NULL)
base_node_dealloc(prev);
}
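/*
 * The deferred-deallocation shape used above, in isolation (an illustrative
 * sketch, not jemalloc's code): per the comment in chunk_record(), nodes
 * orphaned during tree surgery are stashed (xnode/xprev) and handed to
 * base_node_dealloc() only after chunks_mtx is dropped, so the deallocation
 * path can never deadlock against a thread already holding that mutex.
 */
static void
deferred_dealloc_sketch(extent_node_t *victim)
{
	malloc_mutex_lock(&chunks_mtx);
	/* ...tree surgery that may orphan victim... */
	malloc_mutex_unlock(&chunks_mtx);
	if (victim != NULL)
		base_node_dealloc(victim);	/* Safe: lock released. */
}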
void