S390: Use new __libc_tbegin_retry macro in elision-lock.c.

This patch implements the __libc_tbegin_retry macro, which is equivalent to
the GCC builtin __builtin_tbegin_retry except for the changes that were
applied to __libc_tbegin in the previous patch.

If tbegin aborts with _HTM_TBEGIN_TRANSIENT, the macro restores the fpc and
fprs and automatically retries up to retry_cnt further tbegins.  Saving the
state again is omitted, as it was already saved in the first round.  Before
retrying a further transaction, the transaction-abort-assist instruction (ppa)
is used to assist the CPU.
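
For illustration (not part of the patch), the retry logic that the macro
implements in inline assembly corresponds roughly to the following C sketch;
start_transaction () and transaction_abort_assist () are hypothetical
stand-ins for the tbegin and ppa instructions:

  /* Hedged sketch of what __libc_tbegin_retry does; the real macro uses
     inline assembly.  start_transaction () and transaction_abort_assist ()
     are hypothetical stand-ins for the tbegin and ppa instructions;
     _HTM_TBEGIN_* are the condition-code values from htm.h.  */
  static inline int
  tbegin_retry_sketch (void *tdb, int retry_cnt)
  {
    int tx_cnt = 0;
    while (1)
      {
        /* The fpc and fprs are saved before the first tbegin only; a retry
           merely restores them.  */
        int status = start_transaction (tdb);
        /* cc 0 (started) and cc 1/3 (persistent abort) are returned to the
           caller; only cc 2 (transient abort) is retried here.  */
        if (status != _HTM_TBEGIN_TRANSIENT || tx_cnt >= retry_cnt)
          return status;
        tx_cnt++;
        transaction_abort_assist (tx_cnt); /* ppa: support the CPU.  */
      }
  }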

The macro is now used in the function __lll_lock_elision.
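
Condensed from the elision-lock.c hunk below, the intended call pattern looks
roughly like this (transaction body and adapt_count handling elided);
aconf.try_tbegin counts the total number of attempts, so the macro receives
aconf.try_tbegin - 1 as the retry count:

  if (aconf.try_tbegin > 0)
    {
      /* One initial tbegin plus up to aconf.try_tbegin - 1 retries.  */
      int status = __libc_tbegin_retry ((void *) 0, aconf.try_tbegin - 1);
      if (status == _HTM_TBEGIN_STARTED)
        {
          /* Transactional fast path: return to user code if the futex is
             free, otherwise tend / tabort as before.  */
        }
      else
        {
          /* Persistent abort or out of retries: update adapt_count and use
             the normal lock.  */
        }
    }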

ChangeLog:

	* sysdeps/unix/sysv/linux/s390/htm.h(__libc_tbegin_retry): New macro.
	* sysdeps/unix/sysv/linux/s390/elision-lock.c (__lll_lock_elision):
	Use __libc_tbegin_retry macro.
Author: Stefan Liebler
Date:   2016-12-20 15:12:48 +01:00
Parent: 8bfc4a2ab4
Commit: 53c5c3d5ac
3 changed files with 64 additions and 28 deletions

ChangeLog

@@ -1,3 +1,9 @@
+2016-12-20  Stefan Liebler  <stli@linux.vnet.ibm.com>
+
+	* sysdeps/unix/sysv/linux/s390/htm.h(__libc_tbegin_retry): New macro.
+	* sysdeps/unix/sysv/linux/s390/elision-lock.c (__lll_lock_elision):
+	Use __libc_tbegin_retry macro.
+
 2016-12-20  Stefan Liebler  <stli@linux.vnet.ibm.com>
 
 	* sysdeps/unix/sysv/linux/s390/Makefile (elision-CFLAGS):

sysdeps/unix/sysv/linux/s390/elision-lock.c

@@ -60,17 +60,16 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
       goto use_lock;
     }
-  int try_tbegin;
-  for (try_tbegin = aconf.try_tbegin;
-       try_tbegin > 0;
-       try_tbegin--)
+  if (aconf.try_tbegin > 0)
     {
-      int status;
-      if (__builtin_expect
-          ((status = __libc_tbegin ((void *) 0)) == _HTM_TBEGIN_STARTED, 1))
+      int status = __libc_tbegin_retry ((void *) 0, aconf.try_tbegin - 1);
+      if (__builtin_expect (status == _HTM_TBEGIN_STARTED,
+                            _HTM_TBEGIN_STARTED))
         {
-          if (*futex == 0)
+          if (__builtin_expect (*futex == 0, 1))
             /* Lock was free.  Return to user code in a transaction.  */
             return 0;
           /* Lock was busy.  Fall back to normal locking.  */
           if (__builtin_expect (__libc_tx_nesting_depth (), 1))
             {
@@ -81,7 +80,6 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
                  See above for why relaxed MO is sufficient.  */
               if (aconf.skip_lock_busy > 0)
                 atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
-              goto use_lock;
             }
           else /* nesting depth is > 1 */
             {
@@ -99,28 +97,28 @@ __lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
               __libc_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
             }
         }
-      else if (status != _HTM_TBEGIN_TRANSIENT)
-        {
-          /* A persistent abort (cc 1 or 3) indicates that a retry is
-             probably futile.  Use the normal locking now and for the
-             next couple of calls.
-             Be careful to avoid writing to the lock.  See above for why
-             relaxed MO is sufficient.  */
-          if (aconf.skip_lock_internal_abort > 0)
-            atomic_store_relaxed (adapt_count,
-                                  aconf.skip_lock_internal_abort);
-        }
+      else
+        {
+          if (status != _HTM_TBEGIN_TRANSIENT)
+            {
+              /* A persistent abort (cc 1 or 3) indicates that a retry is
+                 probably futile.  Use the normal locking now and for the
+                 next couple of calls.
+                 Be careful to avoid writing to the lock.  See above for why
+                 relaxed MO is sufficient.  */
+              if (aconf.skip_lock_internal_abort > 0)
+                atomic_store_relaxed (adapt_count,
+                                      aconf.skip_lock_internal_abort);
+              goto use_lock;
+            }
+          /* Same logic as above, but for for a number of temporary failures in
+             a row.  */
+          if (aconf.skip_lock_out_of_tbegin_retries > 0)
+            atomic_store_relaxed (adapt_count,
+                                  aconf.skip_lock_out_of_tbegin_retries);
+        }
     }
-  /* Same logic as above, but for for a number of temporary failures in a
-     row.  See above for why relaxed MO is sufficient.  */
-  if (aconf.skip_lock_out_of_tbegin_retries > 0 && aconf.try_tbegin > 0)
-    atomic_store_relaxed (adapt_count, aconf.skip_lock_out_of_tbegin_retries);
 use_lock:
   /* Use normal locking as fallback path if transaction does not succeed.  */
   return LLL_LOCK ((*futex), private);
 }

sysdeps/unix/sysv/linux/s390/htm.h

@@ -69,7 +69,36 @@
    started.  Thus the user of the tbegin macros in this header file has to
    compile the file / function with -msoft-float.  It prevents gcc from using
    fprs / vrs.  */
-#define __libc_tbegin(tdb) \
+#define __libc_tbegin(tdb) __libc_tbegin_base(tdb,,,)
+
+#define __libc_tbegin_retry_output_regs , [R_TX_CNT] "+&d" (__tx_cnt)
+#define __libc_tbegin_retry_input_regs(retry_cnt) , [R_RETRY] "d" (retry_cnt)
+
+#define __libc_tbegin_retry_abort_path_insn \
+  /* If tbegin returned _HTM_TBEGIN_TRANSIENT, retry immediately so \
+     that max tbegin_cnt transactions are tried.  Otherwise return and \
+     let the caller of this macro do the fallback path.  */ \
+  " jnh 1f\n\t" /* cc 1/3: jump to fallback path.  */ \
+  /* tbegin returned _HTM_TBEGIN_TRANSIENT: retry with transaction.  */ \
+  " crje %[R_TX_CNT], %[R_RETRY], 1f\n\t" /* Reached max retries?  */ \
+  " ahi %[R_TX_CNT], 1\n\t" \
+  " ppa %[R_TX_CNT], 0, 1\n\t" /* Transaction-Abort Assist.  */ \
+  " j 2b\n\t" /* Loop to tbegin.  */
+
+/* Same as __libc_tbegin except if tbegin aborts with _HTM_TBEGIN_TRANSIENT.
+   Then this macros restores the fpc, fprs and automatically retries up to
+   retry_cnt tbegins.  Further saving of the state is omitted as it is already
+   saved.  This macro calls tbegin at most as retry_cnt + 1 times.  */
+#define __libc_tbegin_retry(tdb, retry_cnt) \
+  ({ int __ret; \
+     int __tx_cnt = 0; \
+     __ret = __libc_tbegin_base(tdb, \
+                                __libc_tbegin_retry_abort_path_insn, \
+                                __libc_tbegin_retry_output_regs, \
+                                __libc_tbegin_retry_input_regs(retry_cnt)); \
+     __ret; \
+  })
+
+#define __libc_tbegin_base(tdb, abort_path_insn, output_regs, input_regs) \
   ({ int __ret; \
      int __fpc; \
      char __fprs[TX_FPRS_BYTES]; \
@@ -95,7 +124,7 @@
         again and result in a core dump wich does \
         now show at tbegin but the real executed \
         instruction.  */ \
-     " tbegin 0, 0xFF0E\n\t" \
+     "2: tbegin 0, 0xFF0E\n\t" \
      /* Branch away in abort case (this is the \
         prefered sequence.  See PoP in chapter 5 \
         Transactional-Execution Facility \
@@ -111,11 +140,14 @@
      " srl %[R_RET], 28\n\t" \
      " sfpc %[R_FPC]\n\t" \
      TX_RESTORE_FPRS \
+     abort_path_insn \
      "1:\n\t" \
      ".machine pop\n" \
      : [R_RET] "=&d" (__ret), \
        [R_FPC] "=&d" (__fpc) \
+       output_regs \
      : [R_FPRS] "a" (__fprs) \
+       input_regs \
      : "cc", "memory"); \
     __ret; \
  })