[nvptx libgomp plugin] Build only in supported configurations

As recently again discussed in <https://gcc.gnu.org/PR97436> "[nvptx] -m32
support", nvptx offloading other than for 64-bit host has never been
implemented, tested, supported.  So we simply shouldn't build the nvptx libgomp
plugin in this case.

This avoids build problems if, for example, in a (standard) bi-arch
x86_64-pc-linux-gnu '-m64'/'-m32' build, libcuda is available only in a 64-bit
variant but not in a 32-bit one, which, for example, is the case if you build
GCC against the CUDA toolkit's 'stubs/libcuda.so' (see
<https://stackoverflow.com/a/52784819>).

This amends PR65099 commit a92defdab7 (r225560)
"[nvptx offloading] Only 64-bit configurations are currently supported" to
match the way we're doing this for the HSA/GCN plugins.

	libgomp/
	PR libgomp/65099
	* plugin/configfrag.ac (PLUGIN_NVPTX): Restrict to supported
	configurations.
	* configure: Regenerate.
	* plugin/plugin-nvptx.c (nvptx_get_num_devices): Remove 64-bit
	check.
This commit is contained in:
Thomas Schwinge 2020-11-30 15:15:20 +01:00
parent 57a4f5e4ea
commit 6106dfb9f7
3 changed files with 104 additions and 81 deletions

84
libgomp/configure vendored
View File

@ -15272,21 +15272,30 @@ if test x"$enable_offload_targets" != x; then
tgt_plugin=intelmic
;;
nvptx*)
tgt_plugin=nvptx
PLUGIN_NVPTX=$tgt
if test "x$CUDA_DRIVER_LIB" != xno \
&& test "x$CUDA_DRIVER_LIB" != xno; then
PLUGIN_NVPTX_CPPFLAGS=$CUDA_DRIVER_CPPFLAGS
PLUGIN_NVPTX_LDFLAGS=$CUDA_DRIVER_LDFLAGS
PLUGIN_NVPTX_LIBS='-lcuda'
case "${target}" in
aarch64*-*-* | powerpc64le-*-* | x86_64-*-*)
case " ${CC} ${CFLAGS} " in
*" -m32 "* | *" -mx32 "*)
# PR libgomp/65099: Currently, we only support offloading in
# 64-bit configurations.
PLUGIN_NVPTX=0
;;
*)
tgt_plugin=nvptx
PLUGIN_NVPTX=$tgt
if test "x$CUDA_DRIVER_LIB" != xno \
&& test "x$CUDA_DRIVER_LIB" != xno; then
PLUGIN_NVPTX_CPPFLAGS=$CUDA_DRIVER_CPPFLAGS
PLUGIN_NVPTX_LDFLAGS=$CUDA_DRIVER_LDFLAGS
PLUGIN_NVPTX_LIBS='-lcuda'
PLUGIN_NVPTX_save_CPPFLAGS=$CPPFLAGS
CPPFLAGS="$PLUGIN_NVPTX_CPPFLAGS $CPPFLAGS"
PLUGIN_NVPTX_save_LDFLAGS=$LDFLAGS
LDFLAGS="$PLUGIN_NVPTX_LDFLAGS $LDFLAGS"
PLUGIN_NVPTX_save_LIBS=$LIBS
LIBS="$PLUGIN_NVPTX_LIBS $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
PLUGIN_NVPTX_save_CPPFLAGS=$CPPFLAGS
CPPFLAGS="$PLUGIN_NVPTX_CPPFLAGS $CPPFLAGS"
PLUGIN_NVPTX_save_LDFLAGS=$LDFLAGS
LDFLAGS="$PLUGIN_NVPTX_LDFLAGS $LDFLAGS"
PLUGIN_NVPTX_save_LIBS=$LIBS
LIBS="$PLUGIN_NVPTX_LIBS $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include "cuda.h"
int
@ -15302,28 +15311,35 @@ if ac_fn_c_try_link "$LINENO"; then :
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
CPPFLAGS=$PLUGIN_NVPTX_save_CPPFLAGS
LDFLAGS=$PLUGIN_NVPTX_save_LDFLAGS
LIBS=$PLUGIN_NVPTX_save_LIBS
fi
case $PLUGIN_NVPTX in
nvptx*)
if (test "x$CUDA_DRIVER_INCLUDE" = x \
|| test "x$CUDA_DRIVER_INCLUDE" = xno) \
&& (test "x$CUDA_DRIVER_LIB" = x \
|| test "x$CUDA_DRIVER_LIB" = xno); then
PLUGIN_NVPTX=1
PLUGIN_NVPTX_CPPFLAGS='-I$(srcdir)/plugin/cuda'
PLUGIN_NVPTX_LIBS='-ldl'
PLUGIN_NVPTX_DYNAMIC=1
else
PLUGIN_NVPTX=0
as_fn_error $? "CUDA driver package required for nvptx support" "$LINENO" 5
fi
;;
CPPFLAGS=$PLUGIN_NVPTX_save_CPPFLAGS
LDFLAGS=$PLUGIN_NVPTX_save_LDFLAGS
LIBS=$PLUGIN_NVPTX_save_LIBS
fi
case $PLUGIN_NVPTX in
nvptx*)
if (test "x$CUDA_DRIVER_INCLUDE" = x \
|| test "x$CUDA_DRIVER_INCLUDE" = xno) \
&& (test "x$CUDA_DRIVER_LIB" = x \
|| test "x$CUDA_DRIVER_LIB" = xno); then
PLUGIN_NVPTX=1
PLUGIN_NVPTX_CPPFLAGS='-I$(srcdir)/plugin/cuda'
PLUGIN_NVPTX_LIBS='-ldl'
PLUGIN_NVPTX_DYNAMIC=1
else
PLUGIN_NVPTX=0
as_fn_error $? "CUDA driver package required for nvptx support" "$LINENO" 5
fi
;;
esac
;;
esac
;;
*-*-*)
# Target architecture not supported.
PLUGIN_NVPTX=0
;;
esac
;;
amdgcn*)
case "${target}" in
x86_64-*-*)

View File

@ -158,47 +158,63 @@ if test x"$enable_offload_targets" != x; then
tgt_plugin=intelmic
;;
nvptx*)
tgt_plugin=nvptx
PLUGIN_NVPTX=$tgt
if test "x$CUDA_DRIVER_LIB" != xno \
&& test "x$CUDA_DRIVER_LIB" != xno; then
PLUGIN_NVPTX_CPPFLAGS=$CUDA_DRIVER_CPPFLAGS
PLUGIN_NVPTX_LDFLAGS=$CUDA_DRIVER_LDFLAGS
PLUGIN_NVPTX_LIBS='-lcuda'
case "${target}" in
aarch64*-*-* | powerpc64le-*-* | x86_64-*-*)
case " ${CC} ${CFLAGS} " in
*" -m32 "* | *" -mx32 "*)
# PR libgomp/65099: Currently, we only support offloading in
# 64-bit configurations.
PLUGIN_NVPTX=0
;;
*)
tgt_plugin=nvptx
PLUGIN_NVPTX=$tgt
if test "x$CUDA_DRIVER_LIB" != xno \
&& test "x$CUDA_DRIVER_LIB" != xno; then
PLUGIN_NVPTX_CPPFLAGS=$CUDA_DRIVER_CPPFLAGS
PLUGIN_NVPTX_LDFLAGS=$CUDA_DRIVER_LDFLAGS
PLUGIN_NVPTX_LIBS='-lcuda'
PLUGIN_NVPTX_save_CPPFLAGS=$CPPFLAGS
CPPFLAGS="$PLUGIN_NVPTX_CPPFLAGS $CPPFLAGS"
PLUGIN_NVPTX_save_LDFLAGS=$LDFLAGS
LDFLAGS="$PLUGIN_NVPTX_LDFLAGS $LDFLAGS"
PLUGIN_NVPTX_save_LIBS=$LIBS
LIBS="$PLUGIN_NVPTX_LIBS $LIBS"
AC_LINK_IFELSE(
[AC_LANG_PROGRAM(
[#include "cuda.h"],
[CUresult r = cuCtxPushCurrent (NULL);])],
[PLUGIN_NVPTX=1])
CPPFLAGS=$PLUGIN_NVPTX_save_CPPFLAGS
LDFLAGS=$PLUGIN_NVPTX_save_LDFLAGS
LIBS=$PLUGIN_NVPTX_save_LIBS
fi
case $PLUGIN_NVPTX in
nvptx*)
if (test "x$CUDA_DRIVER_INCLUDE" = x \
|| test "x$CUDA_DRIVER_INCLUDE" = xno) \
&& (test "x$CUDA_DRIVER_LIB" = x \
|| test "x$CUDA_DRIVER_LIB" = xno); then
PLUGIN_NVPTX=1
PLUGIN_NVPTX_CPPFLAGS='-I$(srcdir)/plugin/cuda'
PLUGIN_NVPTX_LIBS='-ldl'
PLUGIN_NVPTX_DYNAMIC=1
else
PLUGIN_NVPTX=0
AC_MSG_ERROR([CUDA driver package required for nvptx support])
fi
;;
PLUGIN_NVPTX_save_CPPFLAGS=$CPPFLAGS
CPPFLAGS="$PLUGIN_NVPTX_CPPFLAGS $CPPFLAGS"
PLUGIN_NVPTX_save_LDFLAGS=$LDFLAGS
LDFLAGS="$PLUGIN_NVPTX_LDFLAGS $LDFLAGS"
PLUGIN_NVPTX_save_LIBS=$LIBS
LIBS="$PLUGIN_NVPTX_LIBS $LIBS"
AC_LINK_IFELSE(
[AC_LANG_PROGRAM(
[#include "cuda.h"],
[CUresult r = cuCtxPushCurrent (NULL);])],
[PLUGIN_NVPTX=1])
CPPFLAGS=$PLUGIN_NVPTX_save_CPPFLAGS
LDFLAGS=$PLUGIN_NVPTX_save_LDFLAGS
LIBS=$PLUGIN_NVPTX_save_LIBS
fi
case $PLUGIN_NVPTX in
nvptx*)
if (test "x$CUDA_DRIVER_INCLUDE" = x \
|| test "x$CUDA_DRIVER_INCLUDE" = xno) \
&& (test "x$CUDA_DRIVER_LIB" = x \
|| test "x$CUDA_DRIVER_LIB" = xno); then
PLUGIN_NVPTX=1
PLUGIN_NVPTX_CPPFLAGS='-I$(srcdir)/plugin/cuda'
PLUGIN_NVPTX_LIBS='-ldl'
PLUGIN_NVPTX_DYNAMIC=1
else
PLUGIN_NVPTX=0
AC_MSG_ERROR([CUDA driver package required for nvptx support])
fi
;;
esac
;;
esac
;;
*-*-*)
# Target architecture not supported.
PLUGIN_NVPTX=0
;;
esac
;;
amdgcn*)
case "${target}" in
x86_64-*-*)

View File

@ -572,15 +572,6 @@ nvptx_get_num_devices (void)
{
int n;
/* PR libgomp/65099: Currently, we only support offloading in 64-bit
configurations. */
if (sizeof (void *) != 8)
{
GOMP_PLUGIN_debug (0, "Disabling nvptx offloading;"
" only 64-bit configurations are supported\n");
return 0;
}
/* This function will be called before the plugin has been initialized in
order to enumerate available devices, but CUDA API routines can't be used
until cuInit has been called. Just call it now (but don't yet do any