Fix a problem with the maximum number of open files held in the cache when running on a 32-bit Solaris host.

PR ld/19260
	* cache.c (bfd_cache_max_open): Avoid using getrlimit on 32-bit
	Solaris as the result is unreliable.
This commit is contained in:
Authored by Stefan Teleman on 2015-11-20 15:28:40 +00:00; committed by Nick Clifton.
parent a915c10f7f
commit 0b1fa2880c
2 changed files with 27 additions and 3 deletions

View File

@@ -1,3 +1,9 @@
+2015-11-20  Stefan Teleman  <stefan.teleman@oracle.com>
+
+	PR ld/19260
+	* cache.c (bfd_cache_max_open): Avoid using getrlimit on 32-bit
+	Solaris as the result is unreliable.
+
 2015-11-20  Tristan Gingold  <gingold@adacore.com>
 
 	* mach-o-x86-64.c (x86_64_howto_table): Change name here too.

View File

@ -78,18 +78,36 @@ bfd_cache_max_open (void)
@@ -78,18 +78,36 @@ bfd_cache_max_open (void)
   if (max_open_files == 0)
     {
       int max;
+#if defined(__sun) && !defined(__sparcv9) && !defined(__x86_64__)
+      /* PR ld/19260: 32-bit Solaris has very inelegant handling of the 255
+	 file descriptor limit.  The problem is that setrlimit(2) can raise
+	 RLIMIT_NOFILE to a value that is not supported by libc, resulting
+	 in "Too many open files" errors.  This can happen here even though
+	 max_open_files is set to rlim.rlim_cur / 8.  For example, if
+	 a parent process has set rlim.rlim_cur to 65536, then max_open_files
+	 will be computed as 8192.
+	 This check essentially reverts to the behavior from binutils 2.23.1
+	 for 32-bit Solaris only.  (It is hoped that the 32-bit libc
+	 limitation will be removed soon).  64-bit Solaris libc does not have
+	 this limitation.  */
+      max = 16;
+#else
 #ifdef HAVE_GETRLIMIT
       struct rlimit rlim;
       if (getrlimit (RLIMIT_NOFILE, &rlim) == 0
 	  && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
 	max = rlim.rlim_cur / 8;
       else
-#endif
+#endif /* HAVE_GETRLIMIT */
 #ifdef _SC_OPEN_MAX
       max = sysconf (_SC_OPEN_MAX) / 8;
 #else
-      max = 10;
-#endif
+      max = 10;
+#endif /* _SC_OPEN_MAX */
+#endif /* not 32-bit Solaris */
       max_open_files = max < 10 ? 10 : max;
     }