binutils-gdb/gdb/testsuite/Makefile.in

# Makefile for regression testing the GNU debugger.
# Copyright 1992-2015 Free Software Foundation, Inc.
# This file is part of GDB.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
VPATH = @srcdir@
srcdir = @srcdir@
prefix = @prefix@
exec_prefix = @exec_prefix@
abs_builddir = @abs_builddir@
target_alias = @target_noncanonical@
program_transform_name = @program_transform_name@
build_canonical = @build@
host_canonical = @host@
target_canonical = @target@
SHELL = @SHELL@
EXEEXT = @EXEEXT@
SUBDIRS = @subdirs@
RPATH_ENVVAR = @RPATH_ENVVAR@
ALL_SUBDIRS = gdb.ada gdb.arch gdb.asm gdb.base gdb.btrace gdb.cell gdb.cp \
	gdb.disasm gdb.dlang gdb.dwarf2 gdb.fortran gdb.gdb gdb.go \
the "ambiguous linespec" series gdb 2011-12-06 Joel Brobecker <brobecker@acacore.com> * language.h (struct language_defn): Add new component la_symbol_name_compare. * symfile.h (struct quick_symbol_functions): Update the profile of parameter "name_matcher" for the expand_symtabs_matching method. Update the documentation accordingly. * ada-lang.h (ada_name_for_lookup): Add declaration. * ada-lang.c (ada_name_for_lookup): New function, extracted out from ada_iterate_over_symbols. (ada_iterate_over_symbols): Do not encode symbol name anymore. (ada_expand_partial_symbol_name): Adjust profile. (ada_language_defn): Add value for la_symbol_name_compare field. * linespec.c: #include "ada-lang.h". (iterate_name_matcher): Add language parameter. Replace call to strcmp_iw by call to language->la_symbol_name_compare. (decode_variable): Encode COPY if current language is Ada. * dwarf2read.c (dw2_expand_symtabs_matching): Adjust profile of name_matcher parameter. Adjust call to name_matcher. * psymtab.c (expand_symtabs_matching_via_partial): Likewise. (expand_partial_symbol_names): Update profile of parameter "fun". * psymtab.h (expand_partial_symbol_names): Update profile of parameter "fun". * symtab.c (demangle_for_lookup): Update function documentation. (search_symbols_name_matches): Add language parameter. (expand_partial_symbol_name): Likewise. * c-lang.c (c_language_defn, cplus_language_defn) (asm_language_defn, minimal_language_defn): Add value for la_symbol_name_compare field. * d-lang.c (d_language_defn): Likewise. * f-lang.c (f_language_defn): Ditto. * jv-lang.c (java_language_defn): Ditto. * m2-lang.c (m2_language_defn): Ditto. * objc-lang.c (objc_language_defn): Ditto. * opencl-lang.c (opencl_language_defn): Ditto. * p-lang.c (pascal_language_defn): Ditto. * language.c (unknown_language_defn, auto_language_defn) (local_language_defn): Ditto. 2011-12-06 Tom Tromey <tromey@redhat.com> * linespec.c (iterate_over_all_matching_symtabs): Use LA_ITERATE_OVER_SYMBOLS. (lookup_prefix_sym, add_matching_symbols_to_info): Likewise. (find_function_symbols, decode_variable): Remove Ada special case. * language.h (struct language_defn) <la_iterate_over_symbols>: New field. (LA_ITERATE_OVER_SYMBOLS): New macro. * language.c (unknown_language_defn, auto_language_defn) (local_language_defn): Update. * c-lang.c (c_language_defn, cplus_language_defn) (asm_language_defn, minimal_language_defn): Update. * d-lang.c (d_language_defn): Update. * f-lang.c (f_language_defn): Update. * jv-lang.c (java_language_defn): Update. * m2-lang.c (m2_language_defn): Update. * objc-lang.c (objc_language_defn): Update. * opencl-lang.c (opencl_language_defn): Update. * p-lang.c (pascal_language_defn): Update. * ada-lang.c (ada_iterate_over_symbols): New function. (ada_language_defn): Update. 2011-12-06 Tom Tromey <tromey@redhat.com> Joel Brobecker <brobecker@acacore.com> PR breakpoints/13105, PR objc/8341, PR objc/8343, PR objc/8366, PR objc/8535, PR breakpoints/11657, PR breakpoints/11970, PR breakpoints/12023, PR breakpoints/12334, PR breakpoints/12856, PR shlibs/8929, PR shlibs/7393: * python/py-type.c (compare_maybe_null_strings): Rename from compare_strings. (check_types_equal): Update. * utils.c (compare_strings): New function. * tui/tui-winsource.c (tui_update_breakpoint_info): Update for location changes. * tracepoint.c (scope_info): Update. (trace_find_line_command): Use DECODE_LINE_FUNFIRSTLINE. 
* symtab.h (iterate_over_minimal_symbols) (iterate_over_some_symtabs, iterate_over_symtabs) (find_pcs_for_symtab_line, iterate_over_symbols) (demangle_for_lookup): Declare. (expand_line_sal): Remove. * symtab.c (iterate_over_some_symtabs, iterate_over_symtabs) (lookup_symtab_callback): New functions. (lookup_symtab): Rewrite. (demangle_for_lookup): New function, extract from lookup_symbol_in_language. (lookup_symbol_in_language): Use it. (iterate_over_symbols): New function. (find_line_symtab): Update. (find_pcs_for_symtab_line): New functions. (find_line_common): Add 'start' argument. (decode_line_spec): Update. Change argument to 'flags', change interpretation. (append_expanded_sal): Remove. (append_exact_match_to_sals): Remove. (expand_line_sal): Remove. * symfile.h (struct quick_symbol_functions) <lookup_symtab>: Remove. <map_symtabs_matching_filename>: New field. * stack.c (func_command): Only look in the current program space. Use DECODE_LINE_FUNFIRSTLINE. * source.c (line_info): Set pspace on sal. Check program space in the loop. Use DECODE_LINE_LIST_MODE. (select_source_symtab): Use DECODE_LINE_FUNFIRSTLINE. * solib-target.c: Remove DEF_VEC_I(CORE_ADDR). * python/python.c (gdbpy_decode_line): Update. * psymtab.c (partial_map_expand_apply): New function. (partial_map_symtabs_matching_filename): Rename from lookup_partial_symbol. Update arguments. (lookup_symtab_via_partial_symtab): Remove. (psym_functions): Update. * objc-lang.h (parse_selector, parse_method): Don't declare. (find_imps): Update. * objc-lang.c (parse_selector, parse_method): Now static. (find_methods): Change arguments. Fill in a vector of symbol names. (uniquify_strings): New function. (find_imps): Change arguments. * minsyms.c (iterate_over_minimal_symbols): New function. * linespec.h (enum decode_line_flags): New. (struct linespec_sals): New. (struct linespec_result) <canonical>: Remove. <pre_expanded, addr_string, sals>: New fields. (destroy_linespec_result, make_cleanup_destroy_linespec_result) (decode_line_full): Declare. (decode_line_1): Update. * linespec.c (struct address_entry, struct linespec_state, struct collect_info): New types. (add_sal_to_sals_basic, add_sal_to_sals, hash_address_entry) (eq_address_entry, maybe_add_address): New functions. (total_number_of_methods): Remove. (iterate_name_matcher, iterate_over_all_matching_symtabs): New functions. (find_methods): Change arguments. Don't canonicalize input. Simplify logic. (add_matching_methods, add_constructors) (build_canonical_line_spec): Remove. (filter_results, convert_results_to_lsals): New functions. (decode_line_2): Change arguments. Rewrite for new data structures. (decode_line_internal): Rename from decode_line_1. Change arguments. Add cleanups. Update for new data structures. (linespec_state_constructor, linespec_state_destructor) (decode_line_full, decode_line_1): New functions. (decode_indirect): Change arguments. Update. (locate_first_half): Use skip_spaces. (decode_objc): Change arguments. Update for new data structures. Simplify logic. (decode_compound): Change arguments. Add cleanups. Remove fallback code, replace with error. (struct decode_compound_collector): New type. (collect_one_symbol): New function. (lookup_prefix_sym): Change arguments. Update. (compare_symbol_name, add_all_symbol_names_from_pspace) (find_superclass_methods ): New functions. (find_method): Rewrite. (struct symtab_collector): New type. (add_symtabs_to_list, collect_symtabs_from_filename): New functions. (symtabs_from_filename): Change API. 
Rename from symtab_from_filename. (collect_function_symbols): New function. (find_function_symbols): Change API. Rename from find_function_symbol. Rewrite. (decode_all_digits): Change arguments. Rewrite. (decode_dollar): Change arguments. Use decode_variable. (decode_label): Change arguments. Rewrite. (collect_symbols): New function. (minsym_found): Change arguments. Rewrite. (check_minsym, search_minsyms_for_name) (add_matching_symbols_to_info): New function. (decode_variable): Change arguments. Iterate over all symbols. (symbol_found): Remove. (symbol_to_sal): New function. (init_linespec_result, destroy_linespec_result) (cleanup_linespec_result, make_cleanup_destroy_linespec_result): New functions. (decode_digits_list_mode, decode_digits_ordinary): New functions. * dwarf2read.c (dw2_map_expand_apply): New function. (dw2_map_symtabs_matching_filename): Rename from dw2_lookup_symtab. Change arguments. (dwarf2_gdb_index_functions): Update. * dwarf2loc.c: Remove DEF_VEC_I(CORE_ADDR). * defs.h (compare_strings): Declare. * cli/cli-cmds.c (compare_strings): Move to utils.c. (edit_command, list_command): Use DECODE_LINE_LIST_MODE. Call filter_sals. (compare_symtabs, filter_sals): New functions. * breakpoint.h (struct bp_location) <line_number, source_file>: New fields. (struct breakpoint) <line_number, source_file>: Remove. <filter>: New field. * breakpoint.c (print_breakpoint_location, init_raw_breakpoint) (momentary_breakpoint_from_master, add_location_to_breakpoint): Update for changes to locations. (init_breakpoint_sal): Add 'filter' argument. Set 'filter' on breakpoint. (create_breakpoint_sal): Add 'filter' argument. (remove_sal, expand_line_sal_maybe): Remove. (create_breakpoints_sal): Remove 'sals' argument. Handle pre-expanded sals and the filter. (parse_breakpoint_sals): Use decode_line_full. (check_fast_tracepoint_sals): Use get_sal_arch. (create_breakpoint): Create a linespec_sals. Update. (break_range_command): Use decode_line_full. Update. (until_break_command): Update. (clear_command): Update match conditions for linespec.c changes. Use DECODE_LINE_LIST_MODE. (say_where): Update for changes to locations. (bp_location_dtor): Free 'source_file'. (base_breakpoint_dtor): Free 'filter'. Don't free 'source_file'. (update_static_tracepoint): Update for changes to locations. (update_breakpoint_locations): Disable ranged breakpoint if too many locations match. Update. (addr_string_to_sals): Use decode_line_full. Resolve all sal PCs. (breakpoint_re_set_default): Don't call expand_line_sal_maybe. (decode_line_spec_1): Update. Change argument name to 'flags', change interpretation. * block.h (block_containing_function): Declare. * block.c (block_containing_function): New function. * skip.c (skip_function_command): Update. (skip_re_set): Update. * infcmd.c (jump_command): Use DECODE_LINE_FUNFIRSTLINE. * mi/mi-main.c (mi_cmd_trace_find): Use DECODE_LINE_FUNFIRSTLINE. * NEWS: Add entry. 2011-12-06 Tom Tromey <tromey@redhat.com> * elfread.c (elf_gnu_ifunc_resolver_return_stop): Allow breakpoint's pspace to be NULL. * breakpoint.h (struct breakpoint) <pspace>: Update comment. * breakpoint.c (init_raw_breakpoint): Conditionally set breakpoint's pspace. (init_breakpoint_sal): Don't set breakpoint's pspace. (prepare_re_set_context): Conditionally switch program space. (addr_string_to_sals): Check executing_startup on location's program space. 2011-12-06 Tom Tromey <tromey@redhat.com> * breakpoint.h (enum enable_state) <bp_startup_disabled>: Remove. 
* breakpoint.c (should_be_inserted): Explicitly check if program space is executing startup. (describe_other_breakpoints): Update. (disable_breakpoints_before_startup): Change executing_startup earlier. Remove loop. (enable_breakpoints_after_startup): Likewise. (init_breakpoint_sal): Don't use bp_startup_disabled. (create_breakpoint): Don't use bp_startup_disabled. (update_global_location_list): Use should_be_inserted. (bkpt_re_set): Update. gdb/testsuite 2011-12-06 Joel Brobecker <brobecker@acacore.com> * gdb.ada/fullname_bp.exp: Add tests for other valid linespecs involving a fully qualified function name. 2011-12-06 Tom Tromey <tromey@redhat.com> * gdb.ada/homonym.exp: Add three breakpoint tests. 2011-12-06 Tom Tromey <tromey@redhat.com> * gdb.base/solib-weak.exp (do_test): Remove kfail. * gdb.trace/tracecmd.exp: Disable pending breakpoints earlier. * gdb.objc/objcdecode.exp: Update for output changes. * gdb.linespec/linespec.exp: New file. * gdb.linespec/lspec.cc: New file. * gdb.linespec/lspec.h: New file. * gdb.linespec/body.h: New file. * gdb.linespec/base/two/thefile.cc: New file. * gdb.linespec/base/one/thefile.cc: New file. * gdb.linespec/Makefile.in: New file. * gdb.cp/templates.exp (test_template_breakpoints): Update for output changes. * gdb.cp/re-set-overloaded.exp: Remove kfail. * gdb.cp/ovldbreak.exp: Update for output changes. "all" test now makes one breakpoint. * gdb.cp/method2.exp (test_break): Update for output changes. * gdb.cp/mb-templates.exp: Update for output changes. * gdb.cp/mb-inline.exp: Update for output changes. * gdb.cp/mb-ctor.exp: Update for output changes. * gdb.cp/ovsrch.exp: Use fully-qualified names. * gdb.base/solib-symbol.exp: Run to main later. Breakpoint now has multiple matches. * gdb.base/sepdebug.exp: Disable pending breakpoints. Update for error message change. * gdb.base/list.exp (test_list_filename_and_number): Update for error message change. * gdb.base/break.exp: Disable pending breakpoints. Update for output changes. * configure.ac: Add gdb.linespec. * configure: Rebuild. * Makefile.in (ALL_SUBDIRS): Add gdb.linespec. gdb/doc 2011-12-06 Tom Tromey <tromey@redhat.com> * gdb.texinfo (Set Breaks): Update for new behavior.
2011-12-06 19:54:43 +01:00
	gdb.java gdb.linespec gdb.mi gdb.modula2 gdb.multi \
	gdb.objc gdb.opencl gdb.opt gdb.pascal gdb.python gdb.server \
	gdb.stabs gdb.reverse gdb.threads gdb.trace gdb.xml \
	$(SUBDIRS)
EXTRA_RULES = @EXTRA_RULES@
CC=@CC@
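# Pick the expect binary: the read1 wrapper when READ1 is set (see the
# check-read1 target below), an in-tree expect if one was built alongside
# gdb, and otherwise whatever "expect" is found on PATH.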
EXPECT = `if [ "$${READ1}" != "" ] ; then \
	  echo $${rootme}/expect-read1; \
	elif [ -f $${rootme}/../../expect/expect ] ; then \
	  echo $${rootme}/../../expect/expect ; \
	else \
	  echo expect ; \
	fi`
RUNTEST = $(RUNTEST_FOR_TARGET)
RUNTESTFLAGS =
FORCE_PARALLEL =
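# Prefer an in-tree DejaGnu runtest when one is present in the source
# tree; otherwise fall back to an installed "runtest", applying
# $(program_transform_name) when host and target differ.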
RUNTEST_FOR_TARGET = `\
    if [ -f $${srcdir}/../../dejagnu/runtest ]; then \
      echo $${srcdir}/../../dejagnu/runtest; \
    else \
      if [ "$(host_canonical)" = "$(target_canonical)" ]; then \
        echo runtest; \
      else \
        t='$(program_transform_name)'; echo runtest | sed -e $$t; \
      fi; \
    fi`
#### host, target, and site specific Makefile frags come in here.
# The use of $$(x_FOR_TARGET) reduces the command line length by not
# duplicating the lengthy definition.
TARGET_FLAGS_TO_PASS = \
"prefix=$(prefix)" \
"exec_prefix=$(exec_prefix)" \
"against=$(against)" \
'CC=$$(CC_FOR_TARGET)' \
"CC_FOR_TARGET=$(CC_FOR_TARGET)" \
"CFLAGS=$(TESTSUITE_CFLAGS)" \
'CXX=$$(CXX_FOR_TARGET)' \
"CXX_FOR_TARGET=$(CXX_FOR_TARGET)" \
"CXXFLAGS=$(CXXFLAGS)" \
"MAKEINFO=$(MAKEINFO)" \
"INSTALL=$(INSTALL)" \
"INSTALL_PROGRAM=$(INSTALL_PROGRAM)" \
"INSTALL_DATA=$(INSTALL_DATA)" \
"LDFLAGS=$(LDFLAGS)" \
"LIBS=$(LIBS)" \
"RUNTEST=$(RUNTEST)" \
"RUNTESTFLAGS=$(RUNTESTFLAGS)"
all: $(EXTRA_RULES)
@echo "Nothing to be done for all..."
.NOEXPORT:
INFODIRS=doc
info:
install-info:
dvi:
pdf:
install-pdf:
html:
install-html:
install:
uninstall: force
# Use the absolute `site.exp' path everywhere to suppress VPATH lookups for it.
# Bare `site.exp' is used as a target here if the user requests it explicitly.
# $(RUNTEST) looks up `site.exp' only in the current directory.
$(abs_builddir)/site.exp site.exp: ./config.status Makefile
@echo "Making a new config file..."
-@rm -f ./tmp?
@touch site.exp
-@mv site.exp site.bak
@echo "## these variables are automatically generated by make ##" > ./tmp0
@echo "# Do not edit here. If you wish to override these values" >> ./tmp0
@echo "# add them to the last section" >> ./tmp0
@echo "set host_triplet ${host_canonical}" >> ./tmp0
@echo "set target_alias $(target_alias)" >> ./tmp0
@echo "set target_triplet ${target_canonical}" >> ./tmp0
@echo "set build_triplet ${build_canonical}" >> ./tmp0
@echo "set srcdir ${srcdir}" >> ./tmp0
@echo "set tool gdb" >> ./tmp0
@echo 'source $${srcdir}/lib/append_gdb_boards_dir.exp' >> ./tmp0
@echo "## All variables above are generated by configure. Do Not Edit ##" >> ./tmp0
@cat ./tmp0 > site.exp
@cat site.bak | sed \
-e '1,/^## All variables above are.*##/ d' >> site.exp
-@rm -f ./tmp?
installcheck:
# See whether -j was given to make. Either it was given with no
# arguments, and appears as "j" in the first word, or it was given an
# argument and appears as "-j" in a separate word.
@GMAKE_TRUE@saw_dash_j = $(or $(findstring j,$(firstword $(MAKEFLAGS))),$(filter -j,$(MAKEFLAGS)))
# For GNU make, try to run the tests in parallel if any -j option is
# given. If RUNTESTFLAGS is not empty, then by default the tests will
# be serialized. This can be overridden by setting FORCE_PARALLEL to
# any non-empty value. For a non-GNU make, do not parallelize.
@GMAKE_TRUE@CHECK_TARGET = $(if $(FORCE_PARALLEL),check-parallel,$(if $(RUNTESTFLAGS),check-single,$(if $(saw_dash_j),check-parallel,check-single)))
@GMAKE_FALSE@CHECK_TARGET = check-single
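# For example, under GNU make "make -j4 check" selects check-parallel,
# while "make check RUNTESTFLAGS=--directory=gdb.base" selects
# check-single unless FORCE_PARALLEL is set to a non-empty value.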
# Note that we must resort to a recursive make invocation here,
# because GNU make 3.82 has a bug preventing MAKEFLAGS from being used
# in conditions.
check: all $(abs_builddir)/site.exp
	$(MAKE) $(CHECK_TARGET)
check-read1:
	$(MAKE) READ1="1" check
# All the hair to invoke dejagnu. A given invocation can just append
# $(RUNTESTFLAGS)
DO_RUNTEST = \
	rootme=`pwd`; export rootme; \
	srcdir=${srcdir} ; export srcdir ; \
	EXPECT=${EXPECT} ; export EXPECT ; \
	EXEEXT=${EXEEXT} ; export EXEEXT ; \
	$(RPATH_ENVVAR)=$$rootme/../../expect:$$rootme/../../libstdc++:$$rootme/../../tk/unix:$$rootme/../../tcl/unix:$$rootme/../../bfd:$$rootme/../../opcodes:$$$(RPATH_ENVVAR); \
	export $(RPATH_ENVVAR); \
	if [ -f $${rootme}/../../expect/expect ] ; then \
	  TCL_LIBRARY=$${srcdir}/../../tcl/library ; \
	  export TCL_LIBRARY ; fi ; \
	$(RUNTEST)
# TESTS exists for the user to pass on the command line to easily
# say "Only run these tests." With check-single it's not necessary, but
# with check-parallel there's no other way to (easily) specify a subset
# of tests. For consistency we support it for check-single as well.
# To specify all tests in a subdirectory, use TESTS=gdb.subdir/*.exp.
# E.g., make check TESTS="gdb.server/*.exp gdb.threads/*.exp".
@GMAKE_TRUE@TESTS :=
@GMAKE_FALSE@TESTS =
@GMAKE_TRUE@ifeq ($(strip $(TESTS)),)
@GMAKE_TRUE@expanded_tests_or_none :=
@GMAKE_TRUE@else
@GMAKE_TRUE@expanded_tests := $(patsubst $(srcdir)/%,%,$(wildcard $(addprefix $(srcdir)/,$(TESTS))))
@GMAKE_TRUE@expanded_tests_or_none := $(or $(expanded_tests),no-matching-tests-found)
@GMAKE_TRUE@endif
@GMAKE_FALSE@expanded_tests_or_none = $(TESTS)
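# If the TESTS patterns match nothing under $(srcdir), the expansion
# becomes "no-matching-tests-found", which resolves to the
# check/no-matching-tests-found target below instead of silently
# running the whole suite.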
# Shorthand for running all the tests in a single directory.
@GMAKE_TRUE@check-gdb.%:
@GMAKE_TRUE@	$(MAKE) check TESTS="gdb.$*/*.exp"
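# For example, "make check-gdb.linespec" is equivalent to
# "make check TESTS='gdb.linespec/*.exp'".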
check-single:
	$(DO_RUNTEST) $(RUNTESTFLAGS) $(expanded_tests_or_none)
check-parallel:
	-rm -rf cache outputs temp
	$(MAKE) -k do-check-parallel; \
	$(SHELL) $(srcdir)/dg-extract-results.sh \
	  `find outputs -name gdb.sum -print` > gdb.sum; \
	$(SHELL) $(srcdir)/dg-extract-results.sh -L \
	  `find outputs -name gdb.log -print` > gdb.log
	@sed -n '/=== gdb Summary ===/,$$ p' gdb.sum
# Turn a list of .exp files into "check/" targets. Only examine .exp
# files appearing in a gdb.* directory -- we don't want to pick up
# lib/ by mistake. For example, gdb.linespec/linespec.exp becomes
# check/gdb.linespec/linespec.exp. The list is generally sorted
# alphabetically, but we take a few tests known to be slow and push
# them to the front of the list to try to lessen the overall time
# taken by the test suite -- if one of these tests happens to be run
# late, it will cause the overall time to increase.
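# For example, "make check/gdb.base/break-interp.exp" runs just that
# test script, with its gdb.sum and gdb.log left under
# outputs/gdb.base/break-interp/.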
@GMAKE_TRUE@ifeq ($(strip $(TESTS)),)
slow_tests = gdb.base/break-interp.exp gdb.base/interp.exp \
gdb.base/multi-forks.exp
@GMAKE_TRUE@all_tests := $(shell cd $(srcdir) && find gdb.* -name '*.exp' -print)
@GMAKE_TRUE@reordered_tests := $(slow_tests) $(filter-out $(slow_tests),$(all_tests))
@GMAKE_TRUE@TEST_TARGETS := $(addprefix check/,$(reordered_tests))
@GMAKE_TRUE@else
@GMAKE_TRUE@TEST_TARGETS := $(addprefix check/,$(expanded_tests_or_none))
@GMAKE_TRUE@endif
do-check-parallel: $(TEST_TARGETS)
	@:
@GMAKE_TRUE@check/%.exp:
@GMAKE_TRUE@	-mkdir -p outputs/$*
@GMAKE_TRUE@	@$(DO_RUNTEST) GDB_PARALLEL=yes --outdir=outputs/$* $*.exp $(RUNTESTFLAGS)
check/no-matching-tests-found:
@echo ""
@echo "No matching tests found."
@echo ""
check-perf: all $(abs_builddir)/site.exp
	@if test ! -d gdb.perf; then mkdir gdb.perf; fi
	$(DO_RUNTEST) --directory=gdb.perf --outdir gdb.perf GDB_PERFTEST_MODE=both $(RUNTESTFLAGS)
force:;
clean mostlyclean:
	-rm -f *~ core *.o a.out xgdb *.x *.grt bigcore.corefile .gdb_history
	-rm -f core.* *.tf *.cl tracecommandsscript copy1.txt zzz-gdbscript
	-rm -f *.dwo *.dwp
-rm -rf outputs temp cache
-rm -f read1.so expect-read1
if [ x"${ALL_SUBDIRS}" != x ] ; then \
for dir in ${ALL_SUBDIRS}; \
do \
echo "$$dir:"; \
if [ -d $$dir ]; then \
(cd $$dir; $(MAKE) clean); \
fi; \
done ; \
else true; fi
distclean maintainer-clean realclean: clean
-rm -f *~ core
-rm -f Makefile config.status *-init.exp
-rm -fr *.log summary detail *.plog *.sum *.psum site.*
if [ x"${ALL_SUBDIRS}" != x ] ; then \
for dir in ${ALL_SUBDIRS}; \
do \
echo "$$dir:"; \
if [ -d $$dir ]; then \
(cd $$dir; $(MAKE) distclean); \
fi; \
done ; \
else true; fi
Makefile : Makefile.in config.status $(host_makefile_frag)
$(SHELL) config.status
config.status: configure
$(SHELL) config.status --recheck
TAGS: force
find $(srcdir) -name '*.exp' -print | \
etags --regex='/proc[ \t]+\([^ \t]+\)/\1/' -
# Build the expect wrapper script that preloads the read1.so library.
expect-read1:
@echo Making expect-read1
@rm -f expect-read1-tmp
@touch expect-read1-tmp
@echo "# THIS FILE IS GENERATED -*- buffer-read-only: t -*- \n" >>expect-read1-tmp
@echo "# vi:set ro: */\n\n" >>expect-read1-tmp
@echo "# To regenerate this file, run:\n" >>expect-read1-tmp
@echo "# make clean; make/\n" >>expect-read1-tmp
@echo "export LD_PRELOAD=`pwd`/read1.so" >>expect-read1-tmp
@echo 'exec expect "$$@"' >>expect-read1-tmp
@chmod +x expect-read1-tmp
@mv expect-read1-tmp expect-read1
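
# For reference, the generated expect-read1 script ends up looking roughly
# like the shell fragment below (a sketch derived from the recipe above; the
# path shown is a placeholder, the real one is this build directory):
#
#   export LD_PRELOAD=/path/to/build/gdb/testsuite/read1.so
#   exec expect "$@"
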
# Build the read1.so preload library. This overrides the `read'
# function, making it read one byte at a time. Running the testsuite
# with this catches racy tests.
read1.so: lib/read1.c
$(CC) -o $@ ${srcdir}/lib/read1.c -Wall -g -shared -fPIC $(CFLAGS)
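
# The idea behind lib/read1.c, in outline (a hypothetical sketch, not the
# actual source): the preloaded library interposes read(2) and clamps the
# requested count to a single byte, so expect sees the inferior gdb's output
# trickle in one byte at a time:
#
#   /* Sketch only; see lib/read1.c for the real implementation.  */
#   #define _GNU_SOURCE
#   #include <dlfcn.h>
#   #include <unistd.h>
#
#   ssize_t
#   read (int fd, void *buf, size_t count)
#   {
#     static ssize_t (*real_read) (int, void *, size_t);
#     if (real_read == NULL)
#       real_read = (ssize_t (*) (int, void *, size_t)) dlsym (RTLD_NEXT, "read");
#     return real_read (fd, buf, count > 1 ? 1 : count);
#   }
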
# Build the read1 machinery.
.PHONY: read1
read1: read1.so expect-read1
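
# One possible way to exercise this (an assumed invocation, not necessarily
# the documented interface; the testsuite README is the authoritative
# reference): build the pieces, then run the tests through the wrapper, e.g.
#
#   make read1
#   make check RUNTEST="EXPECT=`pwd`/expect-read1 runtest"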