a68ab35173
* c-cppbuiltin.c (c_cpp_builtins): Change _OPENMP value to 200805. * langhooks.h (struct lang_hooks_for_decls): Add omp_finish_clause. Add omp_private_outer_ref hook, add another argument to omp_clause_default_ctor hook. * langhooks-def.h (LANG_HOOKS_OMP_FINISH_CLAUSE): Define. (LANG_HOOKS_OMP_PRIVATE_OUTER_REF): Define. (LANG_HOOKS_OMP_CLAUSE_DEFAULT_CTOR): Change to hook_tree_tree_tree_tree_null. (LANG_HOOKS_DECLS): Add LANG_HOOKS_OMP_FINISH_CLAUSE and LANG_HOOKS_OMP_PRIVATE_OUTER_REF. * hooks.c (hook_tree_tree_tree_tree_null): New function. * hooks.h (hook_tree_tree_tree_tree_null): New prototype. * tree.def (OMP_TASK): New tree code. * tree.h (OMP_TASK_COPYFN, OMP_TASK_ARG_SIZE, OMP_TASK_ARG_ALIGN, OMP_CLAUSE_PRIVATE_OUTER_REF, OMP_CLAUSE_LASTPRIVATE_STMT, OMP_CLAUSE_COLLAPSE_ITERVAR, OMP_CLAUSE_COLLAPSE_COUNT, OMP_TASKREG_CHECK, OMP_TASKREG_BODY, OMP_TASKREG_CLAUSES, OMP_TASKREG_FN, OMP_TASKREG_DATA_ARG, OMP_TASK_BODY, OMP_TASK_CLAUSES, OMP_TASK_FN, OMP_TASK_DATA_ARG, OMP_CLAUSE_COLLAPSE_EXPR): Define. (enum omp_clause_default_kind): Add OMP_CLAUSE_DEFAULT_FIRSTPRIVATE. (OMP_DIRECTIVE_P): Add OMP_TASK. (OMP_CLAUSE_COLLAPSE, OMP_CLAUSE_UNTIED): New clause codes. (OMP_CLAUSE_SCHEDULE_AUTO): New schedule kind. * tree.c (omp_clause_code_name): Add OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED entries. (omp_clause_num_ops): Likewise. Increase OMP_CLAUSE_LASTPRIVATE num_ops to 2. (walk_tree_1): Handle OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. Walk OMP_CLAUSE_LASTPRIVATE_STMT. * tree-pretty-print.c (dump_omp_clause): Handle OMP_CLAUSE_SCHEDULE_AUTO, OMP_CLAUSE_UNTIED, OMP_CLAUSE_COLLAPSE, OMP_CLAUSE_DEFAULT_FIRSTPRIVATE. (dump_generic_node): Handle OMP_TASK and collapsed OMP_FOR loops. * c-omp.c (c_finish_omp_for): Allow pointer iterators. Remove warning about unsigned iterators. Change decl/init/cond/incr arguments to TREE_VECs, check arguments for all collapsed loops. (c_finish_omp_taskwait): New function. (c_split_parallel_clauses): Put OMP_CLAUSE_COLLAPSE clause to ws_clauses. * c-parser.c (c_parser_omp_for_loop): Parse collapsed loops. Call default_function_array_conversion on init. Add par_clauses argument. If decl is present in parallel's lastprivate clause, change it to shared and add lastprivate clause for decl to OMP_FOR_CLAUSES. Add clauses argument, on success set OMP_FOR_CLAUSES to it. Look up collapse count in clauses. (c_parser_omp_for, c_parser_omp_parallel): Adjust c_parser_omp_for_loop callers. (OMP_FOR_CLAUSE_MASK): Add 1 << PRAGMA_OMP_CLAUSE_COLLAPSE. (c_parser_pragma): Handle PRAGMA_OMP_TASKWAIT. (c_parser_omp_clause_name): Handle collapse and untied clauses. (c_parser_omp_clause_collapse, c_parser_omp_clause_untied): New functions. (c_parser_omp_clause_schedule): Handle schedule(auto). Include correct location in the error message. (c_parser_omp_all_clauses): Handle PRAGMA_OMP_CLAUSE_COLLAPSE and PRAGMA_OMP_CLAUSE_UNTIED. (OMP_TASK_CLAUSE_MASK): Define. (c_parser_omp_task, c_parser_omp_taskwait): New functions. (c_parser_omp_construct): Handle PRAGMA_OMP_TASK. * tree-nested.c (convert_nonlocal_omp_clauses, convert_local_omp_clauses): Handle OMP_CLAUSE_LASTPRIVATE_STMT, OMP_CLAUSE_REDUCTION_INIT, OMP_CLAUSE_REDUCTION_MERGE, OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. Don't handle TREE_STATIC or DECL_EXTERNAL VAR_DECLs in OMP_CLAUSE_DECL. (conver_nonlocal_reference, convert_local_reference, convert_call_expr): Handle OMP_TASK the same as OMP_PARALLEL. Use OMP_TASKREG_* macros rather than OMP_PARALLEL_*. (walk_omp_for): Adjust for OMP_FOR_{INIT,COND,INCR} changes. 
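For reference, a minimal C example (illustrative only, not part of this commit) of the OpenMP 3.0 syntax that the front-end changes above start accepting: collapsed loops, schedule(auto), and untied tasks with taskwait.

  void
  example (int n, int m, int *a)
  {
    int i, j;
  #pragma omp parallel shared(a)
    {
  #pragma omp for collapse(2) schedule(auto)
      for (i = 0; i < n; i++)
        for (j = 0; j < m; j++)
          a[i * m + j] = i + j;
  #pragma omp single
      {
  #pragma omp task untied firstprivate(n)
        a[0] = n;
  #pragma omp taskwait
      }
    }
  }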
* tree-gimple.c (is_gimple_stmt): Handle OMP_TASK. * c-tree.h (c_begin_omp_task, c_finish_omp_task): New prototypes. * c-pragma.h (PRAGMA_OMP_TASK, PRAGMA_OMP_TASKWAIT): New. (PRAGMA_OMP_CLAUSE_COLLAPSE, PRAGMA_OMP_CLAUSE_UNTIED): New. * c-typeck.c (c_begin_omp_task, c_finish_omp_task): New functions. (c_finish_omp_clauses): Handle OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. * c-pragma.c (init_pragma): Init omp task and omp taskwait pragmas. * c-common.h (c_finish_omp_taskwait): New prototype. * gimple-low.c (lower_stmt): Handle OMP_TASK. * tree-parloops.c (create_parallel_loop): Create 1 entry vectors for OMP_FOR_{INIT,COND,INCR}. * tree-cfg.c (remove_useless_stmts_1): Handle OMP_* containers. (make_edges): Handle OMP_TASK. * tree-ssa-operands.c (get_expr_operands): Handle collapsed OMP_FOR loops, adjust for OMP_FOR_{INIT,COND,INCR} changes. * tree-inline.c (estimate_num_insns_1): Handle OMP_TASK. * builtin-types.def (BT_PTR_ULONGLONG, BT_PTR_FN_VOID_PTR_PTR, BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR, BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): New. * omp-builtins.def (BUILT_IN_GOMP_TASK, BUILT_IN_GOMP_TASKWAIT, BUILT_IN_GOMP_LOOP_ULL_STATIC_START, BUILT_IN_GOMP_LOOP_ULL_DYNAMIC_START, BUILT_IN_GOMP_LOOP_ULL_GUIDED_START, BUILT_IN_GOMP_LOOP_ULL_RUNTIME_START, BUILT_IN_GOMP_LOOP_ULL_ORDERED_STATIC_START, BUILT_IN_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, BUILT_IN_GOMP_LOOP_ULL_ORDERED_GUIDED_START, BUILT_IN_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT, BUILT_IN_GOMP_LOOP_ULL_DYNAMIC_NEXT, BUILT_IN_GOMP_LOOP_ULL_GUIDED_NEXT, BUILT_IN_GOMP_LOOP_ULL_RUNTIME_NEXT, BUILT_IN_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, BUILT_IN_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, BUILT_IN_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, BUILT_IN_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT): New builtins. * gimplify.c (gimplify_omp_for): Allow pointer type for decl, handle POINTER_PLUS_EXPR. If loop counter has been replaced and original iterator is present in lastprivate clause or if collapse > 1, set OMP_CLAUSE_LASTPRIVATE_STMT. Handle collapsed OMP_FOR loops, adjust for OMP_FOR_{INIT,COND,INCR} changes. (gimplify_expr): Handle OMP_SECTIONS_SWITCH and OMP_TASK. (enum gimplify_omp_var_data): Add GOVD_PRIVATE_OUTER_REF. (omp_notice_variable): Set GOVD_PRIVATE_OUTER_REF if needed, if it is set, lookup var in outer contexts too. Handle OMP_CLAUSE_DEFAULT_FIRSTPRIVATE. Handle vars that are supposed to be implicitly determined firstprivate for task regions. (gimplify_scan_omp_clauses): Set GOVD_PRIVATE_OUTER_REF if needed, if it is set, lookup var in outer contexts too. Set OMP_CLAUSE_PRIVATE_OUTER_REF if GOVD_PRIVATE_OUTER_REF is set. Handle OMP_CLAUSE_LASTPRIVATE_STMT, OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. Take region_type as last argument instead of in_parallel and in_combined_parallel. (gimplify_omp_parallel, gimplify_omp_for, gimplify_omp_workshare): Adjust callers. (gimplify_adjust_omp_clauses_1): Set OMP_CLAUSE_PRIVATE_OUTER_REF if GOVD_PRIVATE_OUTER_REF is set. Call omp_finish_clause langhook. (new_omp_context): Set default_kind to OMP_CLAUSE_DEFAULT_UNSPECIFIED for OMP_TASK regions. (omp_region_type): New enum. (struct gimplify_omp_ctx): Remove is_parallel and is_combined_parallel fields, add region_type. (new_omp_context): Take region_type as argument instead of is_parallel and is_combined_parallel. 
(gimple_add_tmp_var, omp_firstprivatize_variable, omp_notice_variable, omp_is_private, omp_check_private): Adjust ctx->is_parallel and ctx->is_combined_parallel checks. (gimplify_omp_task): New function. (gimplify_adjust_omp_clauses): Handle OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. * omp-low.c (extract_omp_for_data): Use schedule(static) for schedule(auto). Handle pointer and unsigned iterators. Compute fd->iter_type. Handle POINTER_PLUS_EXPR increments. Add loops argument. Extract data for collapsed OMP_FOR loops. (expand_parallel_call): Assert sched_kind isn't auto, map runtime schedule to index 3. (struct omp_for_data_loop): New type. (struct omp_for_data): Remove v, n1, n2, step, cond_code fields. Add loop, loops, collapse and iter_type fields. (workshare_safe_to_combine_p): Disallow combined for if iter_type is unsigned long long. Don't combine collapse > 1 loops unless all bounds and steps are constant. Adjust extract_omp_for_data caller. (expand_omp_for_generic): Handle pointer, unsigned and long long iterators. Handle collapsed OMP_FOR loops. Adjust for struct omp_for_data changes. If libgomp function doesn't return boolean_type_node, add comparison of the return value with 0. (expand_omp_for_static_nochunk, expand_omp_for_static_chunk): Handle pointer, unsigned and long long iterators. Adjust for struct omp_for_data changes. (expand_omp_for): Assert sched_kind isn't auto, map runtime schedule to index 3. Use GOMP_loop_ull*{start,next} if iter_type is unsigned long long. Allocate loops array, pass it to extract_omp_for_data. For collapse > 1 loops use always expand_omp_for_generic. (omp_context): Add sfield_map and srecord_type fields. (is_task_ctx, lookup_sfield): New functions. (use_pointer_for_field): Use is_task_ctx helper. Change first argument's type from const_tree to tree. Clarify comment. In OMP_TASK disallow copy-in/out sharing. (build_sender_ref): Call lookup_sfield instead of lookup_field. (install_var_field): Add mask argument. Populate both record_type and srecord_type if needed. (delete_omp_context): Destroy sfield_map, clear DECL_ABSTRACT_ORIGIN in srecord_type. (fixup_child_record_type): Also remap FIELD_DECL's DECL_SIZE{,_UNIT} and DECL_FIELD_OFFSET. (scan_sharing_clauses): Adjust install_var_field callers. For firstprivate clauses on explicit tasks allocate the var by value in record_type unconditionally, rather than by reference. Handle OMP_CLAUSE_PRIVATE_OUTER_REF. Scan OMP_CLAUSE_LASTPRIVATE_STMT. Use is_taskreg_ctx instead of is_parallel_ctx. Handle OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. (create_omp_child_function_name): Add task_copy argument, use *_omp_cpyfn* names if it is true. (create_omp_child_function): Add task_copy argument, if true create *_omp_cpyfn* helper function. (scan_omp_parallel): Adjust create_omp_child_function callers. Rename parallel_nesting_level to taskreg_nesting_level. (scan_omp_task): New function. (lower_rec_input_clauses): Don't run constructors for firstprivate explicit task vars which are initialized by *_omp_cpyfn*. Pass outer var ref to omp_clause_default_ctor hook if OMP_CLAUSE_PRIVATE_OUTER_REF or OMP_CLAUSE_LASTPRIVATE. Replace OMP_CLAUSE_REDUCTION_PLACEHOLDER decls in OMP_CLAUSE_REDUCTION_INIT. (lower_send_clauses): Clear DECL_ABSTRACT_ORIGIN if in task to avoid duplicate setting of fields. Handle OMP_CLAUSE_PRIVATE_OUTER_REF. (lower_send_shared_vars): Use srecord_type if non-NULL. Don't copy-out if TREE_READONLY, only copy-in. (expand_task_copyfn): New function. (expand_task_call): New function. 
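A rough sketch (assumed, simplified shape; the real call is built by expand_task_call using the BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT builtin type listed above) of what the compiler emits for a '#pragma omp task' region; all names below are hypothetical:

  struct omp_task_data { int n; };     /* hypothetical captured firstprivate var  */

  static void
  task_body (void *arg)                /* hypothetical outlined task body  */
  {
    struct omp_task_data *p = (struct omp_task_data *) arg;
    do_work (p->n);
  }

  /* ... at the point of the task directive ... */
  struct omp_task_data d = { n };
  GOMP_task (task_body, &d, NULL /* no copy-constructor helper needed */,
             sizeof (struct omp_task_data), __alignof__ (struct omp_task_data),
             true /* if clause value */, 0 /* clause flags */);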
(struct omp_taskcopy_context): New type. (task_copyfn_copy_decl, task_copyfn_remap_type, create_task_copyfn): New functions. (lower_omp_parallel): Rename to... (lower_omp_taskreg): ... this. Use OMP_TASKREG_* macros where needed. Call create_task_copyfn if srecord_type is needed. Adjust sender_decl type. (task_shared_vars): New variable. (check_omp_nesting_restrictions): Warn if work-sharing, barrier, master or ordered region is closely nested inside OMP_TASK. Add warnings for barrier if closely nested inside of work-sharing, ordered, or master region. (scan_omp_1): Call check_omp_nesting_restrictions even for GOMP_barrier calls. Rename parallel_nesting_level to taskreg_nesting_level. Handle OMP_TASK. (lower_lastprivate_clauses): Even if some lastprivate is found on a work-sharing construct, continue looking for them on parent parallel construct. (lower_omp_for_lastprivate): Add lastprivate clauses to the beginning of dlist rather than end. Adjust for struct omp_for_data changes. (lower_omp_for): Add rec input clauses before OMP_FOR_PRE_BODY, not after it. Handle collapsed OMP_FOR loops, adjust for OMP_FOR_{INIT,COND,INCR} changes, adjust extract_omp_for_data caller. (get_ws_args_for): Adjust extract_omp_for_data caller. (scan_omp_for): Handle collapsed OMP_FOR loops, adjust for OMP_FOR_{INIT,COND,INCR} changes. (lower_omp_single_simple): If libgomp function doesn't return boolean_type_node, add comparison of the return value with 0. (diagnose_sb_1, diagnose_sb_2): Handle collapsed OMP_FOR loops, adjust for OMP_FOR_{INIT,COND,INCR} changes. Handle OMP_TASK. (parallel_nesting_level): Rename to... (taskreg_nesting_level): ... this. (is_taskreg_ctx): New function. (build_outer_var_ref, omp_copy_decl): Use is_taskreg_ctx instead of is_parallel_ctx. (execute_lower_omp): Rename parallel_nesting_level to taskreg_nesting_level. (expand_omp_parallel): Rename to... (expand_omp_taskreg): ... this. Use OMP_TASKREG_* macros where needed. Call omp_task_call for OMP_TASK regions. (expand_omp): Adjust caller, handle OMP_TASK. (lower_omp_1): Adjust lower_omp_taskreg caller, handle OMP_TASK. * bitmap.c (bitmap_default_obstack_depth): New variable. (bitmap_obstack_initialize, bitmap_obstack_release): Do nothing if argument is NULL and bitmap_default_obstack is already initialized. * ipa-struct-reorg.c (do_reorg_1): Call bitmap_obstack_release at the end. * matrix-reorg.c (matrix_reorg): Likewise. cp/ * cp-tree.h (cxx_omp_finish_clause, cxx_omp_create_clause_info, dependent_omp_for_p, begin_omp_task, finish_omp_task, finish_omp_taskwait): New prototypes. (cxx_omp_clause_default_ctor): Add outer argument. (finish_omp_for): Add new clauses argument. * cp-gimplify.c (cxx_omp_finish_clause): New function. (cxx_omp_predetermined_sharing): Moved from semantics.c, rewritten. (cxx_omp_clause_default_ctor): Add outer argument. (cp_genericize_r): Walk OMP_CLAUSE_LASTPRIVATE_STMT. * cp-objcp-common.h (LANG_HOOKS_OMP_FINISH_CLAUSE): Define. * parser.c (cp_parser_omp_for_loop): Parse collapsed for loops. Add par_clauses argument. If decl is present in parallel's lastprivate clause, change that clause to shared and add a lastprivate clause for decl to OMP_FOR_CLAUSES. Fix wording of error messages. Adjust finish_omp_for caller. Add clauses argument. Parse loops with random access iterators. (cp_parser_omp_clause_collapse, cp_parser_omp_clause_untied): New functions. (cp_parser_omp_for, cp_parser_omp_parallel): Adjust cp_parser_omp_for_loop callers. (cp_parser_omp_for_cond, cp_parser_omp_for_incr): New helper functions. 
(cp_parser_omp_clause_name): Handle collapse and untied clauses. (cp_parser_omp_clause_schedule): Handle auto schedule. (cp_parser_omp_all_clauses): Handle PRAGMA_OMP_CLAUSE_COLLAPSE and PRAGMA_OMP_CLAUSE_UNTIED. (OMP_FOR_CLAUSE_MASK): Add PRAGMA_OMP_CLAUSE_COLLAPSE. (OMP_TASK_CLAUSE_MASK): Define. (cp_parser_omp_task, cp_parser_omp_taskwait): New functions. (cp_parser_omp_construct): Handle PRAGMA_OMP_TASK. (cp_parser_pragma): Handle PRAGMA_OMP_TASK and PRAGMA_OMP_TASKWAIT. * pt.c (tsubst_omp_clauses): Handle OMP_CLAUSE_COLLAPSE and OMP_CLAUSE_UNTIED. Handle OMP_CLAUSE_LASTPRIVATE_STMT. (tsubst_omp_for_iterator): New function. (dependent_omp_for_p): New function. (tsubst_expr) <case OMP_FOR>: Use it. Handle collapsed OMP_FOR loops. Adjust finish_omp_for caller. Handle loops with random access iterators. Adjust for OMP_FOR_{INIT,COND,INCR} changes. (tsubst_expr): Handle OMP_TASK. * semantics.c (cxx_omp_create_clause_info): New function. (finish_omp_clauses): Call it. Handle OMP_CLAUSE_UNTIED and OMP_CLAUSE_COLLAPSE. (cxx_omp_predetermined_sharing): Removed. * semantics.c (finish_omp_for): Allow pointer iterators. Use handle_omp_for_class_iterator and dependent_omp_for_p. Handle collapsed for loops. Adjust c_finish_omp_for caller. Add new clauses argument. Fix check for type dependent cond or incr. Set OMP_FOR_CLAUSES to clauses. Use cp_convert instead of fold_convert to convert incr amount to difference_type. Only fold if not in template. If decl is mentioned in lastprivate clause, set OMP_CLAUSE_LASTPRIVATE_STMT. Handle loops with random access iterators. Adjust for OMP_FOR_{INIT,COND,INCR} changes. (finish_omp_threadprivate): Allow static class members of the current class. (handle_omp_for_class_iterator, begin_omp_task, finish_omp_task, finish_omp_taskwait): New functions. * parser.c (cp_parser_binary_expression): Add prec argument. (cp_parser_assignment_expression): Adjust caller. * cp-tree.h (outer_curly_brace_block): New prototype. * decl.c (outer_curly_brace_block): No longer static. fortran/ * scanner.c (skip_free_comments, skip_fixed_comments): Handle tabs. * parse.c (next_free): Allow tab after !$omp. (decode_omp_directive): Handle !$omp task, !$omp taskwait and !$omp end task. (case_executable): Add ST_OMP_TASKWAIT. (case_exec_markers): Add ST_OMP_TASK. (gfc_ascii_statement): Handle ST_OMP_TASK, ST_OMP_END_TASK and ST_OMP_TASKWAIT. (parse_omp_structured_block, parse_executable): Handle ST_OMP_TASK. * gfortran.h (gfc_find_sym_in_expr): New prototype. (gfc_statement): Add ST_OMP_TASK, ST_OMP_END_TASK and ST_OMP_TASKWAIT. (gfc_omp_clauses): Add OMP_SCHED_AUTO to sched_kind, OMP_DEFAULT_FIRSTPRIVATE to default_sharing. Add collapse and untied fields. (gfc_exec_op): Add EXEC_OMP_TASK and EXEC_OMP_TASKWAIT. * f95-lang.c (LANG_HOOKS_OMP_CLAUSE_COPY_CTOR, LANG_HOOKS_OMP_CLAUSE_ASSIGN_OP, LANG_HOOKS_OMP_CLAUSE_DTOR, LANG_HOOKS_OMP_PRIVATE_OUTER_REF): Define. * trans.h (gfc_omp_clause_default_ctor): Add another argument. (gfc_omp_clause_copy_ctor, gfc_omp_clause_assign_op, gfc_omp_clause_dtor, gfc_omp_private_outer_ref): New prototypes. * types.def (BT_ULONGLONG, BT_PTR_ULONGLONG, BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR, BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULLPTR_ULLPTR, BT_FN_BOOL_BOOL_ULL_ULL_ULL_ULL_ULLPTR_ULLPTR, BT_FN_VOID_PTR_PTR, BT_PTR_FN_VOID_PTR_PTR, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): New. (BT_BOOL): Use integer type with BOOL_TYPE_SIZE rather than boolean_type_node. 
* dump-parse-tree.c (gfc_show_omp_node): Handle EXEC_OMP_TASK, EXEC_OMP_TASKWAIT, OMP_SCHED_AUTO, OMP_DEFAULT_FIRSTPRIVATE, untied and collapse clauses. (gfc_show_code_node): Handle EXEC_OMP_TASK and EXEC_OMP_TASKWAIT. * trans.c (gfc_trans_code): Handle EXEC_OMP_TASK and EXEC_OMP_TASKWAIT. * st.c (gfc_free_statement): Likewise. * resolve.c (gfc_resolve_blocks, resolve_code): Likewise. (find_sym_in_expr): Rename to... (gfc_find_sym_in_expr): ... this. No longer static. (resolve_allocate_expr, resolve_ordinary_assign): Adjust caller. * match.h (gfc_match_omp_task, gfc_match_omp_taskwait): New prototypes. * openmp.c (resolve_omp_clauses): Allow allocatable arrays in firstprivate, lastprivate, reduction, copyprivate and copyin clauses. (omp_current_do_code): Made static. (omp_current_do_collapse): New variable. (gfc_resolve_omp_do_blocks): Compute omp_current_do_collapse, clear omp_current_do_code and omp_current_do_collapse on return. (gfc_resolve_do_iterator): Handle collapsed do loops. (resolve_omp_do): Likewise, diagnose errorneous collapsed do loops. (OMP_CLAUSE_COLLAPSE, OMP_CLAUSE_UNTIED): Define. (gfc_match_omp_clauses): Handle default (firstprivate), schedule (auto), untied and collapse (n) clauses. (OMP_DO_CLAUSES): Add OMP_CLAUSE_COLLAPSE. (OMP_TASK_CLAUSES): Define. (gfc_match_omp_task, gfc_match_omp_taskwait): New functions. * trans-openmp.c (gfc_omp_private_outer_ref): New function. (gfc_omp_clause_default_ctor): Add outer argument. For allocatable arrays allocate them with the bounds of the outer var if outer var is allocated. (gfc_omp_clause_copy_ctor, gfc_omp_clause_assign_op, gfc_omp_clause_dtor): New functions. (gfc_trans_omp_array_reduction): If decl is allocatable array, allocate it with outer var's bounds in OMP_CLAUSE_REDUCTION_INIT and deallocate it in OMP_CLAUSE_REDUCTION_MERGE. (gfc_omp_predetermined_sharing): Return OMP_CLAUSE_DEFAULT_SHARED for assumed-size arrays. (gfc_trans_omp_do): Add par_clauses argument. If dovar is present in lastprivate clause and do loop isn't simple, set OMP_CLAUSE_LASTPRIVATE_STMT. If dovar is present in parallel's lastprivate clause, change it to shared and add lastprivate clause to OMP_FOR_CLAUSES. Handle collapsed do loops. (gfc_trans_omp_directive): Adjust gfc_trans_omp_do callers. (gfc_trans_omp_parallel_do): Likewise. Move collapse clause to OMP_FOR from OMP_PARALLEL. (gfc_trans_omp_clauses): Handle OMP_SCHED_AUTO, OMP_DEFAULT_FIRSTPRIVATE, untied and collapse clauses. (gfc_trans_omp_task, gfc_trans_omp_taskwait): New functions. (gfc_trans_omp_directive): Handle EXEC_OMP_TASK and EXEC_OMP_TASKWAIT. gcc/testsuite/ * gcc.dg/gomp/collapse-1.c: New test. * gcc.dg/gomp/nesting-1.c: New test. * g++.dg/gomp/task-1.C: New test. * g++.dg/gomp/predetermined-1.C: New test. * g++.dg/gomp/tls-4.C: New test. * gfortran.dg/gomp/collapse1.f90: New test. * gfortran.dg/gomp/sharing-3.f90: New test. * gcc.dg/gomp/pr27499.c (foo): Remove is unsigned dg-warning. * g++.dg/gomp/pr27499.C (foo): Likewise. * g++.dg/gomp/for-16.C (foo): Likewise. * g++.dg/gomp/tls-3.C: Remove dg-error, add S::s definition. * g++.dg/gomp/pr34607.C: Adjust dg-error location. * g++.dg/gomp/for-16.C (foo): Add a new dg-error. * gcc.dg/gomp/appendix-a/a.35.4.c: Add dg-warning. * gcc.dg/gomp/appendix-a/a.35.6.c: Likewise. * gfortran.dg/gomp/appendix-a/a.35.4.f90: Likewise. * gfortran.dg/gomp/appendix-a/a.35.6.f90: Likewise. * gfortran.dg/gomp/omp_parse1.f90: Remove !$omp tab test. * gfortran.dg/gomp/appendix-a/a.33.4.f90: Remove dg-error about allocatable array. 
* gfortran.dg/gomp/reduction1.f90: Likewise. libgomp/ * configure.ac (LIBGOMP_GNU_SYMBOL_VERSIONING): New AC_DEFINE. Substitute also OMP_*LOCK_25*. * configure: Regenerated. * config.h.in: Regenerated. * Makefile.am (libgomp_la_SOURCES): Add loop_ull.c, iter_ull.c, ptrlock.c and task.c. * Makefile.in: Regenerated. * testsuite/Makefile.in: Regenerated. * task.c: New file. * loop_ull.c: New file. * iter_ull.c: New file. * libgomp.h: Include ptrlock.h. (enum gomp_task_kind): New type. (struct gomp_team): Add task_lock, task_queue, task_count, task_running_count, single_count fields. Add work_share_list_free_lock ifndef HAVE_SYNC_BUILTINS. Remove work_share_lock, generation_mask, oldest_live_gen, num_live_gen and init_work_shares fields, add work work_share_list_alloc, work_share_list_free and work_share_chunk fields. Change work_shares from pointer to pointers into an array. Change ordered_release field into gomp_sem_t ** from flexible array member. Add implicit_task and initial_work_shares fields. Move close to the end of the struct. (struct gomp_team_state): Add single_count, last_work_share, active_level and level fields, remove work_share_generation. (gomp_barrier_handle_tasks): New prototype. (gomp_finish_task): New inline function. (struct gomp_work_share): Move chunk_size, end, incr into transparent union/struct, add chunk_size_ull, end_ll, incr_ll and next_ll fields. Reshuffle fields. Add next_alloc, next_ws, next_free and inline_ordered_team_ids fields, change ordered_team_ids into pointer from flexible array member. Add mode field. Put lock and next into a different cache line from most of the write-once fields. (gomp_iter_ull_static_next, gomp_iter_ull_dynamic_next_locked, gomp_iter_ull_guided_next_locked, gomp_iter_ull_dynamic_next, gomp_iter_ull_guided_next): New prototypes. (gomp_new_icv): New prototype. (struct gomp_thread): Add thread_pool and task fields. (struct gomp_thread_pool): New type. (gomp_new_team): New prototype. (gomp_team_start): Change type of last argument. (gomp_new_work_share): Removed. (gomp_init_work_share, gomp_fini_work_share): New prototypes. (gomp_work_share_init_done): New static inline. (gomp_throttled_spin_count_var, gomp_available_cpus, gomp_managed_threads): New extern decls. (gomp_init_task): New prototype. (gomp_spin_count_var): New extern var decl. (LIBGOMP_GNU_SYMBOL_VERSIONING): Undef if no visibility or no alias support, or if not PIC. (gomp_init_lock_30, gomp_destroy_lock_30, gomp_set_lock_30, gomp_unset_lock_30, gomp_test_lock_30, gomp_init_nest_lock_30, gomp_destroy_nest_lock_30, gomp_set_nest_lock_30, gomp_unset_nest_lock_30, gomp_test_nest_lock_30, gomp_init_lock_25, gomp_destroy_lock_25, gomp_set_lock_25, gomp_unset_lock_25, gomp_test_lock_25, gomp_init_nest_lock_25, gomp_destroy_nest_lock_25, gomp_set_nest_lock_25, gomp_unset_nest_lock_25, gomp_test_nest_lock_25): New prototypes. (omp_lock_symver, strong_alias): Define. (gomp_remaining_threads_count, gomp_remaining_threads_lock): New decls. (gomp_end_task): New. (struct gomp_task_icv, gomp_global_icv): New. (gomp_thread_limit_var, gomp_max_active_levels_var): New. (struct gomp_task): New. (gomp_nthreads_var, gomp_dyn_var, gomp_nest_var, gomp_run_sched_var, gomp_run_sched_chunk): Remove. (gomp_icv): New. (gomp_schedule_type): Reorder enum to match omp_sched_t. * team.c (struct gomp_thread_start_data): Add thread_pool and task fields. (gomp_thread_start): Add gomp_team_barrier_wait call. For non-nested case remove clearing of docked thread thr fields. 
Use pool fields instead of global gomp_* variables. Use gomp_barrier_wait_last when needed. Initialize ts.active_level. Create tasks for each member thread. (free_team): Only destroy team barrier, task_lock here and free it. (gomp_free_thread): Free last_team if non-NULL. (gomp_team_end): Call gomp_team_barrier_wait instead of gomp_barrier_wait. For nested case call one extra gomp_barrier_wait. Move here some destruction from free_team. Call free_team on pool->last_team if any, rather than freeing current team. Destroy work_share_list_free_lock ifndef HAVE_SYNC_BUILTINS. (gomp_new_icv): New function. (gomp_threads, gomp_threads_size, gomp_threads_used, gomp_threads_dock): Removed. (gomp_thread_destructor): New variable. (gomp_new_thread_pool, gomp_free_pool_helper, gomp_free_thread): New functions. (gomp_team_start): Create new pool if current thread doesn't have one. Use pool fields instead of global gomp_* variables. Initialize thread_pool field for new threads. Clear single_count. Change last argument from ws to team, don't create new team, set ts.work_share to &team->work_shares[0] and clear ts.last_work_share. Don't clear ts.work_share_generation. If number of threads changed, adjust atomically gomp_managed_threads. Use gomp_init_task instead of gomp_new_task, set thr->task to the corresponding implicit_task array entry. Create tasks for each member thread. Initialize ts.level. (initialize_team): Call pthread_key_create on gomp_thread_destructor. (team_destructor): New function. (new_team): Removed. (gomp_new_team): New function. (free_team): Free gomp_work_share blocks chained through next_alloc, instead of freeing work_shares and destroying work_share_lock. (gomp_team_end): Call gomp_fini_work_share. If number of threads changed, adjust atomically gomp_managed_threads. Use gomp_end_task. * barrier.c (GOMP_barrier): Call gomp_team_barrier_wait instead of gomp_barrier_wait. * single.c (GOMP_single_copy_start): Call gomp_team_barrier_wait instead of gomp_barrier_wait. Call gomp_work_share_init_done if gomp_work_share_start returned true. Don't unlock ws->lock. (GOMP_single_copy_end): Call gomp_team_barrier_wait instead of gomp_barrier_wait. (GOMP_single_start): Rewritten if HAVE_SYNC_BUILTINS. Call gomp_work_share_init_done if gomp_work_share_start returned true. Don't unlock ws->lock. * work.c: Include stddef.h. (free_work_share): Use work_share_list_free_lock instead of atomic chaining ifndef HAVE_SYNC_BUILTINS. Add team argument. Call gomp_fini_work_share and then either free ws if orphaned, or put it into work_share_list_free list of the current team. (alloc_work_share, gomp_init_work_share, gomp_fini_work_share): New functions. (gomp_work_share_start, gomp_work_share_end, gomp_work_share_end_nowait): Rewritten. * omp_lib.f90.in Change some tabs to spaces to prevent warnings. (openmp_version): Set to 200805. (omp_sched_kind, omp_sched_static, omp_sched_dynamic, omp_sched_guided, omp_sched_auto): New parameters. (omp_set_schedule, omp_get_schedule, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level): New interfaces. * omp_lib.h.in (openmp_version): Set to 200805. (omp_sched_kind, omp_sched_static, omp_sched_dynamic, omp_sched_guided, omp_sched_auto): New parameters. (omp_set_schedule, omp_get_schedule, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level): New externals. 
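The corresponding C prototypes land in omp.h.in (see below); a small user-level example of the new OpenMP 3.0 API (illustrative only):

  #include <omp.h>
  #include <stdio.h>

  int
  main (void)
  {
    omp_set_schedule (omp_sched_auto, 0);
    omp_set_max_active_levels (2);
  #pragma omp parallel num_threads (2)
    {
      omp_sched_t kind;
      int modifier;
      omp_get_schedule (&kind, &modifier);
      printf ("level %d ancestor %d team size %d schedule %d\n",
              omp_get_level (), omp_get_ancestor_thread_num (0),
              omp_get_team_size (omp_get_level ()), (int) kind);
    }
    return 0;
  }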
* loop.c: Include limits.h. (GOMP_loop_runtime_next, GOMP_loop_ordered_runtime_next): Handle GFS_AUTO. (GOMP_loop_runtime_start, GOMP_loop_ordered_runtime_start): Likewise. Use gomp_icv. (gomp_loop_static_start, gomp_loop_dynamic_start): Clear ts.static_trip here. (gomp_loop_static_start, gomp_loop_ordered_static_start): Call gomp_work_share_init_done after gomp_loop_init. Don't unlock ws->lock. (gomp_loop_dynamic_start, gomp_loop_guided_start): Call gomp_work_share_init_done after gomp_loop_init. If HAVE_SYNC_BUILTINS, don't unlock ws->lock, otherwise lock it. (gomp_loop_ordered_dynamic_start, gomp_loop_ordered_guided_start): Call gomp_work_share_init_done after gomp_loop_init. Lock ws->lock. (gomp_parallel_loop_start): Call gomp_new_team instead of gomp_new_work_share. Call gomp_loop_init on &team->work_shares[0]. Adjust gomp_team_start caller. Pass 0 as second argument to gomp_resolve_num_threads. (gomp_loop_init): For GFS_DYNAMIC, multiply ws->chunk_size by incr. If adding ws->chunk_size nthreads + 1 times after end won't overflow, set ws->mode to 1. * libgomp_g.h (GOMP_loop_ull_static_start, GOMP_loop_ull_dynamic_start, GOMP_loop_ull_guided_start, GOMP_loop_ull_runtime_start, GOMP_loop_ull_ordered_static_start, GOMP_loop_ull_ordered_dynamic_start, GOMP_loop_ull_ordered_guided_start, GOMP_loop_ull_ordered_runtime_start, GOMP_loop_ull_static_next, GOMP_loop_ull_dynamic_next, GOMP_loop_ull_guided_next, GOMP_loop_ull_runtime_next, GOMP_loop_ull_ordered_static_next, GOMP_loop_ull_ordered_dynamic_next, GOMP_loop_ull_ordered_guided_next, GOMP_loop_ull_ordered_runtime_next, GOMP_task, GOMP_taskwait): New prototypes. * libgomp.map: Export lock routines also @@OMP_2.0. (GOMP_loop_ordered_dynamic_first, GOMP_loop_ordered_guided_first, GOMP_loop_ordered_runtime_first, GOMP_loop_ordered_static_first): Remove. (GOMP_loop_ull_dynamic_next, GOMP_loop_ull_dynamic_start, GOMP_loop_ull_guided_next, GOMP_loop_ull_guided_start, GOMP_loop_ull_ordered_dynamic_next, GOMP_loop_ull_ordered_dynamic_start, GOMP_loop_ull_ordered_guided_next, GOMP_loop_ull_ordered_guided_start, GOMP_loop_ull_ordered_runtime_next, GOMP_loop_ull_ordered_runtime_start, GOMP_loop_ull_ordered_static_next, GOMP_loop_ull_ordered_static_start, GOMP_loop_ull_runtime_next, GOMP_loop_ull_runtime_start, GOMP_loop_ull_static_next, GOMP_loop_ull_static_start, GOMP_task, GOMP_taskwait): Export @@GOMP_2.0. (omp_set_schedule, omp_get_schedule, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level, omp_set_schedule_, omp_set_schedule_8_, omp_get_schedule_, omp_get_schedule_8_, omp_get_thread_limit_, omp_set_max_active_levels_, omp_set_max_active_levels_8_, omp_get_max_active_levels_, omp_get_level_, omp_get_ancestor_thread_num_, omp_get_ancestor_thread_num_8_, omp_get_team_size_, omp_get_team_size_8_, omp_get_active_level_): New exports @@OMP_3.0. * omp.h.in (omp_sched_t): New type. (omp_set_schedule, omp_get_schedule, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level): New prototypes. * env.c (gomp_spin_count_var, gomp_throttled_spin_count_var, gomp_available_cpus, gomp_managed_threads, gomp_max_active_levels_var, gomp_thread_limit_var, gomp_remaining_threads_count, gomp_remaining_threads_lock): New variables. (parse_spincount): New function. (initialize_env): Call gomp_init_num_threads unconditionally. Initialize gomp_available_cpus. 
Call parse_spincount, initialize gomp_{,throttled_}spin_count_var depending on presence and value of OMP_WAIT_POLICY and GOMP_SPINCOUNT env vars. Handle GOMP_BLOCKTIME env var. Handle OMP_WAIT_POLICY, OMP_MAX_ACTIVE_LEVELS, OMP_THREAD_LIMIT, OMP_STACKSIZE env vars. Handle unit specification for GOMP_STACKSIZE. Initialize gomp_remaining_threads_count and gomp_remaining_threads_lock if needed. Use gomp_global_icv. (gomp_nthreads_var, gomp_dyn_var, gomp_nest_var, gomp_run_sched_var, gomp_run_sched_chunk): Remove. (gomp_global_icv): New. (parse_schedule): Use it. Parse "auto". (omp_set_num_threads): Use gomp_icv. (omp_set_dynamic, omp_get_dynamic, omp_set_nested, omp_get_nested): Likewise. (omp_get_max_threads): Move from parallel.c. (omp_set_schedule, omp_get_schedule, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels): New functions, add ialias. (parse_stacksize, parse_wait_policy): New functions. * fortran.c: Rewrite lock wrappers, if symbol versioning provide both wrappers for compatibility and new locks. (omp_set_schedule, omp_get_schedule, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level): New ialias_redirect. (omp_set_schedule_, omp_set_schedule_8_, omp_get_schedule_, omp_get_schedule_8_, omp_get_thread_limit_, omp_set_max_active_levels_, omp_set_max_active_levels_8_, omp_get_max_active_levels_, omp_get_level_, omp_get_ancestor_thread_num_, omp_get_ancestor_thread_num_8_, omp_get_team_size_, omp_get_team_size_8_, omp_get_active_level_): New functions. * parallel.c: Include limits.h. (gomp_resolve_num_threads): Add count argument. Rewritten. (GOMP_parallel_start): Call gomp_new_team and pass that as last argument to gomp_team_start. Pass 0 as second argument to gomp_resolve_num_threads. (GOMP_parallel_end): Decrease gomp_remaining_threads_count if gomp_thread_limit_var != ULONG_MAX. (omp_in_parallel): Implement using ts.active_level. (omp_get_max_threads): Move to env.c. (omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level): New functions, add ialias. * sections.c (GOMP_sections_start): Call gomp_work_share_init_done after gomp_sections_init. If HAVE_SYNC_BUILTINS, call gomp_iter_dynamic_next instead of the _locked variant and don't take lock around it, otherwise acquire it before calling gomp_iter_dynamic_next_locked. (GOMP_sections_next): If HAVE_SYNC_BUILTINS, call gomp_iter_dynamic_next instead of the _locked variant and don't take lock around it. (GOMP_parallel_sections_start): Call gomp_new_team instead of gomp_new_work_share. Call gomp_sections_init on &team->work_shares[0]. Adjust gomp_team_start caller. Pass count as second argument to gomp_resolve_num_threads, don't adjust num_threads after the call. Use gomp_icv. * iter.c (gomp_iter_dynamic_next_locked): Don't multiply ws->chunk_size by incr. (gomp_iter_dynamic_next): Likewise. If ws->mode, use more efficient code. * libgomp_f.h.in (omp_lock_25_arg_t, omp_nest_lock_25_arg_t): New types. (omp_lock_25_arg, omp_nest_lock_25_arg): New macros. (omp_check_defines): Check even the compat defines. * config/linux/ptrlock.c: New file. * config/linux/ptrlock.h: New file. * config/linux/wait.h: New file. * config/posix/ptrlock.c: New file. * config/posix/ptrlock.h: New file. * config/linux/bar.h (gomp_team_barrier_wait, gomp_team_barrier_wait_end, gomp_team_barrier_wake): New prototypes. 
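Sketched from the parallel.c changes above (an assumed simplification, not the verbatim source), the parallel start path now builds the team first and hands it to gomp_team_start:

  void
  GOMP_parallel_start (void (*fn) (void *), void *data, unsigned num_threads)
  {
    num_threads = gomp_resolve_num_threads (num_threads, 0);
    gomp_team_start (fn, data, num_threads, gomp_new_team (num_threads));
  }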
(gomp_team_barrier_set_task_pending, gomp_team_barrier_clear_task_pending, gomp_team_barrier_set_waiting_for_tasks, gomp_team_barrier_waiting_for_tasks, gomp_team_barrier_done): New inlines. (gomp_barrier_t): Rewritten. (gomp_barrier_state_t): New typedef. (gomp_barrier_init, gomp_barrier_reinit, gomp_barrier_destroy, gomp_barrier_wait_start): Rewritten. (gomp_barrier_wait_end): Change second argument to gomp_barrier_state_t. (gomp_barrier_last_thread, gomp_barrier_wait_last): New static inlines. * config/linux/bar.c: Include wait.h instead of libgomp.h and futex.h. (gomp_barrier_wait_end): Rewritten. (gomp_team_barrier_wait, gomp_team_barrier_wait_end, gomp_team_barrier_wake, gomp_barrier_wait_last): New functions. * config/posix/bar.h (gomp_barrier_t): Add generation field. (gomp_barrier_state_t): New typedef. (gomp_team_barrier_wait, gomp_team_barrier_wait_end, gomp_team_barrier_wake): New prototypes. (gomp_barrier_wait_start): Or all but low 2 bits from generation into the return value. Return gomp_barrier_state_t. (gomp_team_barrier_set_task_pending, gomp_team_barrier_clear_task_pending, gomp_team_barrier_set_waiting_for_tasks, gomp_team_barrier_waiting_for_tasks, gomp_team_barrier_done): New inlines. (gomp_barrier_wait_end): Change second argument to gomp_barrier_state_t. (gomp_barrier_last_thread, gomp_barrier_wait_last): New static inlines. * config/posix/bar.c (gomp_barrier_init): Clear generation field. (gomp_barrier_wait_end): Change second argument to gomp_barrier_state_t. (gomp_team_barrier_wait, gomp_team_barrier_wait_end, gomp_team_barrier_wake): New functions. * config/linux/mutex.c: Include wait.h instead of libgomp.h and futex.h. (gomp_futex_wake, gomp_futex_wait): New variables. (gomp_mutex_lock_slow): Call do_wait instead of futex_wait. * config/linux/lock.c: Rewrite to make locks task owned, for backwards compatibility provide the old entrypoints if symbol versioning. Include wait.h instead of libgomp.h and futex.h. (gomp_set_nest_lock_25): Call do_wait instead of futex_wait. * config/posix95/lock.c: Rewrite to make locks task owned, for backwards compatibility provide the old entrypoints if symbol versioning. * config/posix/lock.c: Rewrite to make locks task owned, for backwards compatibility provide the old entrypoints if symbol versioning. * config/linux/proc.c (gomp_init_num_threads): Use gomp_global_icv. (get_num_procs, gomp_dynamic_max_threads): Use gomp_icv. * config/posix/proc.c, config/mingw32/proc.c: Similarly. * config/linux/powerpc/futex.h (FUTEX_WAIT, FUTEX_WAKE): Remove. (sys_futex0): Return error code. (futex_wake, futex_wait): If ENOSYS was returned, clear FUTEX_PRIVATE_FLAG in gomp_futex_wa{ke,it} and retry. (cpu_relax, atomic_write_barrier): New static inlines. * config/linux/alpha/futex.h (FUTEX_WAIT, FUTEX_WAKE): Remove. (futex_wake, futex_wait): If ENOSYS was returned, clear FUTEX_PRIVATE_FLAG in gomp_futex_wa{ke,it} and retry. (cpu_relax, atomic_write_barrier): New static inlines. * config/linux/x86/futex.h (FUTEX_WAIT, FUTEX_WAKE): Remove. (sys_futex0): Return error code. (futex_wake, futex_wait): If ENOSYS was returned, clear FUTEX_PRIVATE_FLAG in gomp_futex_wa{ke,it} and retry. (cpu_relax, atomic_write_barrier): New static inlines. * config/linux/s390/futex.h (FUTEX_WAIT, FUTEX_WAKE): Remove. (sys_futex0): Return error code. (futex_wake, futex_wait): If ENOSYS was returned, clear FUTEX_PRIVATE_FLAG in gomp_futex_wa{ke,it} and retry. (cpu_relax, atomic_write_barrier): New static inlines. 
* config/linux/ia64/futex.h (FUTEX_WAIT, FUTEX_WAKE): Remove. (sys_futex0): Return error code. (futex_wake, futex_wait): If ENOSYS was returned, clear FUTEX_PRIVATE_FLAG in gomp_futex_wa{ke,it} and retry. (cpu_relax, atomic_write_barrier): New static inlines. * config/linux/sparc/futex.h (FUTEX_WAIT, FUTEX_WAKE): Remove. (sys_futex0): Return error code. (futex_wake, futex_wait): If ENOSYS was returned, clear FUTEX_PRIVATE_FLAG in gomp_futex_wa{ke,it} and retry. (cpu_relax, atomic_write_barrier): New static inlines. * config/linux/sem.c: Include wait.h instead of libgomp.h and futex.h. (gomp_sem_wait_slow): Call do_wait instead of futex_wait. * config/linux/affinity.c: Assume HAVE_SYNC_BUILTINS. * config/linux/omp-lock.h (omp_lock_25_t, omp_nest_lock_25_t): New types. (omp_nest_lock_t): Change owner into void *, add lock field. * config/posix95/omp-lock.h: Include semaphore.h. (omp_lock_25_t, omp_nest_lock_25_t): New types. (omp_lock_t): Use sem_t instead of mutex if semaphores aren't broken. (omp_nest_lock_t): Likewise. Change owner to void *. * config/posix/omp-lock.h: Include semaphore.h. (omp_lock_25_t, omp_nest_lock_25_t): New types. (omp_lock_t): Use sem_t instead of mutex if semaphores aren't broken. (omp_nest_lock_t): Likewise. Add owner field. * testsuite/libgomp.c/collapse-1.c: New test. * testsuite/libgomp.c/collapse-2.c: New test. * testsuite/libgomp.c/collapse-3.c: New test. * testsuite/libgomp.c/icv-1.c: New test. * testsuite/libgomp.c/icv-2.c: New test. * testsuite/libgomp.c/lib-2.c: New test. * testsuite/libgomp.c/lock-1.c: New test. * testsuite/libgomp.c/lock-2.c: New test. * testsuite/libgomp.c/lock-3.c: New test. * testsuite/libgomp.c/loop-4.c: New test. * testsuite/libgomp.c/loop-5.c: New test. * testsuite/libgomp.c/loop-6.c: New test. * testsuite/libgomp.c/loop-7.c: New test. * testsuite/libgomp.c/loop-8.c: New test. * testsuite/libgomp.c/loop-9.c: New test. * testsuite/libgomp.c/nested-3.c: New test. * testsuite/libgomp.c/nestedfn-6.c: New test. * testsuite/libgomp.c/sort-1.c: New test. * testsuite/libgomp.c/task-1.c: New test. * testsuite/libgomp.c/task-2.c: New test. * testsuite/libgomp.c/task-3.c: New test. * testsuite/libgomp.c/task-4.c: New test. * testsuite/libgomp.c++/c++.exp: Add libstdc++-v3 build includes to C++ testsuite default compiler options. * testsuite/libgomp.c++/collapse-1.C: New test. * testsuite/libgomp.c++/collapse-2.C: New test. * testsuite/libgomp.c++/ctor-10.C: New test. * testsuite/libgomp.c++/for-1.C: New test. * testsuite/libgomp.c++/for-2.C: New test. * testsuite/libgomp.c++/for-3.C: New test. * testsuite/libgomp.c++/for-4.C: New test. * testsuite/libgomp.c++/for-5.C: New test. * testsuite/libgomp.c++/loop-8.C: New test. * testsuite/libgomp.c++/loop-9.C: New test. * testsuite/libgomp.c++/loop-10.C: New test. * testsuite/libgomp.c++/task-1.C: New test. * testsuite/libgomp.c++/task-2.C: New test. * testsuite/libgomp.c++/task-3.C: New test. * testsuite/libgomp.c++/task-4.C: New test. * testsuite/libgomp.c++/task-5.C: New test. * testsuite/libgomp.c++/task-6.C: New test. * testsuite/libgomp.fortran/allocatable1.f90: New test. * testsuite/libgomp.fortran/allocatable2.f90: New test. * testsuite/libgomp.fortran/allocatable3.f90: New test. * testsuite/libgomp.fortran/allocatable4.f90: New test. * testsuite/libgomp.fortran/collapse1.f90: New test. * testsuite/libgomp.fortran/collapse2.f90: New test. * testsuite/libgomp.fortran/collapse3.f90: New test. * testsuite/libgomp.fortran/collapse4.f90: New test. 
* testsuite/libgomp.fortran/lastprivate1.f90: New test. * testsuite/libgomp.fortran/lastprivate2.f90: New test. * testsuite/libgomp.fortran/lib4.f90: New test. * testsuite/libgomp.fortran/lock-1.f90: New test. * testsuite/libgomp.fortran/lock-2.f90: New test. * testsuite/libgomp.fortran/nested1.f90: New test. * testsuite/libgomp.fortran/nestedfn4.f90: New test. * testsuite/libgomp.fortran/strassen.f90: New test. * testsuite/libgomp.fortran/tabs1.f90: New test. * testsuite/libgomp.fortran/tabs2.f: New test. * testsuite/libgomp.fortran/task1.f90: New test. * testsuite/libgomp.fortran/task2.f90: New test. * testsuite/libgomp.fortran/vla4.f90: Add dg-warning. * testsuite/libgomp.fortran/vla5.f90: Likewise. * testsuite/libgomp.c/pr26943-2.c: Likewise. * testsuite/libgomp.c/pr26943-3.c: Likewise. * testsuite/libgomp.c/pr26943-4.c: Likewise. Co-Authored-By: Jakob Blomer <jakob.blomer@ira.uka.de> Co-Authored-By: Richard Henderson <rth@redhat.com> Co-Authored-By: Ulrich Drepper <drepper@redhat.com> From-SVN: r136433
/* Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or
   (at your option) any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
   more details.

   You should have received a copy of the GNU Lesser General Public License
   along with libgomp; see the file COPYING.LIB.  If not, write to the
   Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* As a special exception, if you link this library with other files, some
   of which are compiled with GCC, to produce an executable, this library
   does not by itself cause the resulting executable to be covered by the
   GNU General Public License.  This exception does not however invalidate
   any other reasons why the executable file might be covered by the GNU
   General Public License.  */

/* This file contains data types and function declarations that are not
   part of the official OpenMP user interface.  There are declarations
   in here that are part of the GNU OpenMP ABI, in that the compiler is
   required to know about them and use them.

   The convention is that the all caps prefix "GOMP" is used to group items
   that are part of the external ABI, and the lower case prefix "gomp"
   is used to group items that are completely private to the library.  */

#ifndef LIBGOMP_H
#define LIBGOMP_H 1

#include "config.h"
#include "gstdint.h"

#include <pthread.h>
#include <stdbool.h>

#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility push(hidden)
#endif

#include "sem.h"
#include "mutex.h"
#include "bar.h"
#include "ptrlock.h"

/* This structure contains the data to control one work-sharing construct,
   either a LOOP (FOR/DO) or a SECTIONS.  */

enum gomp_schedule_type
{
  GFS_RUNTIME,
  GFS_STATIC,
  GFS_DYNAMIC,
  GFS_GUIDED,
  GFS_AUTO
};

struct gomp_work_share
{
  /* This member records the SCHEDULE clause to be used for this construct.
     The user specification of "runtime" will already have been resolved.
     If this is a SECTIONS construct, this value will always be DYNAMIC.  */
  enum gomp_schedule_type sched;

  int mode;

  union {
    struct {
      /* This is the chunk_size argument to the SCHEDULE clause.  */
      long chunk_size;

      /* This is the iteration end point.  If this is a SECTIONS construct,
         this is the number of contained sections.  */
      long end;

      /* This is the iteration step.  If this is a SECTIONS construct, this
         is always 1.  */
      long incr;
    };

    struct {
      /* The same as above, but for the unsigned long long loop variants.  */
      unsigned long long chunk_size_ull;
      unsigned long long end_ull;
      unsigned long long incr_ull;
    };
  };

  /* This is a circular queue that details which threads will be allowed
     into the ordered region and in which order.  When a thread allocates
     iterations on which it is going to work, it also registers itself at
     the end of the array.  When a thread reaches the ordered region, it
     checks to see if it is the one at the head of the queue.  If not, it
     blocks on its RELEASE semaphore.  */
  unsigned *ordered_team_ids;

  /* This is the number of threads that have registered themselves in
     the circular queue ordered_team_ids.  */
  unsigned ordered_num_used;

  /* This is the team_id of the currently acknowledged owner of the ordered
     section, or -1u if the ordered section has not been acknowledged by
     any thread.  This is distinguished from the thread that is *allowed*
     to take the section next.  */
  unsigned ordered_owner;

  /* This is the index into the circular queue ordered_team_ids of the
     current thread that's allowed into the ordered region.  */
  unsigned ordered_cur;

  /* This is a chain of allocated gomp_work_share blocks, valid only
     in the first gomp_work_share struct in the block.  */
  struct gomp_work_share *next_alloc;

  /* The above fields are written once during workshare initialization,
     or related to ordered worksharing.  Make sure the following fields
     are in a different cache line.  */

  /* This lock protects the update of the following members.  */
  gomp_mutex_t lock __attribute__((aligned (64)));

  /* This is the count of the number of threads that have exited the work
     share construct.  If the construct was marked nowait, they have moved on
     to other work; otherwise they're blocked on a barrier.  The last member
     of the team to exit the work share construct must deallocate it.  */
  unsigned threads_completed;

  union {
    /* This is the next iteration value to be allocated.  In the case of
       GFS_STATIC loops, this is the iteration start point and never changes.  */
    long next;

    /* The same, but with unsigned long long type.  */
    unsigned long long next_ull;

    /* This is the returned data structure for SINGLE COPYPRIVATE.  */
    void *copyprivate;
  };

  union {
    /* Link to gomp_work_share struct for next work sharing construct
       encountered after this one.  */
    gomp_ptrlock_t next_ws;

    /* gomp_work_share structs are chained in the free work share cache
       through this.  */
    struct gomp_work_share *next_free;
  };

  /* If only a few threads are in the team, ordered_team_ids can point
     to this array which fills the padding at the end of this struct.  */
  unsigned inline_ordered_team_ids[0];
};

/* This structure contains all of the thread-local data associated with
   a thread team.  This is the data that must be saved when a thread
   encounters a nested PARALLEL construct.  */

struct gomp_team_state
{
  /* This is the team of which the thread is currently a member.  */
  struct gomp_team *team;

  /* This is the work share construct which this thread is currently
     processing.  Recall that with NOWAIT, not all threads may be
     processing the same construct.  */
  struct gomp_work_share *work_share;

  /* This is the previous work share construct or NULL if there wasn't any.
     When all threads are done with the current work sharing construct,
     the previous one can be freed.  The current one can't, as its
     next_ws field is used.  */
  struct gomp_work_share *last_work_share;

  /* This is the ID of this thread within the team.  This value is
     guaranteed to be between 0 and N-1, where N is the number of
     threads in the team.  */
  unsigned team_id;

  /* Nesting level.  */
  unsigned level;

  /* Active nesting level.  Only active parallel regions are counted.  */
  unsigned active_level;

#ifdef HAVE_SYNC_BUILTINS
  /* Number of single stmts encountered.  */
  unsigned long single_count;
#endif

  /* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the
     trip number through the loop.  So the first time a particular loop
     is encountered this number is 0, the second time through the loop
     it is 1, etc.  This is unused when the compiler knows in advance that
     the loop is statically scheduled.  */
  unsigned long static_trip;
};

/* These are the OpenMP 3.0 Internal Control Variables described in
   section 2.3.1.  Those described as having one copy per task are
   stored within the structure; those described as having one copy
   for the whole program are (naturally) global variables.  */

struct gomp_task_icv
{
  unsigned long nthreads_var;
  enum gomp_schedule_type run_sched_var;
  int run_sched_modifier;
  bool dyn_var;
  bool nest_var;
};

extern struct gomp_task_icv gomp_global_icv;
extern unsigned long gomp_thread_limit_var;
extern unsigned long gomp_remaining_threads_count;
#ifndef HAVE_SYNC_BUILTINS
extern gomp_mutex_t gomp_remaining_threads_lock;
#endif
extern unsigned long gomp_max_active_levels_var;
extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var;
extern unsigned long gomp_available_cpus, gomp_managed_threads;

enum gomp_task_kind
{
  GOMP_TASK_IMPLICIT,
  GOMP_TASK_IFFALSE,
  GOMP_TASK_WAITING,
  GOMP_TASK_TIED
};

/* This structure describes a "task" to be run by a thread.  */

struct gomp_task
{
  struct gomp_task *parent;
  struct gomp_task *children;
  struct gomp_task *next_child;
  struct gomp_task *prev_child;
  struct gomp_task *next_queue;
  struct gomp_task *prev_queue;
  struct gomp_task_icv icv;
  void (*fn) (void *);
  void *fn_data;
  enum gomp_task_kind kind;
  bool in_taskwait;
  gomp_sem_t taskwait_sem;
};

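/* Notes on the fields above (descriptive summary only; see task.c):
   PARENT is the task that created this one and CHILDREN its list of child
   tasks; NEXT_CHILD/PREV_CHILD link a task into its parent's children list,
   while NEXT_QUEUE/PREV_QUEUE link it into the team's task_queue.  ICV is
   this task's copy of the per-task internal control variables, FN/FN_DATA
   are the outlined task body and its argument block, KIND is the state from
   enum gomp_task_kind above, and IN_TASKWAIT/TASKWAIT_SEM are used while
   the task is blocked in GOMP_taskwait.  */
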
/* This structure describes a "team" of threads. These are the threads
|
|
that are spawned by a PARALLEL constructs, as well as the work sharing
|
|
constructs that the team encounters. */
|
|
|
|
struct gomp_team
|
|
{
|
|
/* This is the number of threads in the current team. */
|
|
unsigned nthreads;
|
|
|
|
/* This is number of gomp_work_share structs that have been allocated
|
|
as a block last time. */
|
|
unsigned work_share_chunk;
|
|
|
|
/* This is the saved team state that applied to a master thread before
|
|
the current thread was created. */
|
|
struct gomp_team_state prev_ts;
|
|
|
|
/* This semaphore should be used by the master thread instead of its
|
|
"native" semaphore in the thread structure. Required for nested
|
|
parallels, as the master is a member of two teams. */
|
|
gomp_sem_t master_release;
|
|
|
|
/* This points to an array with pointers to the release semaphore
|
|
of the threads in the team. */
|
|
gomp_sem_t **ordered_release;
|
|
|
|
/* List of gomp_work_share structs chained through next_free fields.
|
|
This is populated and taken off only by the first thread in the
|
|
team encountering a new work sharing construct, in a critical
|
|
section. */
|
|
struct gomp_work_share *work_share_list_alloc;
|
|
|
|
/* List of gomp_work_share structs freed by free_work_share. New
|
|
entries are atomically added to the start of the list, and
|
|
alloc_work_share can safely only move all but the first entry
|
|
to work_share_list alloc, as free_work_share can happen concurrently
|
|
with alloc_work_share. */
|
|
struct gomp_work_share *work_share_list_free;
|
|
|
|
#ifdef HAVE_SYNC_BUILTINS
|
|
/* Number of simple single regions encountered by threads in this
|
|
team. */
|
|
unsigned long single_count;
|
|
#else
|
|
/* Mutex protecting addition of workshares to work_share_list_free. */
|
|
gomp_mutex_t work_share_list_free_lock;
|
|
#endif
|
|
|
|
/* This barrier is used for most synchronization of the team. */
|
|
gomp_barrier_t barrier;
|
|
|
|
/* Initial work shares, to avoid allocating any gomp_work_share
|
|
structs in the common case. */
|
|
struct gomp_work_share work_shares[8];
|
|
|
|
gomp_mutex_t task_lock;
|
|
struct gomp_task *task_queue;
|
|
int task_count;
|
|
int task_running_count;
|
|
|
|
/* This array contains structures for implicit tasks. */
|
|
struct gomp_task implicit_task[];
|
|
};
|
|
|
|
/* This structure contains all data that is private to libgomp and is
   allocated per thread.  */

struct gomp_thread
{
  /* This is the function that the thread should run upon launch.  */
  void (*fn) (void *data);
  void *data;

  /* This is the current team state for this thread.  The ts.team member
     is NULL only if the thread is idle.  */
  struct gomp_team_state ts;

  /* This is the task that the thread is currently executing.  */
  struct gomp_task *task;

  /* This semaphore is used for ordered loops.  */
  gomp_sem_t release;

  /* user pthread thread pool */
  struct gomp_thread_pool *thread_pool;
};


struct gomp_thread_pool
{
  /* This array manages threads spawned from the top level, which will
     return to the idle loop once the current PARALLEL construct ends.  */
  struct gomp_thread **threads;
  unsigned threads_size;
  unsigned threads_used;
  struct gomp_team *last_team;

  /* This barrier holds and releases threads waiting in threads.  */
  gomp_barrier_t threads_dock;
};

/* ... and here is that TLS data.  */

#ifdef HAVE_TLS
extern __thread struct gomp_thread gomp_tls_data;
static inline struct gomp_thread *gomp_thread (void)
{
  return &gomp_tls_data;
}
#else
extern pthread_key_t gomp_tls_key;
static inline struct gomp_thread *gomp_thread (void)
{
  return pthread_getspecific (gomp_tls_key);
}
#endif

extern struct gomp_task_icv *gomp_new_icv (void);

/* Here's how to access the current copy of the ICVs.  */

static inline struct gomp_task_icv *gomp_icv (bool write)
{
  struct gomp_task *task = gomp_thread ()->task;
  if (task)
    return &task->icv;
  else if (write)
    return gomp_new_icv ();
  else
    return &gomp_global_icv;
}

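/* Usage sketch (not part of this header): entry points that only read an
   ICV pass false, while ones that modify it pass true so that a thread
   without a task gets its own writable copy, e.g. in env.c:

     int omp_get_dynamic (void) { return gomp_icv (false)->dyn_var; }
     void omp_set_dynamic (int val) { gomp_icv (true)->dyn_var = val; }  */
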
/* The attributes to be used during thread creation.  */
extern pthread_attr_t gomp_thread_attr;

/* Other variables.  */

extern unsigned short *gomp_cpu_affinity;
extern size_t gomp_cpu_affinity_len;

/* Function prototypes.  */

/* affinity.c */

extern void gomp_init_affinity (void);
extern void gomp_init_thread_affinity (pthread_attr_t *);

/* alloc.c */

extern void *gomp_malloc (size_t) __attribute__((malloc));
extern void *gomp_malloc_cleared (size_t) __attribute__((malloc));
extern void *gomp_realloc (void *, size_t);

/* Avoid conflicting prototypes of alloca() in system headers by using
   GCC's builtin alloca().  */
#define gomp_alloca(x) __builtin_alloca(x)

/* error.c */

extern void gomp_error (const char *, ...)
  __attribute__((format (printf, 1, 2)));
extern void gomp_fatal (const char *, ...)
  __attribute__((noreturn, format (printf, 1, 2)));

/* iter.c */

extern int gomp_iter_static_next (long *, long *);
extern bool gomp_iter_dynamic_next_locked (long *, long *);
extern bool gomp_iter_guided_next_locked (long *, long *);

#ifdef HAVE_SYNC_BUILTINS
extern bool gomp_iter_dynamic_next (long *, long *);
extern bool gomp_iter_guided_next (long *, long *);
#endif

/* iter_ull.c */

extern int gomp_iter_ull_static_next (unsigned long long *,
                                      unsigned long long *);
extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *,
                                               unsigned long long *);
extern bool gomp_iter_ull_guided_next_locked (unsigned long long *,
                                              unsigned long long *);

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
extern bool gomp_iter_ull_dynamic_next (unsigned long long *,
                                        unsigned long long *);
extern bool gomp_iter_ull_guided_next (unsigned long long *,
                                       unsigned long long *);
#endif

/* ordered.c */

extern void gomp_ordered_first (void);
extern void gomp_ordered_last (void);
extern void gomp_ordered_next (void);
extern void gomp_ordered_static_init (void);
extern void gomp_ordered_static_next (void);
extern void gomp_ordered_sync (void);

/* parallel.c */

extern unsigned gomp_resolve_num_threads (unsigned, unsigned);

/* proc.c (in config/) */

extern void gomp_init_num_threads (void);
extern unsigned gomp_dynamic_max_threads (void);

/* task.c */

extern void gomp_init_task (struct gomp_task *, struct gomp_task *,
                            struct gomp_task_icv *);
extern void gomp_end_task (void);
extern void gomp_barrier_handle_tasks (gomp_barrier_state_t);

static void inline
gomp_finish_task (struct gomp_task *task)
{
  gomp_sem_destroy (&task->taskwait_sem);
}

/* team.c */

extern struct gomp_team *gomp_new_team (unsigned);
extern void gomp_team_start (void (*) (void *), void *, unsigned,
                             struct gomp_team *);
extern void gomp_team_end (void);

/* work.c */

extern void gomp_init_work_share (struct gomp_work_share *, bool, unsigned);
extern void gomp_fini_work_share (struct gomp_work_share *);
extern bool gomp_work_share_start (bool);
extern void gomp_work_share_end (void);
extern void gomp_work_share_end_nowait (void);

static inline void
gomp_work_share_init_done (void)
{
  struct gomp_thread *thr = gomp_thread ();
  if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
    gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share);
}

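/* Expected call pattern in the work-sharing entry points (sketch only; the
   real code is in loop.c and sections.c): the thread for which
   gomp_work_share_start returns true initializes the work share and then
   publishes it with gomp_work_share_init_done:

     if (gomp_work_share_start (false))
       {
         gomp_loop_init (thr->ts.work_share, start, end, incr, sched, chunk);
         gomp_work_share_init_done ();
       }  */
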
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility pop
#endif

/* Now that we're back to default visibility, include the globals.  */
#include "libgomp_g.h"

/* Include omp.h by parts.  */
#include "omp-lock.h"
#define _LIBGOMP_OMP_LOCK_DEFINED 1
#include "omp.h.in"

#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \
    || !defined (HAVE_ATTRIBUTE_ALIAS) \
    || !defined (PIC)
# undef LIBGOMP_GNU_SYMBOL_VERSIONING
#endif

#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING
extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;

extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;

# define strong_alias(fn, al) \
  extern __typeof (fn) al __attribute__ ((alias (#fn)));
# define omp_lock_symver(fn) \
  __asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \
  __asm (".symver g" #fn "_25, " #fn "@OMP_1.0");

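/* Intended use in the lock implementations (sketch; see config/*/lock.c):
   each operation is implemented twice, e.g. gomp_set_lock_30 with the new
   task-owned OpenMP 3.0 semantics and gomp_set_lock_25 with the OpenMP 2.5
   compatible behaviour, and then
     omp_lock_symver (omp_set_lock)
   exports gomp_set_lock_30 as omp_set_lock@@OMP_3.0 and gomp_set_lock_25 as
   omp_set_lock@OMP_1.0.  Where the two implementations coincide,
   strong_alias can define the _25 symbol as an alias of the _30 one.  */
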
#else
# define gomp_init_lock_30 omp_init_lock
# define gomp_destroy_lock_30 omp_destroy_lock
# define gomp_set_lock_30 omp_set_lock
# define gomp_unset_lock_30 omp_unset_lock
# define gomp_test_lock_30 omp_test_lock
# define gomp_init_nest_lock_30 omp_init_nest_lock
# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock
# define gomp_set_nest_lock_30 omp_set_nest_lock
# define gomp_unset_nest_lock_30 omp_unset_nest_lock
# define gomp_test_nest_lock_30 omp_test_nest_lock
#endif

#ifdef HAVE_ATTRIBUTE_VISIBILITY
# define attribute_hidden __attribute__ ((visibility ("hidden")))
#else
# define attribute_hidden
#endif

#ifdef HAVE_ATTRIBUTE_ALIAS
# define ialias(fn) \
  extern __typeof (fn) gomp_ialias_##fn \
    __attribute__ ((alias (#fn))) attribute_hidden;
#else
# define ialias(fn)
#endif

#endif /* LIBGOMP_H */