d9a6bd32ad
gcc/ 2015-10-13 Jakub Jelinek <jakub@redhat.com> Aldy Hernandez <aldyh@redhat.com> Ilya Verbin <ilya.verbin@intel.com> * builtin-types.def (BT_FN_BOOL_UINT_LONGPTR_LONGPTR_LONGPTR, BT_FN_BOOL_UINT_ULLPTR_ULLPTR_ULLPTR, BT_FN_BOOL_UINT_LONGPTR_LONG_LONGPTR_LONGPTR, BT_FN_BOOL_UINT_ULLPTR_ULL_ULLPTR_ULLPTR, BT_FN_VOID_INT_SIZE_PTR_PTR_PTR_UINT_PTR, BT_FN_VOID_INT_OMPFN_SIZE_PTR_PTR_PTR_UINT_PTR, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR_INT, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_UINT_LONG_INT_LONG_LONG_LONG, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_UINT_LONG_INT_ULL_ULL_ULL, BT_FN_VOID_LONG_VAR, BT_FN_VOID_ULL_VAR): New. (BT_FN_VOID_INT_PTR_SIZE_PTR_PTR_PTR, BT_FN_VOID_INT_OMPFN_PTR_SIZE_PTR_PTR_PTR, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR): Remove. * cgraph.h (enum cgraph_simd_clone_arg_type): Add SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP, SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP and SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP. (struct cgraph_simd_clone_arg): Adjust comment. * coretypes.h (struct gomp_ordered): New forward decl. * gimple.c (gimple_build_omp_critical): Add CLAUSES argument, set critical clauses to it. (gimple_build_omp_ordered): Return gomp_ordered * instead of gimple *. Add CLAUSES argument, set ordered clauses to it. (gimple_copy): Unshare clauses on GIMPLE_OMP_CRITICAL and GIMPLE_OMP_ORDERED. * gimple.def (GIMPLE_OMP_ORDERED): Change from GSS_OMP to GSS_OMP_SINGLE_LAYOUT, move it after GIMPLE_OMP_TEAMS. * gimple.h (enum gf_mask): Add GF_OMP_TASK_TASKLOOP. Add another bit to GF_OMP_FOR_KIND_MASK mask. Add GF_OMP_FOR_KIND_TASKLOOP, renumber GF_OMP_FOR_KIND_CILKFOR and GF_OMP_FOR_KIND_OACC_LOOP. Adjust GF_OMP_FOR_SIMD, GF_OMP_FOR_COMBINED and GF_OMP_FOR_COMBINED_INTO. Add another bit to GF_OMP_TARGET_KIND_MASK mask. Add GF_OMP_TARGET_KIND_ENTER_DATA and GF_OMP_TARGET_KIND_EXIT_DATA, renumber GF_OMP_TARGET_KIND_OACC_{PARALLEL,KERNELS,DATA,UPDATE,ENTER_EXIT_DATA}. (gomp_critical): Add clauses field. (gomp_ordered): New struct. (is_a_helper <gomp_ordered *>::test): New inline. (gimple_build_omp_critical): Add CLAUSES argument. (gimple_build_omp_ordered): Likewise. Return gomp_ordered * instead of gimple *. (gimple_omp_critical_clauses, gimple_omp_critical_clauses_ptr, gimple_omp_critical_set_clauses, gimple_omp_ordered_clauses, gimple_omp_ordered_clauses_ptr, gimple_omp_ordered_set_clauses, gimple_omp_task_taskloop_p, gimple_omp_task_set_taskloop_p): New inline functions. * gimple-pretty-print.c (dump_gimple_omp_for): Handle taskloop. (dump_gimple_omp_target): Handle enter data and exit data. (dump_gimple_omp_block): Don't handle GIMPLE_OMP_ORDERED here. (dump_gimple_omp_critical): Print clauses. (dump_gimple_omp_ordered): New function. (dump_gimple_omp_task): Handle taskloop. (pp_gimple_stmt_1): Use dump_gimple_omp_ordered for GIMPLE_OMP_ORDERED. * gimple-walk.c (walk_gimple_op): Walk clauses on GIMPLE_OMP_CRITICAL and GIMPLE_OMP_ORDERED. * gimplify.c (enum gimplify_omp_var_data): Add GOVD_MAP_0LEN_ARRAY. (enum omp_region_type): Add ORT_COMBINED_TARGET and ORT_NONE. (struct gimplify_omp_ctx): Add loop_iter_var, target_map_scalars_firstprivate, target_map_pointers_as_0len_arrays and target_firstprivatize_array_bases fields. (delete_omp_context): Release loop_iter_var. (gimplify_bind_expr): Handle ORT_NONE. (maybe_fold_stmt): Adjust check for ORT_TARGET for the addition of ORT_COMBINED_TARGET. (is_gimple_stmt): Return true for OMP_TASKLOOP, OMP_TEAMS and OMP_TARGET{,_DATA,_UPDATE,_ENTER_DATA,_EXIT_DATA}. 
(omp_firstprivatize_variable): Handle ORT_NONE. Adjust check for ORT_TARGET for the addition of ORT_COMBINED_TARGET. Handle ctx->target_map_scalars_firstprivate. (omp_add_variable): Handle ORT_NONE. Allow map clause together with data sharing clauses. For data sharing clause with VLA decl on omp target/target data don't add firstprivate for the pointer. Call omp_notice_variable on TYPE_SIZE_UNIT only if it is a DECL_P. (omp_notice_threadprivate_variable): Adjust check for ORT_TARGET for the addition of ORT_COMBINED_TARGET. (omp_notice_variable): Handle ORT_NONE. Adjust check for ORT_TARGET for the addition of ORT_COMBINED_TARGET. Handle implicit mapping of pointers as zero length array sections and ctx->target_map_scalars_firstprivate mapping of scalars as firstprivate data sharing. (omp_check_private): Handle omp_member_access_dummy_var vars. (find_decl_expr): New function. (gimplify_scan_omp_clauses): Add CODE argument. For OMP_CLAUSE_IF complain if OMP_CLAUSE_IF_MODIFIER is present and does not match code. Handle OMP_CLAUSE_GANG separately. Handle OMP_CLAUSE_{PRIORITY,GRAINSIZE,NUM_TASKS,NOGROUP,THREADS,SIMD,SIMDLEN} clauses. Diagnose linear clause on combined distribute {, parallel for} simd construct, unless it is the loop iterator. Handle struct element GOMP_MAP_FIRSTPRIVATE_POINTER. Handle map clauses with COMPONENT_REF. Initialize ctx->target_map_scalars_firstprivate, ctx->target_firstprivatize_array_bases and ctx->target_map_pointers_as_0len_arrays. Add firstprivate for linear clause even to target region if combined. Remove map clauses with GOMP_MAP_FIRSTPRIVATE_POINTER kind from OMP_TARGET_{,ENTER_,EXIT_}DATA. For GOMP_MAP_FIRSTPRIVATE_POINTER map kind with non-INTEGER_CST OMP_CLAUSE_SIZE firstprivatize the bias. Handle OMP_CLAUSE_DEPEND_{SINK,SOURCE}. Handle OMP_CLAUSE_{{USE,IS}_DEVICE_PTR,DEFAULTMAP,HINT}. For linear clause on worksharing loop combined with parallel add shared clause on the parallel. Handle OMP_CLAUSE_REDUCTION with MEM_REF OMP_CLAUSE_DECL. Set DECL_NAME on omp_member_access_dummy_var vars. Add lastprivate clause to outer taskloop if needed. (gimplify_adjust_omp_clauses_1): Handle GOVD_MAP_0LEN_ARRAY. If gimplify_omp_ctxp->target_firstprivatize_array_bases, use GOMP_MAP_FIRSTPRIVATE_POINTER map kind instead of GOMP_MAP_POINTER. (gimplify_adjust_omp_clauses): Add CODE argument. Handle removal of GOMP_MAP_FIRSTPRIVATE_POINTER struct elements for struct not seen in target body. Handle removal of struct mapping if struct is not seen in target body. Remove GOMP_MAP_STRUCT map clause on OMP_TARGET_EXIT_DATA. Adjust check for ORT_TARGET for the addition of ORT_COMBINED_TARGET. Use GOMP_MAP_FIRSTPRIVATE_POINTER instead of GOMP_MAP_POINTER if ctx->target_firstprivatize_array_bases for VLAs. Set OMP_CLAUSE_MAP_PRIVATE if both data sharing and map clause appear together. Handle OMP_CLAUSE_{{USE,IS}_DEVICE_PTR,DEFAULTMAP,HINT}. Don't remove map clause if it has map-type-modifier always. Handle OMP_CLAUSE_{PRIORITY,GRAINSIZE,NUM_TASKS,NOGROUP,THREADS,SIMD,SIMDLEN} clauses. (gimplify_oacc_cache, gimplify_omp_parallel, gimplify_omp_task): Adjust gimplify_scan_omp_clauses and gimplify_adjust_omp_clauses callers. (gimplify_omp_for): Likewise. Handle OMP_TASKLOOP. Initialize loop_iter_var. Use OMP_FOR_ORIG_DECLS. Fix handling of lastprivate iterators in doacross loops. (gimplify_omp_workshare): Adjust gimplify_scan_omp_clauses and gimplify_adjust_omp_clauses callers. Use ORT_COMBINED_TARGET for OMP_TARGET_COMBINED. 
Adjust check for ORT_TARGET for the addition of ORT_COMBINED_TARGET. (gimplify_omp_target_update): Adjust gimplify_scan_omp_clauses and gimplify_adjust_omp_clauses callers. Handle OMP_TARGET_ENTER_DATA and OMP_TARGET_EXIT_DATA. (gimplify_omp_ordered): New function. (gimplify_expr): Handle OMP_TASKLOOP, OMP_TARGET_ENTER_DATA and OMP_TARGET_EXIT_DATA. Use gimplify_omp_ordered for OMP_ORDERED. Gimplify clauses on OMP_CRITICAL. * internal-fn.c (expand_GOMP_SIMD_ORDERED_START, expand_GOMP_SIMD_ORDERED_END): New functions. * internal-fn.def (GOMP_SIMD_ORDERED_START, GOMP_SIMD_ORDERED_END): New internal functions. * omp-builtins.def (BUILT_IN_GOMP_LOOP_DOACROSS_STATIC_START, BUILT_IN_GOMP_LOOP_DOACROSS_DYNAMIC_START, BUILT_IN_GOMP_LOOP_DOACROSS_GUIDED_START, BUILT_IN_GOMP_LOOP_DOACROSS_RUNTIME_START, BUILT_IN_GOMP_LOOP_ULL_DOACROSS_STATIC_START, BUILT_IN_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START, BUILT_IN_GOMP_LOOP_ULL_DOACROSS_GUIDED_START, BUILT_IN_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START, BUILT_IN_GOMP_DOACROSS_POST, BUILT_IN_GOMP_DOACROSS_WAIT, BUILT_IN_GOMP_DOACROSS_ULL_POST, BUILT_IN_GOMP_DOACROSS_ULL_WAIT, BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA, BUILT_IN_GOMP_TASKLOOP, BUILT_IN_GOMP_TASKLOOP_ULL): New built-ins. (BUILT_IN_GOMP_TASK): Add INT argument to the end. (BUILT_IN_GOMP_TARGET): Rename from GOMP_target to GOMP_target_41, adjust type. (BUILT_IN_GOMP_TARGET_DATA): Rename from GOMP_target_data to GOMP_target_data_41, adjust type. (BUILT_IN_GOMP_TARGET_UPDATE): Rename from GOMP_target_update to GOMP_target_update_41, adjust type. * omp-low.c (struct omp_region): Adjust comments, add ord_stmt field. (struct omp_for_data): Add ordered and simd_schedule fields. (omp_member_access_dummy_var, unshare_and_remap_1, unshare_and_remap, is_taskloop_ctx): New functions. (is_taskreg_ctx): Use is_parallel_ctx and is_task_ctx. (extract_omp_for_data): Handle taskloops and doacross loops and simd schedule modifier. (omp_adjust_chunk_size): New function. (get_ws_args_for): Use it. (lookup_sfield): Change first argument to splay_tree_key, add overload with first argument tree. (maybe_lookup_field): Likewise. (use_pointer_for_field): Handle omp_member_access_dummy_var. (omp_copy_decl_2): If var is TREE_ADDRESSABLE listed in task_shared_vars, clear TREE_ADDRESSABLE on the copy. (build_outer_var_ref): Add LASTPRIVATE argument, handle taskloops and omp_member_access_dummy_var vars. (build_sender_ref): Change first argument to splay_tree_key, add overload with first argument tree. (install_var_field): For mask & 8 use &DECL_UID as key instead of the tree itself. (fixup_child_record_type): Const qualify *.omp_data_i. (scan_sharing_clauses): Handle OMP_CLAUSE_SHARED_FIRSTPRIVATE, C/C++ array reductions, OMP_CLAUSE_{IS,USE}_DEVICE_PTR clauses, OMP_CLAUSE_{PRIORITY,GRAINSIZE,NUM_TASKS,SIMDLEN,THREADS,SIMD} and OMP_CLAUSE_{NOGROUP,DEFAULTMAP} clauses, OMP_CLAUSE__LOOPTEMP_ clause on taskloop, GOMP_MAP_FIRSTPRIVATE_POINTER, OMP_CLAUSE_MAP_PRIVATE. (create_omp_child_function): Set TREE_READONLY on .omp_data_i. (find_combined_for): Allow searching for different GIMPLE_OMP_FOR kinds. (add_taskreg_looptemp_clauses): New function. (scan_omp_parallel): Use it. (scan_omp_task): Likewise. (finish_taskreg_scan): Handle OMP_CLAUSE_SHARED_FIRSTPRIVATE. For taskloop, move fields for the first two _LOOPTEMP_ clauses first. (check_omp_nesting_restrictions): Handle GF_OMP_TARGET_KIND_ENTER_DATA and GF_OMP_TARGET_KIND_EXIT_DATA. Formatting fixes. Allow the sandwiched taskloop constructs. Type check OMP_CLAUSE_DEPEND_{KIND,SOURCE}. 
Allow ordered simd inside of simd region. Diagnose depend(source) or depend(sink:...) on target constructs or task/taskloop. (handle_simd_reference): Use get_name. (lower_rec_input_clauses): Likewise. Ignore all OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE clauses on taskloop construct. Allow _LOOPTEMP_ clause on GOMP_TASK. Unshare new_var before passing it to omp_clause_{default,copy}_ctor. Handle OMP_CLAUSE_REDUCTION with MEM_REF OMP_CLAUSE_DECL. Set lastprivate_firstprivate flag for linear that needs copyin and copyout. Use BUILT_IN_ALLOCA_WITH_ALIGN instead of BUILT_IN_ALLOCA. (lower_lastprivate_clauses): For OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE on taskloop lookup decl in outer context. Pass true to build_outer_var_ref lastprivate argument. Handle OMP_CLAUSE_LASTPRIVATE_TASKLOOP_IV lastprivate if the decl is global outside of outer taskloop for. (lower_reduction_clauses): Handle OMP_CLAUSE_REDUCTION with MEM_REF OMP_CLAUSE_DECL. (lower_send_clauses): Ignore first two _LOOPTEMP_ clauses in taskloop GOMP_TASK. Handle OMP_CLAUSE_SHARED_FIRSTPRIVATE. Handle omp_member_access_dummy_var vars. Handle OMP_CLAUSE_REDUCTION with MEM_REF OMP_CLAUSE_DECL. Use new lookup_sfield overload. (lower_send_shared_vars): Ignore fields with NULL or FIELD_DECL abstract origin. Handle omp_member_access_dummy_var vars. (expand_parallel_call): Use expand_omp_build_assign. (expand_task_call): Handle taskloop construct expansion. Add REGION argument. Use GOMP_TASK_* defines instead of hardcoded integers. Add priority argument to GOMP_task* calls. Or in GOMP_TASK_FLAG_PRIORITY into flags if priority is present for GOMP_task call. (expand_omp_build_assign): Add prototype. Add AFTER argument, if true emit statements after *GSI_P and continue linking. (expand_omp_taskreg): Adjust expand_task_call caller. (expand_omp_for_init_counts): Rename zero_iter_bb argument to zero_iter1_bb and first_zero_iter to first_zero_iter1, add zero_iter2_bb and first_zero_iter2 arguments, handle computation of counts even for ordered loops. (expand_omp_for_init_vars): Handle GOMP_TASK inner_stmt. (expand_omp_ordered_source, expand_omp_ordered_sink, expand_omp_ordered_source_sink, expand_omp_for_ordered_loops): New functions. (expand_omp_for_generic): Use omp_adjust_chunk_size. Handle linear clauses on worksharing loop. Handle DOACROSS loop expansion. (expand_omp_for_static_nochunk): Handle linear clauses on worksharing loop. Adjust expand_omp_for_init_counts callers. (expand_omp_for_static_chunk): Likewise. Use omp_adjust_chunk_size. (expand_omp_simd): Handle addressable fd->loop.v. Adjust expand_omp_for_init_counts callers. (expand_omp_taskloop_for_outer, expand_omp_taskloop_for_inner): New functions. (expand_omp_for): Call expand_omp_taskloop_for_* for taskloop. Handle doacross loops. (expand_omp_target): Handle GF_OMP_TARGET_KIND_ENTER_DATA and GF_OMP_TARGET_KIND_EXIT_DATA. Pass flags and depend arguments to GOMP_target_{41,update_41,enter_exit_data} libcalls. (expand_omp): Don't expand ordered depend constructs here, record ord_stmt instead for later expand_omp_for_generic. (build_omp_regions_1): Handle GF_OMP_TARGET_KIND_ENTER_DATA and GF_OMP_TARGET_KIND_EXIT_DATA. Treat GIMPLE_OMP_ORDERED with depend clause as stand-alone directive. (lower_omp_ordered_clauses): New function. (lower_omp_ordered): Handle OMP_CLAUSE_SIMD, for OMP_CLAUSE_DEPEND don't lower anything. (lower_omp_for_lastprivate): Use last _looptemp_ clause on taskloop for comparison. (lower_omp_for): Handle taskloop constructs. 
Adjust OMP_CLAUSE_DECL and OMP_CLAUSE_LINEAR_STEP so that expand_omp_for_* can use it during expansion for linear adjustments. (create_task_copyfn): Handle OMP_CLAUSE_SHARED_FIRSTPRIVATE. (lower_depend_clauses): Assert not seeing sink/source depend kinds. Set TREE_ADDRESSABLE on array. Change first argument from gimple * to tree * pointing to the stmt's clauses. (lower_omp_taskreg): Adjust lower_depend_clauses caller. (lower_omp_target): Handle GF_OMP_TARGET_KIND_ENTER_DATA and GF_OMP_TARGET_KIND_EXIT_DATA, depend clauses, GOMP_MAP_{RELEASE,ALWAYS_{TO,FROM,TOFROM},FIRSTPRIVATE_POINTER,STRUCT} map kinds, OMP_CLAUSE_{FIRSTPRIVATE,PRIVATE,{IS,USE}_DEVICE_PTR clauses. Always use short kind and 8-bit align shift. (lower_omp_regimplify_p): Use IS_TYPE_OR_DECL_P macro. (struct lower_omp_regimplify_operands_data): New type. (lower_omp_regimplify_operands_p, lower_omp_regimplify_operands): New functions. (lower_omp_1): Use lower_omp_regimplify_operands instead of gimple_regimplify_operands. (make_gimple_omp_edges): Handle GF_OMP_TARGET_KIND_ENTER_DATA and GF_OMP_TARGET_KIND_EXIT_DATA. Treat GIMPLE_OMP_ORDERED with depend clause as stand-alone directive. (simd_clone_clauses_extract): Honor OMP_CLAUSE_LINEAR_KIND. (simd_clone_mangle): Mangle the various linear kinds per the new ABI. (simd_clone_adjust_argument_types): Handle SIMD_CLONE_ARG_TYPE_LINEAR_*_CONSTANT_STEP. (simd_clone_init_simd_arrays): Don't do anything for uval. (simd_clone_adjust): Handle SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP like SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP. Handle SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP. * omp-low.h (omp_member_access_dummy_var): New prototype. * passes.def (pass_simduid_cleanup): Schedule another copy of the pass after all optimizations. * tree.c (omp_clause_code_name): Add entries for OMP_CLAUSE_{TO_DECLARE,LINK,{USE,IS}_DEVICE_PTR,DEFAULTMAP,HINT} and OMP_CLAUSE_{PRIORITY,GRAINSIZE,NUM_TASKS,NOGROUP,THREADS,SIMD}. (omp_clause_num_ops): Likewise. Bump number of OMP_CLAUSE_REDUCTION arguments to 5 and for OMP_CLAUSE_ORDERED to 1. (walk_tree_1): Adjust for OMP_CLAUSE_ORDERED having 1 argument and OMP_CLAUSE_REDUCTION 5 arguments. Handle OMP_CLAUSE_{TO_DECLARE,LINK,{USE,IS}_DEVICE_PTR,DEFAULTMAP,HINT} and OMP_CLAUSE_{PRIORITY,GRAINSIZE,NUM_TASKS,NOGROUP,THREADS,SIMD} clauses. * tree-core.h (enum omp_clause_linear_kind): New. (struct tree_omp_clause): Change type of map_kind from unsigned char to unsigned int. Add subcode.if_modifier and subcode.linear_kind fields. (enum omp_clause_code): Add OMP_CLAUSE_{TO_DECLARE,LINK,{USE,IS}_DEVICE_PTR,DEFAULTMAP,HINT} and OMP_CLAUSE_{PRIORITY,GRAINSIZE,NUM_TASKS,NOGROUP,THREADS,SIMD}. (OMP_CLAUSE_REDUCTION): Document OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER. (enum omp_clause_depend_kind): Add OMP_CLAUSE_DEPEND_{SOURCE,SINK}. * tree.def (OMP_FOR): Add OMP_FOR_ORIG_DECLS operand. (OMP_CRITICAL): Move before OMP_SINGLE. Add OMP_CRITICAL_CLAUSES operand. (OMP_ORDERED): Move before OMP_SINGLE. Add OMP_ORDERED_CLAUSES operand. (OMP_TASKLOOP, OMP_TARGET_ENTER_DATA, OMP_TARGET_EXIT_DATA): New tree codes. * tree.h (OMP_BODY): Replace OMP_CRITICAL with OMP_TASKGROUP. (OMP_CLAUSE_SET_MAP_KIND): Cast to unsigned int rather than unsigned char. (OMP_CRITICAL_NAME): Adjust to be 3rd operand instead of 2nd. (OMP_CLAUSE_NUM_TASKS_EXPR): Formatting fix. (OMP_STANDALONE_CLAUSES): Adjust to cover OMP_TARGET_{ENTER,EXIT}_DATA. 
(OMP_CLAUSE_DEPEND_SINK_NEGATIVE, OMP_TARGET_COMBINED, OMP_CLAUSE_MAP_PRIVATE, OMP_FOR_ORIG_DECLS, OMP_CLAUSE_IF_MODIFIER, OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION, OMP_CRITICAL_CLAUSES, OMP_CLAUSE_PRIVATE_TASKLOOP_IV, OMP_CLAUSE_LASTPRIVATE_TASKLOOP_IV, OMP_CLAUSE_HINT_EXPR, OMP_CLAUSE_SCHEDULE_SIMD, OMP_CLAUSE_LINEAR_KIND, OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER, OMP_CLAUSE_SHARED_FIRSTPRIVATE, OMP_ORDERED_CLAUSES, OMP_TARGET_ENTER_DATA_CLAUSES, OMP_TARGET_EXIT_DATA_CLAUSES, OMP_CLAUSE_NUM_TASKS_EXPR, OMP_CLAUSE_GRAINSIZE_EXPR, OMP_CLAUSE_PRIORITY_EXPR, OMP_CLAUSE_ORDERED_EXPR): Define. * tree-inline.c (remap_gimple_stmt): Handle clauses on GIMPLE_OMP_ORDERED and GIMPLE_OMP_CRITICAL. For IFN_GOMP_SIMD_ORDERED_{START,END} set has_simduid_loops. * tree-nested.c (convert_nonlocal_omp_clauses): Handle OMP_CLAUSE_{TO_DECLARE,LINK,{USE,IS}_DEVICE_PTR,SIMDLEN,PRIORITY,SIMD} and OMP_CLAUSE_{GRAINSIZE,NUM_TASKS,HINT,NOGROUP,THREADS,DEFAULTMAP} clauses. Handle OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER. (convert_local_omp_clauses): Likewise. * tree-pretty-print.c (dump_omp_clause): Handle OMP_CLAUSE_{TO_DECLARE,LINK,{USE,IS}_DEVICE_PTR,SIMDLEN,PRIORITY,SIMD} and OMP_CLAUSE_{GRAINSIZE,NUM_TASKS,HINT,NOGROUP,THREADS,DEFAULTMAP} clauses. Handle OMP_CLAUSE_IF_MODIFIER, OMP_CLAUSE_ORDERED_EXPR, OMP_CLAUSE_SCHEDULE_SIMD, OMP_CLAUSE_LINEAR_KIND, OMP_CLAUSE_DEPEND_{SOURCE,SINK}. Use "delete" for GOMP_MAP_FORCE_DEALLOC. Handle GOMP_MAP_{ALWAYS_{TO,FROM,TOFROM},RELEASE,FIRSTPRIVATE_POINTER,STRUCT}. (dump_generic_node): Handle OMP_TASKLOOP, OMP_TARGET_{ENTER,EXIT}_DATA and clauses on OMP_ORDERED and OMP_CRITICAL. * tree-vectorizer.c (adjust_simduid_builtins): Adjust comment. Remove IFN_GOMP_SIMD_ORDERED_{START,END}. (vectorize_loops): Adjust comments. (pass_simduid_cleanup::execute): Likewise. * tree-vect-stmts.c (vectorizable_simd_clone_call): Handle SIMD_CLONE_ARG_TYPE_LINEAR_{REF,VAL,UVAL}_CONSTANT_STEP. * wide-int.h (wi::gcd): New. gcc/c-family/ 2015-10-13 Jakub Jelinek <jakub@redhat.com> Aldy Hernandez <aldyh@redhat.com> * c-common.c (enum c_builtin_type): Define DEF_FUNCTION_TYPE_9, DEF_FUNCTION_TYPE_10 and DEF_FUNCTION_TYPE_11. (c_define_builtins): Likewise. * c-common.h (enum c_omp_clause_split): Add C_OMP_CLAUSE_SPLIT_TASKLOOP. (c_finish_omp_critical, c_finish_omp_ordered): Add CLAUSES argument. (c_finish_omp_for): Add ORIG_DECLV argument. * c-cppbuiltin.c (c_cpp_builtins): Predefine _OPENMP as 201511 instead of 201307. * c-omp.c (c_finish_omp_critical): Add CLAUSES argument, set OMP_CRITICAL_CLAUSES to it. (c_finish_omp_ordered): Add CLAUSES argument, set OMP_ORDERED_CLAUSES to it. (c_finish_omp_for): Add ORIG_DECLV argument, set OMP_FOR_ORIG_DECLS to it if OMP_FOR. Clear DECL_INITIAL on the IVs. (c_omp_split_clauses): Handle OpenMP 4.5 combined/composite constructs and new OpenMP 4.5 clauses. Clear OMP_CLAUSE_SCHEDULE_SIMD if not combined with OMP_SIMD. Add verification code. * c-pragma.c (omp_pragmas_simd): Add taskloop. * c-pragma.h (enum pragma_kind): Add PRAGMA_OMP_TASKLOOP. (enum pragma_omp_clause): Add PRAGMA_OMP_CLAUSE_{DEFAULTMAP,GRAINSIZE,HINT,{IS,USE}_DEVICE_PTR} and PRAGMA_OMP_CLAUSE_{LINK,NOGROUP,NUM_TASKS,PRIORITY,SIMD,THREADS}. gcc/c/ 2015-10-13 Jakub Jelinek <jakub@redhat.com> Aldy Hernandez <aldyh@redhat.com> * c-parser.c (c_parser_pragma): Handle PRAGMA_OMP_ORDERED here. (c_parser_omp_clause_name): Handle OpenMP 4.5 clauses. (c_parser_omp_variable_list): Handle structure elements for map, to and from clauses. Handle array sections in reduction clause. Formatting fixes. 
(c_parser_omp_clause_if): Add IS_OMP argument, handle parsing of if clause modifiers. (c_parser_omp_clause_num_tasks, c_parser_omp_clause_grainsize, c_parser_omp_clause_priority, c_parser_omp_clause_hint, c_parser_omp_clause_defaultmap, c_parser_omp_clause_use_device_ptr, c_parser_omp_clause_is_device_ptr): New functions. (c_parser_omp_clause_ordered): Parse optional parameter. (c_parser_omp_clause_reduction): Handle array reductions. (c_parser_omp_clause_schedule): Parse optional simd modifier. (c_parser_omp_clause_nogroup, c_parser_omp_clause_orderedkind): New functions. (c_parser_omp_clause_linear): Parse linear clause modifiers. (c_parser_omp_clause_depend_sink): New function. (c_parser_omp_clause_depend): Parse source/sink depend kinds. (c_parser_omp_clause_map): Parse release/delete map kinds and optional always modifier. (c_parser_oacc_all_clauses): Adjust c_parser_omp_clause_if and c_finish_omp_clauses callers. (c_parser_omp_all_clauses): Likewise. Parse OpenMP 4.5 clauses. Parse "to" as OMP_CLAUSE_TO_DECLARE if on declare target directive. (c_parser_oacc_cache): Adjust c_finish_omp_clauses caller. (OMP_CRITICAL_CLAUSE_MASK): Define. (c_parser_omp_critical): Parse critical clauses. (c_parser_omp_for_loop): Handle doacross loops, adjust c_finish_omp_for and c_finish_omp_clauses callers. (OMP_SIMD_CLAUSE_MASK): Add simdlen clause. (c_parser_omp_simd): Allow ordered clause if it has no parameter. (OMP_FOR_CLAUSE_MASK): Add linear clause. (c_parser_omp_for): Disallow ordered clause when combined with distribute. Disallow linear clause when combined with distribute and not combined with simd. (OMP_ORDERED_CLAUSE_MASK, OMP_ORDERED_DEPEND_CLAUSE_MASK): Define. (c_parser_omp_ordered): Add CONTEXT argument, remove LOC argument, parse clauses and if depend clause is found, don't parse a body. (c_parser_omp_parallel): Disallow copyin clause on target parallel. Allow target parallel without for after it. (OMP_TASK_CLAUSE_MASK): Add priority clause. (OMP_TARGET_DATA_CLAUSE_MASK): Add use_device_ptr clause. (c_parser_omp_target_data): Diagnose no map clauses or clauses with invalid kinds. (OMP_TARGET_UPDATE_CLAUSE_MASK): Add depend and nowait clauses. (OMP_TARGET_ENTER_DATA_CLAUSE_MASK, OMP_TARGET_EXIT_DATA_CLAUSE_MASK): Define. (c_parser_omp_target_enter_data, c_parser_omp_target_exit_data): New functions. (OMP_TARGET_CLAUSE_MASK): Add depend, nowait, private, firstprivate, defaultmap and is_device_ptr clauses. (c_parser_omp_target): Parse target parallel and target simd. Set OMP_TARGET_COMBINED on combined constructs. Parse target enter data and target exit data. Diagnose invalid map kinds. (OMP_DECLARE_TARGET_CLAUSE_MASK): Define. (c_parser_omp_declare_target): Parse OpenMP 4.5 forms of this construct. (c_parser_omp_declare_reduction): Use STRIP_NOPS when checking for &omp_priv. (OMP_TASKLOOP_CLAUSE_MASK): Define. (c_parser_omp_taskloop): New function. (c_parser_omp_construct): Don't handle PRAGMA_OMP_ORDERED here, handle PRAGMA_OMP_TASKLOOP. (c_parser_cilk_for): Adjust c_finish_omp_clauses callers. * c-tree.h (c_finish_omp_clauses): Add two new arguments. * c-typeck.c (handle_omp_array_sections_1): Fix comment typo. Add IS_OMP argument, handle structure element bases, diagnose bitfields, pass IS_OMP recursively, diagnose known zero length array sections in depend clauses, handle array sections in reduction clause, diagnose negative length even for pointers. 
(handle_omp_array_sections): Add IS_OMP argument, use auto_vec for types, pass IS_OMP down to handle_omp_array_sections_1, handle array sections in reduction clause, set OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION if map could be zero length array section, use GOMP_MAP_FIRSTPRIVATE_POINTER for IS_OMP. (c_finish_omp_clauses): Add IS_OMP and DECLARE_SIMD arguments. Handle new OpenMP 4.5 clauses and new restrictions for the old ones. gcc/cp/ 2015-10-13 Jakub Jelinek <jakub@redhat.com> Aldy Hernandez <aldyh@redhat.com> * class.c (finish_struct_1): Call finish_omp_declare_simd_methods. * cp-gimplify.c (cp_gimplify_expr): Handle OMP_TASKLOOP. (cp_genericize_r): Likewise. (cxx_omp_finish_clause): Don't diagnose references. (cxx_omp_disregard_value_expr): New function. * cp-objcp-common.h (LANG_HOOKS_OMP_DISREGARD_VALUE_EXPR): Redefine. * cp-tree.h (OMP_FOR_GIMPLIFYING_P): Document for OMP_TASKLOOP. (DECL_OMP_PRIVATIZED_MEMBER): Define. (finish_omp_declare_simd_methods, push_omp_privatization_clauses, pop_omp_privatization_clauses, save_omp_privatization_clauses, restore_omp_privatization_clauses, omp_privatize_field, cxx_omp_disregard_value_expr): New prototypes. (finish_omp_clauses): Add two new arguments. (finish_omp_for): Add ORIG_DECLV argument. * parser.c (cp_parser_lambda_body): Call save_omp_privatization_clauses and restore_omp_privatization_clauses. (cp_parser_omp_clause_name): Handle OpenMP 4.5 clauses. (cp_parser_omp_var_list_no_open): Handle structure elements for map, to and from clauses. Handle array sections in reduction clause. Parse this keyword. Formatting fixes. (cp_parser_omp_clause_if): Add IS_OMP argument, handle parsing of if clause modifiers. (cp_parser_omp_clause_num_tasks, cp_parser_omp_clause_grainsize, cp_parser_omp_clause_priority, cp_parser_omp_clause_hint, cp_parser_omp_clause_defaultmap): New functions. (cp_parser_omp_clause_ordered): Parse optional parameter. (cp_parser_omp_clause_reduction): Handle array reductions. (cp_parser_omp_clause_schedule): Parse optional simd modifier. (cp_parser_omp_clause_nogroup, cp_parser_omp_clause_orderedkind): New functions. (cp_parser_omp_clause_linear): Parse linear clause modifiers. (cp_parser_omp_clause_depend_sink): New function. (cp_parser_omp_clause_depend): Parse source/sink depend kinds. (cp_parser_omp_clause_map): Parse release/delete map kinds and optional always modifier. (cp_parser_oacc_all_clauses): Adjust cp_parser_omp_clause_if and finish_omp_clauses callers. (cp_parser_omp_all_clauses): Likewise. Parse OpenMP 4.5 clauses. Parse "to" as OMP_CLAUSE_TO_DECLARE if on declare target directive. (OMP_CRITICAL_CLAUSE_MASK): Define. (cp_parser_omp_critical): Parse critical clauses. (cp_parser_omp_for_incr): Use cp_tree_equal if processing_template_decl. (cp_parser_omp_for_loop_init): Return tree instead of bool. Handle non-static data member iterators. (cp_parser_omp_for_loop): Handle doacross loops, adjust finish_omp_for and finish_omp_clauses callers. (cp_omp_split_clauses): Adjust finish_omp_clauses caller. (OMP_SIMD_CLAUSE_MASK): Add simdlen clause. (cp_parser_omp_simd): Allow ordered clause if it has no parameter. (OMP_FOR_CLAUSE_MASK): Add linear clause. (cp_parser_omp_for): Disallow ordered clause when combined with distribute. Disallow linear clause when combined with distribute and not combined with simd. (OMP_ORDERED_CLAUSE_MASK, OMP_ORDERED_DEPEND_CLAUSE_MASK): Define. (cp_parser_omp_ordered): Add CONTEXT argument, return bool instead of tree, parse clauses and if depend clause is found, don't parse a body. 
(cp_parser_omp_parallel): Disallow copyin clause on target parallel. Allow target parallel without for after it. (OMP_TASK_CLAUSE_MASK): Add priority clause. (OMP_TARGET_DATA_CLAUSE_MASK): Add use_device_ptr clause. (cp_parser_omp_target_data): Diagnose no map clauses or clauses with invalid kinds. (OMP_TARGET_UPDATE_CLAUSE_MASK): Add depend and nowait clauses. (OMP_TARGET_ENTER_DATA_CLAUSE_MASK, OMP_TARGET_EXIT_DATA_CLAUSE_MASK): Define. (cp_parser_omp_target_enter_data, cp_parser_omp_target_exit_data): New functions. (OMP_TARGET_CLAUSE_MASK): Add depend, nowait, private, firstprivate, defaultmap and is_device_ptr clauses. (cp_parser_omp_target): Parse target parallel and target simd. Set OMP_TARGET_COMBINED on combined constructs. Parse target enter data and target exit data. Diagnose invalid map kinds. (cp_parser_oacc_cache): Adjust finish_omp_clauses caller. (OMP_DECLARE_TARGET_CLAUSE_MASK): Define. (cp_parser_omp_declare_target): Parse OpenMP 4.5 forms of this construct. (OMP_TASKLOOP_CLAUSE_MASK): Define. (cp_parser_omp_taskloop): New function. (cp_parser_omp_construct): Don't handle PRAGMA_OMP_ORDERED here, handle PRAGMA_OMP_TASKLOOP. (cp_parser_pragma): Handle PRAGMA_OMP_ORDERED here directly, handle PRAGMA_OMP_TASKLOOP, call push_omp_privatization_clauses and pop_omp_privatization_clauses around parsing calls. (cp_parser_cilk_for): Adjust finish_omp_clauses caller. * pt.c (apply_late_template_attributes): Adjust tsubst_omp_clauses and finish_omp_clauses callers. (tsubst_omp_clause_decl): Return NULL if decl is NULL. For TREE_LIST, copy over OMP_CLAUSE_DEPEND_SINK_NEGATIVE bit. Use tsubst_expr instead of tsubst_copy, undo convert_from_reference effects. (tsubst_omp_clauses): Add ALLOW_FIELDS argument. Handle new OpenMP 4.5 clauses. Use tsubst_omp_clause_decl for more clauses. If ALLOW_FIELDS, handle non-static data members in the clauses. Clear OMP_CLAUSE_LINEAR_STEP if it has been cleared before. (omp_parallel_combined_clauses): New variable. (tsubst_omp_for_iterator): Add ORIG_DECLV argument, recur on OMP_FOR_ORIG_DECLS, handle non-static data member iterators. Improve handling of clauses on combined constructs. (tsubst_expr): Call push_omp_privatization_clauses and pop_omp_privatization_clauses around instantiation of certain OpenMP constructs, improve handling of clauses on combined constructs, handle OMP_TASKLOOP, adjust tsubst_omp_for_iterator, tsubst_omp_clauses and finish_omp_for callers, handle clauses on critical and ordered, handle OMP_TARGET_{ENTER,EXIT}_DATA. (instantiate_decl): Call save_omp_privatization_clauses and restore_omp_privatization_clauses around instantiation. (dependent_omp_for_p): Fix up comment typo. Handle SCOPE_REF. * semantics.c (omp_private_member_map, omp_private_member_vec, omp_private_member_ignore_next): New variables. (finish_non_static_data_member): Return dummy decl for privatized non-static data members. (omp_clause_decl_field, omp_clause_printable_decl, omp_note_field_privatization, omp_privatize_field): New functions. (handle_omp_array_sections_1): Fix comment typo. Add IS_OMP argument, handle structure element bases, diagnose bitfields, pass IS_OMP recursively, diagnose known zero length array sections in depend clauses, handle array sections in reduction clause, diagnose negative length even for pointers. 
(handle_omp_array_sections): Add IS_OMP argument, use auto_vec for types, pass IS_OMP down to handle_omp_array_sections_1, handle array sections in reduction clause, set OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION if map could be zero length array section, use GOMP_MAP_FIRSTPRIVATE_POINTER for IS_OMP. (finish_omp_reduction_clause): Handle array sections and arrays. Use omp_clause_printable_decl. (finish_omp_declare_simd_methods, cp_finish_omp_clause_depend_sink): New functions. (finish_omp_clauses): Add ALLOW_FIELDS and DECLARE_SIMD arguments. Handle new OpenMP 4.5 clauses and new restrictions for the old ones, handle non-static data members, reject this keyword when not allowed. (push_omp_privatization_clauses, pop_omp_privatization_clauses, save_omp_privatization_clauses, restore_omp_privatization_clauses): New functions. (handle_omp_for_class_iterator): Handle OMP_TASKLOOP class iterators. Add collapse and ordered arguments. Fix handling of lastprivate iterators in doacross loops. (finish_omp_for): Add ORIG_DECLV argument, handle doacross loops, adjust c_finish_omp_for, handle_omp_for_class_iterator and finish_omp_clauses callers. Fill in OMP_CLAUSE_LINEAR_STEP on simd loops with non-static data member iterators. gcc/fortran/ 2015-10-13 Jakub Jelinek <jakub@redhat.com> Ilya Verbin <ilya.verbin@intel.com> * f95-lang.c (DEF_FUNCTION_TYPE_9, DEF_FUNCTION_TYPE_10, DEF_FUNCTION_TYPE_11, DEF_FUNCTION_TYPE_VAR_1): Define. * trans-openmp.c (gfc_trans_omp_clauses): Set OMP_CLAUSE_IF_MODIFIER to ERROR_MARK, OMP_CLAUSE_ORDERED_EXPR to NULL. (gfc_trans_omp_critical): Adjust for addition of clauses. (gfc_trans_omp_ordered): Likewise. * types.def (BT_FN_BOOL_UINT_LONGPTR_LONGPTR_LONGPTR, BT_FN_BOOL_UINT_ULLPTR_ULLPTR_ULLPTR, BT_FN_BOOL_UINT_LONGPTR_LONG_LONGPTR_LONGPTR, BT_FN_BOOL_UINT_ULLPTR_ULL_ULLPTR_ULLPTR, BT_FN_VOID_INT_SIZE_PTR_PTR_PTR_UINT_PTR, BT_FN_VOID_INT_OMPFN_SIZE_PTR_PTR_PTR_UINT_PTR, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR_INT, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_UINT_LONG_INT_LONG_LONG_LONG, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_UINT_LONG_INT_ULL_ULL_ULL, BT_FN_VOID_LONG_VAR, BT_FN_VOID_ULL_VAR): New. (BT_FN_VOID_INT_PTR_SIZE_PTR_PTR_PTR, BT_FN_VOID_INT_OMPFN_PTR_SIZE_PTR_PTR_PTR, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR): Remove. gcc/lto/ 2015-10-13 Jakub Jelinek <jakub@redhat.com> * lto-lang.c (DEF_FUNCTION_TYPE_9, DEF_FUNCTION_TYPE_10, DEF_FUNCTION_TYPE_11): Define. gcc/jit/ 2015-10-13 Jakub Jelinek <jakub@redhat.com> * jit-builtins.c (DEF_FUNCTION_TYPE_9, DEF_FUNCTION_TYPE_10, DEF_FUNCTION_TYPE_11): Define. * jit-builtins.h (DEF_FUNCTION_TYPE_9, DEF_FUNCTION_TYPE_10, DEF_FUNCTION_TYPE_11): Define. gcc/ada/ 2015-10-13 Jakub Jelinek <jakub@redhat.com> * gcc-interface/utils.c (DEF_FUNCTION_TYPE_9, DEF_FUNCTION_TYPE_10, DEF_FUNCTION_TYPE_11): Define. gcc/testsuite/ 2015-10-13 Jakub Jelinek <jakub@redhat.com> Aldy Hernandez <aldyh@redhat.com> * c-c++-common/gomp/cancel-1.c (f2): Add map clause to target data. * c-c++-common/gomp/clauses-1.c: New test. * c-c++-common/gomp/clauses-2.c: New test. * c-c++-common/gomp/clauses-3.c: New test. * c-c++-common/gomp/clauses-4.c: New test. * c-c++-common/gomp/declare-target-1.c: New test. * c-c++-common/gomp/declare-target-2.c: New test. * c-c++-common/gomp/depend-3.c: New test. * c-c++-common/gomp/depend-4.c: New test. * c-c++-common/gomp/doacross-1.c: New test. * c-c++-common/gomp/if-1.c: New test. * c-c++-common/gomp/if-2.c: New test. * c-c++-common/gomp/linear-1.c: New test. 
* c-c++-common/gomp/map-2.c: New test. * c-c++-common/gomp/map-3.c: New test. * c-c++-common/gomp/nesting-1.c (f_omp_parallel, f_omp_target_data): Add map clause to target data. * c-c++-common/gomp/nesting-warn-1.c (f_omp_target): Likewise. * c-c++-common/gomp/ordered-1.c: New test. * c-c++-common/gomp/ordered-2.c: New test. * c-c++-common/gomp/ordered-3.c: New test. * c-c++-common/gomp/pr61486-1.c (foo): Remove linear clause on non-iterator. * c-c++-common/gomp/pr61486-2.c (test, test2): Remove ordered clause and ordered construct where no longer allowed. * c-c++-common/gomp/priority-1.c: New test. * c-c++-common/gomp/reduction-1.c: New test. * c-c++-common/gomp/schedule-simd-1.c: New test. * c-c++-common/gomp/sink-1.c: New test. * c-c++-common/gomp/sink-2.c: New test. * c-c++-common/gomp/sink-3.c: New test. * c-c++-common/gomp/sink-4.c: New test. * c-c++-common/gomp/udr-1.c: New test. * c-c++-common/taskloop-1.c: New test. * c-c++-common/cpp/openmp-define-3.c: Adjust for the new value of _OPENMP macro. * c-c++-common/cilk-plus/PS/body.c (foo): Adjust expected diagnostics. * c-c++-common/goacc-gomp/nesting-fail-1.c (f_acc_parallel, f_acc_kernels, f_acc_data, f_acc_loop): Add map clause to target data. * gcc.dg/gomp/clause-1.c: * gcc.dg/gomp/reduction-1.c: New test. * gcc.dg/gomp/sink-fold-1.c: New test. * gcc.dg/gomp/sink-fold-2.c: New test. * gcc.dg/gomp/sink-fold-3.c: New test. * gcc.dg/vect/vect-simd-clone-15.c: New test. * g++.dg/gomp/clause-1.C (T::test): Remove dg-error on privatization of non-static data members. * g++.dg/gomp/clause-3.C (foo): Remove one dg-error directive. Add some linear clause tests. * g++.dg/gomp/declare-simd-3.C: New test. * g++.dg/gomp/linear-1.C: New test. * g++.dg/gomp/member-1.C: New test. * g++.dg/gomp/member-2.C: New test. * g++.dg/gomp/pr66571-2.C: New test. * g++.dg/gomp/pr67504.C (foo): Add test for ordered clause with dependent argument. * g++.dg/gomp/pr67522.C (foo): Add test for invalid array section in reduction clause. * g++.dg/gomp/reference-1.C: New test. * g++.dg/gomp/sink-1.C: New test. * g++.dg/gomp/sink-2.C: New test. * g++.dg/gomp/sink-3.C: New test. * g++.dg/gomp/task-1.C: Remove both dg-error directives. * g++.dg/gomp/this-1.C: New test. * g++.dg/gomp/this-2.C: New test. * g++.dg/vect/simd-clone-2.cc: New test. * g++.dg/vect/simd-clone-2.h: New test. * g++.dg/vect/simd-clone-3.cc: New test. * g++.dg/vect/simd-clone-4.cc: New test. * g++.dg/vect/simd-clone-4.h: New test. * g++.dg/vect/simd-clone-5.cc: New test. include/ 2015-10-13 Jakub Jelinek <jakub@redhat.com> Ilya Verbin <ilya.verbin@intel.com> * gomp-constants.h (GOMP_MAP_FLAG_ALWAYS): Define. (enum gomp_map_kind): Add GOMP_MAP_FIRSTPRIVATE, GOMP_MAP_FIRSTPRIVATE_INT, GOMP_MAP_USE_DEVICE_PTR, GOMP_MAP_ZERO_LEN_ARRAY_SECTION, GOMP_MAP_ALWAYS_TO, GOMP_MAP_ALWAYS_FROM, GOMP_MAP_ALWAYS_TOFROM, GOMP_MAP_STRUCT, GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION, GOMP_MAP_DELETE, GOMP_MAP_RELEASE, GOMP_MAP_FIRSTPRIVATE_POINTER. (GOMP_MAP_ALWAYS_TO_P, GOMP_MAP_ALWAYS_FROM_P): Define. (GOMP_TASK_FLAG_UNTIED, GOMP_TASK_FLAG_FINAL, GOMP_TASK_FLAG_MERGEABLE, GOMP_TASK_FLAG_DEPEND, GOMP_TASK_FLAG_PRIORITY, GOMP_TASK_FLAG_UP, GOMP_TASK_FLAG_GRAINSIZE, GOMP_TASK_FLAG_IF, GOMP_TASK_FLAG_NOGROUP, GOMP_TARGET_FLAG_NOWAIT, GOMP_TARGET_FLAG_EXIT_DATA, GOMP_TARGET_FLAG_UPDATE): Define. 
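As a point of reference for the new map kinds and target flags just listed, a minimal user-level sketch of the OpenMP 4.5 unstructured data mapping they support; the function name, array and scale factor are illustrative only and not taken from the patch:

/* Illustrative only: the pointer A is not mapped on the target construct
   itself, so it is treated as a zero-length array section and resolves to
   the device copy created by the enter data directive.  */
void
scale_on_device (float *a, int n)
{
  #pragma omp target enter data map(to: a[0:n])

  #pragma omp target
  for (int i = 0; i < n; i++)
    a[i] *= 2.0f;

  #pragma omp target exit data map(from: a[0:n])
}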
libgomp/ 2015-10-13 Jakub Jelinek <jakub@redhat.com> Aldy Hernandez <aldyh@redhat.com> Ilya Verbin <ilya.verbin@intel.com> * config/linux/affinity.c (omp_get_place_num_procs, omp_get_place_proc_ids, gomp_get_place_proc_ids_8): New functions. * config/linux/doacross.h: New file. * config/posix/affinity.c (omp_get_place_num_procs, omp_get_place_proc_ids, gomp_get_place_proc_ids_8): New functions. * config/posix/doacross.h: New file. * env.c: Include gomp-constants.h. (struct gomp_task_icv): Rename run_sched_modifier to run_sched_chunk_size. (gomp_max_task_priority_var): New variable. (parse_schedule): Rename run_sched_modifier to run_sched_chunk_size. (handle_omp_display_env): Change _OPENMP value from 201307 to 201511. Print OMP_MAX_TASK_PRIORITY. (initialize_env): Parse OMP_MAX_TASK_PRIORITY. (omp_set_schedule, omp_get_schedule): Rename modifier argument to chunk_size and run_sched_modifier to run_sched_chunk_size. (omp_get_max_task_priority, omp_get_initial_device, omp_get_num_places, omp_get_place_num, omp_get_partition_num_places, omp_get_partition_place_nums): New functions. * fortran.c (omp_set_schedule_, omp_set_schedule_8_, omp_get_schedule_, omp_get_schedule_8_): Rename modifier argument to chunk_size. (omp_get_num_places_, omp_get_place_num_procs_, omp_get_place_num_procs_8_, omp_get_place_proc_ids_, omp_get_place_proc_ids_8_, omp_get_place_num_, omp_get_partition_num_places_, omp_get_partition_place_nums_, omp_get_partition_place_nums_8_, omp_get_initial_device_, omp_get_max_task_priority_): New functions. * libgomp_g.h (GOMP_loop_doacross_static_start, GOMP_loop_doacross_dynamic_start, GOMP_loop_doacross_guided_start, GOMP_loop_doacross_runtime_start, GOMP_loop_ull_doacross_static_start, GOMP_loop_ull_doacross_dynamic_start, GOMP_loop_ull_doacross_guided_start, GOMP_loop_ull_doacross_runtime_start, GOMP_doacross_post, GOMP_doacross_wait, GOMP_doacross_ull_post, GOMP_doacross_wait, GOMP_taskloop, GOMP_taskloop_ull, GOMP_target_41, GOMP_target_data_41, GOMP_target_update_41, GOMP_target_enter_exit_data): New prototypes. (GOMP_task): Add prototype argument. * libgomp.h (_LIBGOMP_CHECKING_): Define to 0 if not yet defined. (struct gomp_doacross_work_share): New type. (struct gomp_work_share): Add doacross field. (struct gomp_task_icv): Rename run_sched_modifier to run_sched_chunk_size. (enum gomp_task_kind): Rename GOMP_TASK_IFFALSE to GOMP_TASK_UNDEFERRED. Add comments. (struct gomp_task_depend_entry): Add comments. (struct gomp_task): Likewise. (struct gomp_taskgroup): Likewise. (struct gomp_target_task): New type. (struct gomp_team): Add comment. (gomp_get_place_proc_ids_8, gomp_doacross_init, gomp_doacross_ull_init, gomp_task_maybe_wait_for_dependencies, gomp_create_target_task, gomp_target_task_fn): New prototypes. (struct target_var_desc): New type. (struct target_mem_desc): Adjust comment. Use struct target_var_desc instead of splay_tree_key for list. (REFCOUNT_INFINITY): Define. (struct splay_tree_key_s): Remove copy_from field. (struct gomp_device_descr): Add dev2dev_func field. (enum gomp_map_vars_kind): New enum. (gomp_map_vars): Add one argument. 
* libgomp.map (OMP_4.5): Export omp_get_max_task_priority, omp_get_max_task_priority_, omp_get_num_places, omp_get_num_places_, omp_get_place_num_procs, omp_get_place_num_procs_, omp_get_place_num_procs_8_, omp_get_place_proc_ids, omp_get_place_proc_ids_, omp_get_place_proc_ids_8_, omp_get_place_num, omp_get_place_num_, omp_get_partition_num_places, omp_get_partition_num_places_, omp_get_partition_place_nums, omp_get_partition_place_nums_, omp_get_partition_place_nums_8_, omp_get_initial_device, omp_get_initial_device_, omp_target_alloc, omp_target_free, omp_target_is_present, omp_target_memcpy, omp_target_memcpy_rect, omp_target_associate_ptr and omp_target_disassociate_ptr. (GOMP_4.0.2): Renamed to ... (GOMP_4.5): ... this. Export GOMP_target_41, GOMP_target_data_41, GOMP_target_update_41, GOMP_target_enter_exit_data, GOMP_taskloop, GOMP_taskloop_ull, GOMP_loop_doacross_dynamic_start, GOMP_loop_doacross_guided_start, GOMP_loop_doacross_runtime_start, GOMP_loop_doacross_static_start, GOMP_doacross_post, GOMP_doacross_wait, GOMP_loop_ull_doacross_dynamic_start, GOMP_loop_ull_doacross_guided_start, GOMP_loop_ull_doacross_runtime_start, GOMP_loop_ull_doacross_static_start, GOMP_doacross_ull_post and GOMP_doacross_ull_wait. * libgomp.texi: Document omp_get_max_task_priority. Rename modifier argument to chunk_size for omp_set_schedule and omp_get_schedule. Document OMP_MAX_TASK_PRIORITY env var. * loop.c (GOMP_loop_runtime_start): Adjust for run_sched_modifier to run_sched_chunk_size renaming. (GOMP_loop_ordered_runtime_start): Likewise. (gomp_loop_doacross_static_start, gomp_loop_doacross_dynamic_start, gomp_loop_doacross_guided_start, GOMP_loop_doacross_runtime_start, GOMP_parallel_loop_runtime_start): New functions. (GOMP_parallel_loop_runtime): Adjust for run_sched_modifier to run_sched_chunk_size renaming. (GOMP_loop_doacross_static_start, GOMP_loop_doacross_dynamic_start, GOMP_loop_doacross_guided_start): New functions or aliases. * loop_ull.c (GOMP_loop_ull_runtime_start): Adjust for run_sched_modifier to run_sched_chunk_size renaming. (GOMP_loop_ull_ordered_runtime_start): Likewise. (gomp_loop_ull_doacross_static_start, gomp_loop_ull_doacross_dynamic_start, gomp_loop_ull_doacross_guided_start, GOMP_loop_ull_doacross_runtime_start): New functions. (GOMP_loop_ull_doacross_static_start, GOMP_loop_ull_doacross_dynamic_start, GOMP_loop_ull_doacross_guided_start): New functions or aliases. * oacc-mem.c (acc_map_data, present_create_copy, gomp_acc_insert_pointer): Pass GOMP_MAP_VARS_OPENACC instead of false to gomp_map_vars. (gomp_acc_remove_pointer): Use copy_from from target_var_desc. * oacc-parallel.c (GOACC_data_start): Pass GOMP_MAP_VARS_OPENACC instead of false to gomp_map_vars. (GOACC_parallel_keyed): Likewise. Use copy_from from target_var_desc. * omp.h.in (omp_lock_hint_t): New type. (omp_init_lock_with_hint, omp_init_nest_lock_with_hint, omp_get_num_places, omp_get_place_num_procs, omp_get_place_proc_ids, omp_get_place_num, omp_get_partition_num_places, omp_get_partition_place_nums, omp_get_initial_device, omp_get_max_task_priority, omp_target_alloc, omp_target_free, omp_target_is_present, omp_target_memcpy, omp_target_memcpy_rect, omp_target_associate_ptr, omp_target_disassociate_ptr): New prototypes. * omp_lib.f90.in (omp_lock_hint_kind): New parameter. (omp_lock_hint_none, omp_lock_hint_uncontended, omp_lock_hint_contended, omp_lock_hint_nonspeculative, omp_lock_hint_speculative): New parameters. 
(omp_init_lock_with_hint, omp_init_nest_lock_with_hint, omp_get_num_places, omp_get_place_num_procs, omp_get_place_proc_ids, omp_get_place_num, omp_get_partition_num_places, omp_get_partition_place_nums, omp_get_initial_device, omp_get_max_task_priority): New interfaces. (omp_set_schedule, omp_get_schedule): Rename modifier argument to chunk_size. * omp_lib.h.in (omp_lock_hint_kind): New parameter. (omp_lock_hint_none, omp_lock_hint_uncontended, omp_lock_hint_contended, omp_lock_hint_nonspeculative, omp_lock_hint_speculative): New parameters. (omp_init_lock_with_hint, omp_init_nest_lock_with_hint, omp_get_num_places, omp_get_place_num_procs, omp_get_place_proc_ids, omp_get_place_num, omp_get_partition_num_places, omp_get_partition_place_nums, omp_get_initial_device, omp_get_max_task_priority): New functions and subroutines. * ordered.c: Include stdarg.h and string.h. (MAX_COLLAPSED_BITS): Define. (gomp_doacross_init, GOMP_doacross_post, GOMP_doacross_wait, gomp_doacross_ull_init, GOMP_doacross_ull_post, GOMP_doacross_ull_wait): New functions. * target.c: Include errno.h. (resolve_device): If device is not initialized, call gomp_init_device on it. (gomp_map_lookup): New function. (gomp_map_vars_existing): Add tgt_var argument, fill it in. Don't bump refcount if REFCOUNT_INFINITY. Handle GOMP_MAP_ALWAYS_TO_P. (get_kind): Rename is_openacc argument to short_mapkind. (gomp_map_pointer): Use gomp_map_lookup. (gomp_map_fields_existing): New function. (gomp_map_vars): Rename is_openacc argument to short_mapkind and is_target to pragma_kind. Handle GOMP_MAP_VARS_ENTER_DATA, handle GOMP_MAP_FIRSTPRIVATE_INT, GOMP_MAP_STRUCT, GOMP_MAP_USE_DEVICE_PTR, GOMP_MAP_ZERO_LEN_ARRAY_SECTION. Adjust for tgt->list changed type and copy_from living in there. (gomp_copy_from_async): Adjust for tgt->list changed type and copy_from living in there. (gomp_unmap_vars): Likewise. (gomp_update): Likewise. Rename is_openacc argument to short_mapkind. Don't fail if object is not mapped. (gomp_load_image_to_device): Initialize refcount to REFCOUNT_INFINITY. (gomp_target_fallback): New function. (gomp_get_target_fn_addr): Likewise. (GOMP_target): Adjust gomp_map_vars caller, use gomp_get_target_fn_addr and gomp_target_fallback. (GOMP_target_41): New function. (gomp_target_data_fallback): New function. (GOMP_target_data): Use it, adjust gomp_map_vars caller. (GOMP_target_data_41): New function. (GOMP_target_update): Adjust gomp_update caller. (GOMP_target_update_41): New function. (gomp_exit_data, GOMP_target_enter_exit_data, gomp_target_task_fn, omp_target_alloc, omp_target_free, omp_target_is_present, omp_target_memcpy, omp_target_memcpy_rect_worker, omp_target_memcpy_rect, omp_target_associate_ptr, omp_target_disassociate_ptr, gomp_load_plugin_for_device): New functions. * task.c: Include gomp-constants.h. Include taskloop.c twice to get GOMP_taskloop and GOMP_taskloop_ull definitions. (gomp_task_handle_depend): New function. (GOMP_task): Use it. Add priority argument. Use gomp-constant.h constants instead of hardcoded numbers. Rename GOMP_TASK_IFFALSE to GOMP_TASK_UNDEFERRED. (gomp_create_target_task): New function. (verify_children_queue, verify_taskgroup_queue, verify_task_queue): New functions. (gomp_task_run_pre): Call verify_*_queue functions. If an upcoming tied task is about to leave the sibling or taskgroup queues in an invalid state, adjust appropriately. Remove taskgroup argument. Add comments. (gomp_task_run_post_handle_dependers): Add comments. (gomp_task_run_post_remove_parent): Likewise. 
(gomp_barrier_handle_tasks): Adjust gomp_task_run_pre caller. (GOMP_taskwait): Likewise. Add comments. (gomp_task_maybe_wait_for_dependencies): Fix scheduling problem such that the first non parent_depends_on task does not end up at the end of the children queue. (GOMP_taskgroup_start): Rename GOMP_TASK_IFFALSE to GOMP_TASK_UNDEFERRED. (GOMP_taskgroup_end): Adjust gomp_task_run_pre caller. * taskloop.c: New file. * testsuite/lib/libgomp.exp (check_effective_target_offload_device_nonshared_as): New proc. * testsuite/libgomp.c/affinity-2.c: New test. * testsuite/libgomp.c/doacross-1.c: New test. * testsuite/libgomp.c/doacross-2.c: New test. * testsuite/libgomp.c/examples-4/declare_target-1.c (fib_wrapper): Add map clause to target. * testsuite/libgomp.c/examples-4/declare_target-4.c (accum): Likewise. * testsuite/libgomp.c/examples-4/declare_target-5.c (accum): Likewise. * testsuite/libgomp.c/examples-4/device-1.c (main): Likewise. * testsuite/libgomp.c/examples-4/device-3.c (main): Likewise. * testsuite/libgomp.c/examples-4/target_data-3.c (gramSchmidt): Likewise. * testsuite/libgomp.c/examples-4/teams-2.c (dotprod): Likewise. * testsuite/libgomp.c/examples-4/teams-3.c (dotprod): Likewise. * testsuite/libgomp.c/examples-4/teams-4.c (dotprod): Likewise. * testsuite/libgomp.c/for-2.h (OMPTGT, OMPTO, OMPFROM): Define if not defined. Use those where needed. * testsuite/libgomp.c/for-4.c: New test. * testsuite/libgomp.c/for-5.c: New test. * testsuite/libgomp.c/for-6.c: New test. * testsuite/libgomp.c/linear-1.c: New test. * testsuite/libgomp.c/ordered-4.c: New test. * testsuite/libgomp.c/pr66199-2.c (f2): Adjust for linear clause only allowed on the loop iterator. * testsuite/libgomp.c/pr66199-3.c: New test. * testsuite/libgomp.c/pr66199-4.c: New test. * testsuite/libgomp.c/reduction-7.c: New test. * testsuite/libgomp.c/reduction-8.c: New test. * testsuite/libgomp.c/reduction-9.c: New test. * testsuite/libgomp.c/reduction-10.c: New test. * testsuite/libgomp.c/target-1.c (fn2, fn3, fn4): Add map(tofrom:s). * testsuite/libgomp.c/target-2.c (fn2, fn3, fn4): Likewise. * testsuite/libgomp.c/target-7.c (foo): Add map(h) where needed. * testsuite/libgomp.c/target-11.c: New test. * testsuite/libgomp.c/target-12.c: New test. * testsuite/libgomp.c/target-13.c: New test. * testsuite/libgomp.c/target-14.c: New test. * testsuite/libgomp.c/target-15.c: New test. * testsuite/libgomp.c/target-16.c: New test. * testsuite/libgomp.c/target-17.c: New test. * testsuite/libgomp.c/target-18.c: New test. * testsuite/libgomp.c/target-19.c: New test. * testsuite/libgomp.c/target-20.c: New test. * testsuite/libgomp.c/target-21.c: New test. * testsuite/libgomp.c/target-22.c: New test. * testsuite/libgomp.c/target-23.c: New test. * testsuite/libgomp.c/target-24.c: New test. * testsuite/libgomp.c/target-25.c: New test. * testsuite/libgomp.c/target-26.c: New test. * testsuite/libgomp.c/target-27.c: New test. * testsuite/libgomp.c/taskloop-1.c: New test. * testsuite/libgomp.c/taskloop-2.c: New test. * testsuite/libgomp.c/taskloop-3.c: New test. * testsuite/libgomp.c/taskloop-4.c: New test. * testsuite/libgomp.c++/ctor-13.C: New test. * testsuite/libgomp.c++/doacross-1.C: New test. * testsuite/libgomp.c++/examples-4/declare_target-2.C: Replace offload_device with offload_device_nonshared_as. * testsuite/libgomp.c++/for-12.C: New test. * testsuite/libgomp.c++/for-13.C: New test. * testsuite/libgomp.c++/for-14.C: New test. * testsuite/libgomp.c++/linear-1.C: New test. * testsuite/libgomp.c++/member-1.C: New test. 
* testsuite/libgomp.c++/member-2.C: New test. * testsuite/libgomp.c++/member-3.C: New test. * testsuite/libgomp.c++/member-4.C: New test. * testsuite/libgomp.c++/member-5.C: New test. * testsuite/libgomp.c++/ordered-1.C: New test. * testsuite/libgomp.c++/reduction-5.C: New test. * testsuite/libgomp.c++/reduction-6.C: New test. * testsuite/libgomp.c++/reduction-7.C: New test. * testsuite/libgomp.c++/reduction-8.C: New test. * testsuite/libgomp.c++/reduction-9.C: New test. * testsuite/libgomp.c++/reduction-10.C: New test. * testsuite/libgomp.c++/reference-1.C: New test. * testsuite/libgomp.c++/simd14.C: New test. * testsuite/libgomp.c++/target-2.C (fn2): Add map(tofrom: s) clause. * testsuite/libgomp.c++/target-5.C: New test. * testsuite/libgomp.c++/target-6.C: New test. * testsuite/libgomp.c++/target-7.C: New test. * testsuite/libgomp.c++/target-8.C: New test. * testsuite/libgomp.c++/target-9.C: New test. * testsuite/libgomp.c++/target-10.C: New test. * testsuite/libgomp.c++/target-11.C: New test. * testsuite/libgomp.c++/target-12.C: New test. * testsuite/libgomp.c++/taskloop-1.C: New test. * testsuite/libgomp.c++/taskloop-2.C: New test. * testsuite/libgomp.c++/taskloop-3.C: New test. * testsuite/libgomp.c++/taskloop-4.C: New test. * testsuite/libgomp.c++/taskloop-5.C: New test. * testsuite/libgomp.c++/taskloop-6.C: New test. * testsuite/libgomp.c++/taskloop-7.C: New test. * testsuite/libgomp.c++/taskloop-8.C: New test. * testsuite/libgomp.c++/taskloop-9.C: New test. * testsuite/libgomp.fortran/affinity1.f90: New test. * testsuite/libgomp.fortran/affinity2.f90: New test. liboffloadmic/ 2015-10-13 Ilya Verbin <ilya.verbin@intel.com> * plugin/libgomp-plugin-intelmic.cpp (GOMP_OFFLOAD_dev2dev): New function. * plugin/offload_target_main.cpp (__offload_target_tgt2tgt): New static function, register it in liboffloadmic. From-SVN: r228777
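For orientation before the runtime source below, a minimal sketch of the new tasking features this patch wires up (taskloop with grainsize and priority, plus task dependences handled by the depend machinery in task.c); the function and variable names are illustrative only:

/* Illustrative only: run a loop as tasks of roughly 1024 iterations each,
   then two sibling tasks ordered through depend clauses.  The priority
   value is a scheduling hint bounded by OMP_MAX_TASK_PRIORITY.  */
void
taskloop_demo (int n, float a, float *x, float *y)
{
  #pragma omp parallel
  #pragma omp single
  {
    #pragma omp taskloop grainsize(1024) priority(2)
    for (int i = 0; i < n; i++)
      y[i] += a * x[i];

    /* taskloop has an implicit taskgroup (no nogroup clause), so all of
       the loop tasks have completed at this point.  */
    #pragma omp task depend(out: y[0]) priority(5)
    y[0] *= 2.0f;

    #pragma omp task depend(in: y[0])
    x[0] = y[0];
  }
}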
/* Copyright (C) 2007-2015 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the maintenance of tasks in response to task
   creation and termination.  */

#include "libgomp.h"
#include <stdlib.h>
#include <string.h>
#include "gomp-constants.h"

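/* Dependence tracking uses libgomp's inline hash table.  "hashtab.h" expects
   the includer to define hash_entry_type and the htab_alloc/htab_free hooks
   before it is included, and to supply htab_hash/htab_eq; entries here are
   keyed by the address named in a depend clause.  */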
typedef struct gomp_task_depend_entry *hash_entry_type;

static inline void *
htab_alloc (size_t size)
{
  return gomp_malloc (size);
}

static inline void
htab_free (void *ptr)
{
  free (ptr);
}

#include "hashtab.h"

static inline hashval_t
htab_hash (hash_entry_type element)
{
  return hash_pointer (element->addr);
}

static inline bool
htab_eq (hash_entry_type x, hash_entry_type y)
{
  return x->addr == y->addr;
}

/* Initialize a new task data structure.  */

void
gomp_init_task (struct gomp_task *task, struct gomp_task *parent_task,
                struct gomp_task_icv *prev_icv)
{
  task->parent = parent_task;
  task->icv = *prev_icv;
  task->kind = GOMP_TASK_IMPLICIT;
  task->taskwait = NULL;
  task->in_tied_task = false;
  task->final_task = false;
  task->copy_ctors_done = false;
  task->parent_depends_on = false;
  task->children = NULL;
  task->taskgroup = NULL;
  task->dependers = NULL;
  task->depend_hash = NULL;
  task->depend_count = 0;
}

/* Clean up a task, after completing it.  */

void
gomp_end_task (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;

  gomp_finish_task (task);
  thr->task = task->parent;
}

/* Orphan the task in CHILDREN and all its siblings.  */

static inline void
gomp_clear_parent (struct gomp_task *children)
{
  struct gomp_task *task = children;

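  /* The children of a task form a circular list, so keep following
     next_child until we arrive back at the head.  */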
  if (task)
    do
      {
        task->parent = NULL;
        task = task->next_child;
      }
    while (task != children);
}

/* Helper function for GOMP_task and gomp_create_target_task.  Depend clause
   handling for undeferred task creation.  */

static void
gomp_task_handle_depend (struct gomp_task *task, struct gomp_task *parent,
                         void **depend)
{
size_t ndepend = (uintptr_t) depend[0];
|
|
size_t nout = (uintptr_t) depend[1];
|
|
size_t i;
|
|
hash_entry_type ent;
|
|
|
|
task->depend_count = ndepend;
|
|
task->num_dependees = 0;
|
|
if (parent->depend_hash == NULL)
|
|
parent->depend_hash = htab_create (2 * ndepend > 12 ? 2 * ndepend : 12);
|
|
for (i = 0; i < ndepend; i++)
|
|
{
|
|
task->depend[i].addr = depend[2 + i];
|
|
task->depend[i].next = NULL;
|
|
task->depend[i].prev = NULL;
|
|
task->depend[i].task = task;
|
|
task->depend[i].is_in = i >= nout;
|
|
task->depend[i].redundant = false;
|
|
task->depend[i].redundant_out = false;
|
|
|
|
hash_entry_type *slot = htab_find_slot (&parent->depend_hash,
|
|
&task->depend[i], INSERT);
|
|
hash_entry_type out = NULL, last = NULL;
|
|
if (*slot)
|
|
{
|
|
/* If multiple depends on the same task are the same, all but the
|
|
first one are redundant. As inout/out come first, if any of them
|
|
is inout/out, it will win, which is the right semantics. */
|
|
if ((*slot)->task == task)
|
|
{
|
|
task->depend[i].redundant = true;
|
|
continue;
|
|
}
|
|
for (ent = *slot; ent; ent = ent->next)
|
|
{
|
|
if (ent->redundant_out)
|
|
break;
|
|
|
|
last = ent;
|
|
|
|
/* depend(in:...) doesn't depend on earlier depend(in:...). */
|
|
if (i >= nout && ent->is_in)
|
|
continue;
|
|
|
|
if (!ent->is_in)
|
|
out = ent;
|
|
|
|
struct gomp_task *tsk = ent->task;
|
|
if (tsk->dependers == NULL)
|
|
{
|
|
tsk->dependers
|
|
= gomp_malloc (sizeof (struct gomp_dependers_vec)
|
|
+ 6 * sizeof (struct gomp_task *));
|
|
tsk->dependers->n_elem = 1;
|
|
tsk->dependers->allocated = 6;
|
|
tsk->dependers->elem[0] = task;
|
|
task->num_dependees++;
|
|
continue;
|
|
}
|
|
/* We already have some other dependency on tsk from earlier
|
|
depend clause. */
|
|
else if (tsk->dependers->n_elem
|
|
&& (tsk->dependers->elem[tsk->dependers->n_elem - 1]
|
|
== task))
|
|
continue;
|
|
else if (tsk->dependers->n_elem == tsk->dependers->allocated)
|
|
{
|
|
tsk->dependers->allocated
|
|
= tsk->dependers->allocated * 2 + 2;
|
|
tsk->dependers
|
|
= gomp_realloc (tsk->dependers,
|
|
sizeof (struct gomp_dependers_vec)
|
|
+ (tsk->dependers->allocated
|
|
* sizeof (struct gomp_task *)));
|
|
}
|
|
tsk->dependers->elem[tsk->dependers->n_elem++] = task;
|
|
task->num_dependees++;
|
|
}
|
|
task->depend[i].next = *slot;
|
|
(*slot)->prev = &task->depend[i];
|
|
}
|
|
*slot = &task->depend[i];
|
|
|
|
/* There is no need to store more than one depend({,in}out:) task per
|
|
address in the hash table chain for the purpose of creation of
|
|
deferred tasks, because each out depends on all earlier outs, thus it
|
|
is enough to record just the last depend({,in}out:). For depend(in:),
|
|
we need to keep all of the previous ones not terminated yet, because
|
|
a later depend({,in}out:) might need to depend on all of them. So, if
|
|
the new task's clause is depend({,in}out:), we know there is at most
|
|
one other depend({,in}out:) clause in the list (out). For
|
|
non-deferred tasks we want to see all outs, so they are moved to the
|
|
end of the chain, after first redundant_out entry all following
|
|
entries should be redundant_out. */
|
|
if (!task->depend[i].is_in && out)
|
|
{
|
|
if (out != last)
|
|
{
|
|
out->next->prev = out->prev;
|
|
out->prev->next = out->next;
|
|
out->next = last->next;
|
|
out->prev = last;
|
|
last->next = out;
|
|
if (out->next)
|
|
out->next->prev = out;
|
|
}
|
|
out->redundant_out = true;
|
|
}
|
|
}
|
|
}
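
/* Illustrative sketch, not part of libgomp and kept out of compilation:
   the DEPEND array decoded above is flat -- element 0 holds the number of
   depend clauses, element 1 the number of out/inout clauses (which come
   first), and the remaining elements the addresses.  For a hypothetical
   "#pragma omp task depend(out: x) depend(in: y, z)" a compiler could
   build it roughly like this; the function name is a stand-in.  */
#if 0
#include <stdint.h>

int x, y, z;

void
build_depend_array (void **depend)
{
  depend[0] = (void *) (uintptr_t) 3;   /* three depend clauses in total */
  depend[1] = (void *) (uintptr_t) 1;   /* one of them is out/inout */
  depend[2] = &x;                       /* out/inout addresses first ... */
  depend[3] = &y;                       /* ... then the in addresses */
  depend[4] = &z;
}
#endif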

/* Called when encountering an explicit task directive.  If IF_CLAUSE is
   false, then we must not delay in executing the task.  If UNTIED is true,
   then the task may be executed by any member of the team.

   DEPEND is an array containing:
     depend[0]: number of depend elements.
     depend[1]: number of depend elements of type "out".
     depend[2..N+1]: address of [1..N]th depend element.  */

void
GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
           long arg_size, long arg_align, bool if_clause, unsigned flags,
           void **depend, int priority)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

#ifdef HAVE_BROKEN_POSIX_SEMAPHORES
  /* If pthread_mutex_* is used for omp_*lock*, then each task must be
     tied to one thread all the time.  This means UNTIED tasks must be
     tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN
     might be running on different thread than FN.  */
  if (cpyfn)
    if_clause = false;
  flags &= ~GOMP_TASK_FLAG_UNTIED;
#endif

  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team
      && (gomp_team_barrier_cancelled (&team->barrier)
          || (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
    return;

  if ((flags & GOMP_TASK_FLAG_PRIORITY) == 0)
    priority = 0;
  /* FIXME, use priority.  */
  (void) priority;

  if (!if_clause || team == NULL
      || (thr->task && thr->task->final_task)
      || team->task_count > 64 * team->nthreads)
    {
      struct gomp_task task;

      /* If there are depend clauses and earlier deferred sibling tasks
         with depend clauses, check if there isn't a dependency.  If there
         is, we need to wait for them.  There is no need to handle
         depend clauses for non-deferred tasks other than this, because
         the parent task is suspended until the child task finishes and thus
         it can't start further child tasks.  */
      if ((flags & GOMP_TASK_FLAG_DEPEND)
          && thr->task && thr->task->depend_hash)
        gomp_task_maybe_wait_for_dependencies (depend);

      gomp_init_task (&task, thr->task, gomp_icv (false));
      task.kind = GOMP_TASK_UNDEFERRED;
      task.final_task = (thr->task && thr->task->final_task)
                        || (flags & GOMP_TASK_FLAG_FINAL);
      if (thr->task)
        {
          task.in_tied_task = thr->task->in_tied_task;
          task.taskgroup = thr->task->taskgroup;
        }
      thr->task = &task;
      if (__builtin_expect (cpyfn != NULL, 0))
        {
          char buf[arg_size + arg_align - 1];
          char *arg = (char *) (((uintptr_t) buf + arg_align - 1)
                                & ~(uintptr_t) (arg_align - 1));
          cpyfn (arg, data);
          fn (arg);
        }
      else
        fn (data);
      /* Access to "children" is normally done inside a task_lock
         mutex region, but the only way this particular task.children
         can be set is if this thread's task work function (fn)
         creates children.  So since the setter is *this* thread, we
         need no barriers here when testing for non-NULL.  We can have
         task.children set by the current thread then changed by a
         child thread, but seeing a stale non-NULL value is not a
         problem.  Once past the task_lock acquisition, this thread
         will see the real value of task.children.  */
      if (task.children != NULL)
        {
          gomp_mutex_lock (&team->task_lock);
          gomp_clear_parent (task.children);
          gomp_mutex_unlock (&team->task_lock);
        }
      gomp_end_task ();
    }
  else
    {
      struct gomp_task *task;
      struct gomp_task *parent = thr->task;
      struct gomp_taskgroup *taskgroup = parent->taskgroup;
      char *arg;
      bool do_wake;
      size_t depend_size = 0;

      if (flags & GOMP_TASK_FLAG_DEPEND)
        depend_size = ((uintptr_t) depend[0]
                       * sizeof (struct gomp_task_depend_entry));
      task = gomp_malloc (sizeof (*task) + depend_size
                          + arg_size + arg_align - 1);
      arg = (char *) (((uintptr_t) (task + 1) + depend_size + arg_align - 1)
                      & ~(uintptr_t) (arg_align - 1));
      gomp_init_task (task, parent, gomp_icv (false));
      task->kind = GOMP_TASK_UNDEFERRED;
      task->in_tied_task = parent->in_tied_task;
      task->taskgroup = taskgroup;
      thr->task = task;
      if (cpyfn)
        {
          cpyfn (arg, data);
          task->copy_ctors_done = true;
        }
      else
        memcpy (arg, data, arg_size);
      thr->task = parent;
      task->kind = GOMP_TASK_WAITING;
      task->fn = fn;
      task->fn_data = arg;
      task->final_task = (flags & GOMP_TASK_FLAG_FINAL) >> 1;
      gomp_mutex_lock (&team->task_lock);
      /* If parallel or taskgroup has been cancelled, don't start new
         tasks.  */
      if (__builtin_expect ((gomp_team_barrier_cancelled (&team->barrier)
                             || (taskgroup && taskgroup->cancelled))
                            && !task->copy_ctors_done, 0))
        {
          gomp_mutex_unlock (&team->task_lock);
          gomp_finish_task (task);
          free (task);
          return;
        }
      if (taskgroup)
        taskgroup->num_children++;
      if (depend_size)
        {
          gomp_task_handle_depend (task, parent, depend);
          if (task->num_dependees)
            {
              gomp_mutex_unlock (&team->task_lock);
              return;
            }
        }
      if (parent->children)
        {
          task->next_child = parent->children;
          task->prev_child = parent->children->prev_child;
          task->next_child->prev_child = task;
          task->prev_child->next_child = task;
        }
      else
        {
          task->next_child = task;
          task->prev_child = task;
        }
      parent->children = task;
      if (taskgroup)
        {
          /* If applicable, place task into its taskgroup.  */
          if (taskgroup->children)
            {
              task->next_taskgroup = taskgroup->children;
              task->prev_taskgroup = taskgroup->children->prev_taskgroup;
              task->next_taskgroup->prev_taskgroup = task;
              task->prev_taskgroup->next_taskgroup = task;
            }
          else
            {
              task->next_taskgroup = task;
              task->prev_taskgroup = task;
            }
          taskgroup->children = task;
        }
      if (team->task_queue)
        {
          task->next_queue = team->task_queue;
          task->prev_queue = team->task_queue->prev_queue;
          task->next_queue->prev_queue = task;
          task->prev_queue->next_queue = task;
        }
      else
        {
          task->next_queue = task;
          task->prev_queue = task;
          team->task_queue = task;
        }
      ++team->task_count;
      ++team->task_queued_count;
      gomp_team_barrier_set_task_pending (&team->barrier);
      do_wake = team->task_running_count + !parent->in_tied_task
                < team->nthreads;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        gomp_team_barrier_wake (&team->barrier, 1);
    }
}
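
/* Illustrative sketch, not part of libgomp and kept out of compilation:
   roughly how a compiler might lower a simple deferred task into a call
   to GOMP_task above.  The outlined body (task_fn), the argument struct
   and the spawn wrapper are hypothetical stand-ins; the GOMP_task
   prototype matches the definition in this file.  */
#if 0
#include <stdbool.h>
#include <stddef.h>

extern void GOMP_task (void (*) (void *), void *, void (*) (void *, void *),
                       long, long, bool, unsigned, void **, int);

struct task_args { int *p; int v; };    /* hypothetical outlined firstprivates */

static void
task_fn (void *data)                    /* hypothetical outlined task body */
{
  struct task_args *a = data;
  *a->p = a->v;
}

void
spawn (int *p, int v)
{
  struct task_args args = { p, v };
  /* Deferred, tied task with no copy constructor, no depend clauses and
     no priority: GOMP_task copies the argument block itself.  */
  GOMP_task (task_fn, &args, NULL, sizeof (args), __alignof__ (args),
             true /* if_clause */, 0 /* flags */, NULL /* depend */, 0);
}
#endif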

ialias (GOMP_taskgroup_start)
ialias (GOMP_taskgroup_end)

#define TYPE long
#define UTYPE unsigned long
#define TYPE_is_long 1
#include "taskloop.c"
#undef TYPE
#undef UTYPE
#undef TYPE_is_long

#define TYPE unsigned long long
#define UTYPE TYPE
#define GOMP_taskloop GOMP_taskloop_ull
#include "taskloop.c"
#undef TYPE
#undef UTYPE
#undef GOMP_taskloop
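
/* Illustrative sketch, not part of libgomp and kept out of compilation:
   the block above compiles taskloop.c twice, once for the signed long
   iterator type and once for unsigned long long, by redefining TYPE,
   UTYPE and the entry-point name before each inclusion.  The standalone
   sketch below mirrors that textual-template pattern; "sum_impl.inc" and
   the sum_* names are hypothetical, with the template body shown in the
   comment.  */
#if 0
/* sum_impl.inc -- hypothetical template, included twice below:

     static TYPE
     SUM_NAME (const TYPE *a, unsigned long n)
     {
       TYPE s = 0;
       for (unsigned long i = 0; i < n; i++)
         s += a[i];
       return s;
     }
*/

#define TYPE long
#define SUM_NAME sum_long
#include "sum_impl.inc"
#undef TYPE
#undef SUM_NAME

#define TYPE unsigned long long
#define SUM_NAME sum_ull
#include "sum_impl.inc"
#undef TYPE
#undef SUM_NAME
#endif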

/* Called for nowait target tasks.  */

void
gomp_create_target_task (struct gomp_device_descr *devicep,
                         void (*fn) (void *), size_t mapnum, void **hostaddrs,
                         size_t *sizes, unsigned short *kinds,
                         unsigned int flags, void **depend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team
      && (gomp_team_barrier_cancelled (&team->barrier)
          || (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
    return;

  struct gomp_target_task *ttask;
  struct gomp_task *task;
  struct gomp_task *parent = thr->task;
  struct gomp_taskgroup *taskgroup = parent->taskgroup;
  bool do_wake;
  size_t depend_size = 0;

  if (depend != NULL)
    depend_size = ((uintptr_t) depend[0]
                   * sizeof (struct gomp_task_depend_entry));
  task = gomp_malloc (sizeof (*task) + depend_size
                      + sizeof (*ttask)
                      + mapnum * (sizeof (void *) + sizeof (size_t)
                                  + sizeof (unsigned short)));
  gomp_init_task (task, parent, gomp_icv (false));
  task->kind = GOMP_TASK_WAITING;
  task->in_tied_task = parent->in_tied_task;
  task->taskgroup = taskgroup;
  ttask = (struct gomp_target_task *) &task->depend[(uintptr_t) depend[0]];
  ttask->devicep = devicep;
  ttask->fn = fn;
  ttask->mapnum = mapnum;
  memcpy (ttask->hostaddrs, hostaddrs, mapnum * sizeof (void *));
  ttask->sizes = (size_t *) &ttask->hostaddrs[mapnum];
  memcpy (ttask->sizes, sizes, mapnum * sizeof (size_t));
  ttask->kinds = (unsigned short *) &ttask->sizes[mapnum];
  memcpy (ttask->kinds, kinds, mapnum * sizeof (unsigned short));
  ttask->flags = flags;
  task->fn = gomp_target_task_fn;
  task->fn_data = ttask;
  task->final_task = 0;
  gomp_mutex_lock (&team->task_lock);
  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (__builtin_expect (gomp_team_barrier_cancelled (&team->barrier)
                        || (taskgroup && taskgroup->cancelled), 0))
    {
      gomp_mutex_unlock (&team->task_lock);
      gomp_finish_task (task);
      free (task);
      return;
    }
  if (taskgroup)
    taskgroup->num_children++;
  if (depend_size)
    {
      gomp_task_handle_depend (task, parent, depend);
      if (task->num_dependees)
        {
          gomp_mutex_unlock (&team->task_lock);
          return;
        }
    }
  if (parent->children)
    {
      task->next_child = parent->children;
      task->prev_child = parent->children->prev_child;
      task->next_child->prev_child = task;
      task->prev_child->next_child = task;
    }
  else
    {
      task->next_child = task;
      task->prev_child = task;
    }
  parent->children = task;
  if (taskgroup)
    {
      /* If applicable, place task into its taskgroup.  */
      if (taskgroup->children)
        {
          task->next_taskgroup = taskgroup->children;
          task->prev_taskgroup = taskgroup->children->prev_taskgroup;
          task->next_taskgroup->prev_taskgroup = task;
          task->prev_taskgroup->next_taskgroup = task;
        }
      else
        {
          task->next_taskgroup = task;
          task->prev_taskgroup = task;
        }
      taskgroup->children = task;
    }
  if (team->task_queue)
    {
      task->next_queue = team->task_queue;
      task->prev_queue = team->task_queue->prev_queue;
      task->next_queue->prev_queue = task;
      task->prev_queue->next_queue = task;
    }
  else
    {
      task->next_queue = task;
      task->prev_queue = task;
      team->task_queue = task;
    }
  ++team->task_count;
  ++team->task_queued_count;
  gomp_team_barrier_set_task_pending (&team->barrier);
  do_wake = team->task_running_count + !parent->in_tied_task
            < team->nthreads;
  gomp_mutex_unlock (&team->task_lock);
  if (do_wake)
    gomp_team_barrier_wake (&team->barrier, 1);
}
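
/* Illustrative sketch, not part of libgomp and kept out of compilation:
   gomp_create_target_task packs the task, its depend entries, the target
   task descriptor and the hostaddrs/sizes/kinds arrays into one
   gomp_malloc block.  The standalone sketch below shows the same packed
   single-allocation idea with minimal stand-in structs and plain malloc;
   all names are local to the sketch.  */
#if 0
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct hdr { size_t ndeps; };
struct dep { void *addr; };
struct payload { size_t n; size_t *sizes; void *addrs[]; };

int
main (void)
{
  size_t ndeps = 2, n = 3;
  /* One allocation: header, dep array, payload header, then two arrays.  */
  struct hdr *h = malloc (sizeof (*h) + ndeps * sizeof (struct dep)
                          + sizeof (struct payload)
                          + n * (sizeof (void *) + sizeof (size_t)));
  struct dep *deps = (struct dep *) (h + 1);
  struct payload *p = (struct payload *) (deps + ndeps);
  h->ndeps = ndeps;
  p->n = n;
  p->sizes = (size_t *) &p->addrs[n];   /* sizes live right after addrs */
  memset (p->addrs, 0, n * sizeof (void *));
  memset (p->sizes, 0, n * sizeof (size_t));
  printf ("one block, released with a single free()\n");
  free (h);
  return 0;
}
#endif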

#if _LIBGOMP_CHECKING
/* Sanity check TASK to make sure it is in its parent's children
   queue, and that the tasks therein are in the right order.

   The expected order is:
     parent_depends_on WAITING tasks
     !parent_depends_on WAITING tasks
     TIED tasks

   PARENT is the alleged parent of TASK.  */

static void
verify_children_queue (struct gomp_task *task, struct gomp_task *parent)
{
  if (task->parent != parent)
    gomp_fatal ("verify_children_queue: incompatible parents");
  /* It's OK, Annie was an orphan and she turned out all right.  */
  if (!parent)
    return;

  bool seen_tied = false;
  bool seen_plain_waiting = false;
  bool found = false;
  struct gomp_task *t = parent->children;
  while (1)
    {
      if (t == task)
        found = true;
      if (seen_tied && t->kind == GOMP_TASK_WAITING)
        gomp_fatal ("verify_children_queue: WAITING task after TIED");
      if (t->kind == GOMP_TASK_TIED)
        seen_tied = true;
      else if (t->kind == GOMP_TASK_WAITING)
        {
          if (t->parent_depends_on)
            {
              if (seen_plain_waiting)
                gomp_fatal ("verify_children_queue: parent_depends_on after "
                            "!parent_depends_on");
            }
          else
            seen_plain_waiting = true;
        }
      t = t->next_child;
      if (t == parent->children)
        break;
    }
  if (!found)
    gomp_fatal ("verify_children_queue: child not found in parent queue");
}

/* Sanity check TASK to make sure it is in its taskgroup queue (if
   applicable), and that the tasks therein are in the right order.

   The expected order is that GOMP_TASK_WAITING tasks must come before
   GOMP_TASK_TIED tasks.

   TASK is the task.  */

static void
verify_taskgroup_queue (struct gomp_task *task)
{
  struct gomp_taskgroup *taskgroup = task->taskgroup;
  if (!taskgroup)
    return;

  bool seen_tied = false;
  bool found = false;
  struct gomp_task *t = taskgroup->children;
  while (1)
    {
      if (t == task)
        found = true;
      if (t->kind == GOMP_TASK_WAITING && seen_tied)
        gomp_fatal ("verify_taskgroup_queue: WAITING task after TIED");
      if (t->kind == GOMP_TASK_TIED)
        seen_tied = true;
      t = t->next_taskgroup;
      if (t == taskgroup->children)
        break;
    }
  if (!found)
    gomp_fatal ("verify_taskgroup_queue: child not found in parent queue");
}

/* Verify that TASK is in the team's task queue.  */

static void
verify_task_queue (struct gomp_task *task, struct gomp_team *team)
{
  struct gomp_task *t = team->task_queue;
  if (team)
    while (1)
      {
        if (t == task)
          return;
        t = t->next_queue;
        if (t == team->task_queue)
          break;
      }
  gomp_fatal ("verify_team_queue: child not in team");
}
#endif

static inline bool
gomp_task_run_pre (struct gomp_task *child_task, struct gomp_task *parent,
                   struct gomp_team *team)
{
#if _LIBGOMP_CHECKING
  verify_children_queue (child_task, parent);
  verify_taskgroup_queue (child_task);
  verify_task_queue (child_task, team);
#endif

  if (parent)
    {
      /* Adjust children such that it will point to a next child,
         while the current one is scheduled to be executed.  This way,
         GOMP_taskwait (and others) can schedule a next task while
         waiting.

         Do not remove it entirely from the circular list, as it is
         still a child, though not one we should consider first (say
         by GOMP_taskwait).  */
      if (parent->children == child_task)
        parent->children = child_task->next_child;
      /* TIED tasks cannot come before WAITING tasks.  If we're about
         to make this task TIED, rewire things appropriately.
         However, a TIED task at the end is perfectly fine.  */
      else if (child_task->next_child->kind == GOMP_TASK_WAITING
               && child_task->next_child != parent->children)
        {
          /* Remove from the list.  */
          child_task->prev_child->next_child = child_task->next_child;
          child_task->next_child->prev_child = child_task->prev_child;
          /* Rewire at the end of its siblings.  */
          child_task->next_child = parent->children;
          child_task->prev_child = parent->children->prev_child;
          parent->children->prev_child->next_child = child_task;
          parent->children->prev_child = child_task;
        }

      /* If the current task (child_task) is at the top of the
         parent's last_parent_depends_on, it's about to be removed
         from it.  Adjust last_parent_depends_on appropriately.  */
      if (__builtin_expect (child_task->parent_depends_on, 0)
          && parent->taskwait->last_parent_depends_on == child_task)
        {
          /* The last_parent_depends_on list was built with all
             parent_depends_on entries linked to the prev_child.  Grab
             the next last_parent_depends_on head from this prev_child if
             available...  */
          if (child_task->prev_child->kind == GOMP_TASK_WAITING
              && child_task->prev_child->parent_depends_on)
            parent->taskwait->last_parent_depends_on = child_task->prev_child;
          else
            {
              /* ...otherwise, there are no more parent_depends_on
                 entries waiting to run.  In which case, clear the
                 list.  */
              parent->taskwait->last_parent_depends_on = NULL;
            }
        }
    }

  /* Adjust taskgroup to point to the next taskgroup.  See note above
     regarding adjustment of children as to why the child_task is not
     removed entirely from the circular list.  */
  struct gomp_taskgroup *taskgroup = child_task->taskgroup;
  if (taskgroup)
    {
      if (taskgroup->children == child_task)
        taskgroup->children = child_task->next_taskgroup;
      /* TIED tasks cannot come before WAITING tasks.  If we're about
         to make this task TIED, rewire things appropriately.
         However, a TIED task at the end is perfectly fine.  */
      else if (child_task->next_taskgroup->kind == GOMP_TASK_WAITING
               && child_task->next_taskgroup != taskgroup->children)
        {
          /* Remove from the list.  */
          child_task->prev_taskgroup->next_taskgroup
            = child_task->next_taskgroup;
          child_task->next_taskgroup->prev_taskgroup
            = child_task->prev_taskgroup;
          /* Rewire at the end of its taskgroup.  */
          child_task->next_taskgroup = taskgroup->children;
          child_task->prev_taskgroup = taskgroup->children->prev_taskgroup;
          taskgroup->children->prev_taskgroup->next_taskgroup = child_task;
          taskgroup->children->prev_taskgroup = child_task;
        }
    }

  /* Remove child_task from the task_queue.  */
  child_task->prev_queue->next_queue = child_task->next_queue;
  child_task->next_queue->prev_queue = child_task->prev_queue;
  if (team->task_queue == child_task)
    {
      if (child_task->next_queue != child_task)
        team->task_queue = child_task->next_queue;
      else
        team->task_queue = NULL;
    }
  child_task->kind = GOMP_TASK_TIED;

  if (--team->task_queued_count == 0)
    gomp_team_barrier_clear_task_pending (&team->barrier);
  if ((gomp_team_barrier_cancelled (&team->barrier)
       || (taskgroup && taskgroup->cancelled))
      && !child_task->copy_ctors_done)
    return true;
  return false;
}

static void
gomp_task_run_post_handle_depend_hash (struct gomp_task *child_task)
{
  struct gomp_task *parent = child_task->parent;
  size_t i;

  for (i = 0; i < child_task->depend_count; i++)
    if (!child_task->depend[i].redundant)
      {
        if (child_task->depend[i].next)
          child_task->depend[i].next->prev = child_task->depend[i].prev;
        if (child_task->depend[i].prev)
          child_task->depend[i].prev->next = child_task->depend[i].next;
        else
          {
            hash_entry_type *slot
              = htab_find_slot (&parent->depend_hash, &child_task->depend[i],
                                NO_INSERT);
            if (*slot != &child_task->depend[i])
              abort ();
            if (child_task->depend[i].next)
              *slot = child_task->depend[i].next;
            else
              htab_clear_slot (parent->depend_hash, slot);
          }
      }
}

/* After CHILD_TASK has been run, adjust the various task queues to
   give higher priority to the tasks that depend on CHILD_TASK.

   TEAM is the team to which CHILD_TASK belongs to.  */

static size_t
gomp_task_run_post_handle_dependers (struct gomp_task *child_task,
                                     struct gomp_team *team)
{
  struct gomp_task *parent = child_task->parent;
  size_t i, count = child_task->dependers->n_elem, ret = 0;
  for (i = 0; i < count; i++)
    {
      struct gomp_task *task = child_task->dependers->elem[i];
      if (--task->num_dependees != 0)
        continue;

      struct gomp_taskgroup *taskgroup = task->taskgroup;
      if (parent)
        {
          if (parent->children)
            {
              /* If parent is in gomp_task_maybe_wait_for_dependencies
                 and it doesn't need to wait for this task, put it after
                 all ready to run tasks it needs to wait for.  */
              if (parent->taskwait && parent->taskwait->last_parent_depends_on
                  && !task->parent_depends_on)
                {
                  /* Put depender in last_parent_depends_on.  */
                  struct gomp_task *last_parent_depends_on
                    = parent->taskwait->last_parent_depends_on;
                  task->next_child = last_parent_depends_on->next_child;
                  task->prev_child = last_parent_depends_on;
                }
              else
                {
                  /* Make depender a sibling of child_task, and place
                     it at the top of said sibling list.  */
                  task->next_child = parent->children;
                  task->prev_child = parent->children->prev_child;
                  parent->children = task;
                }
              task->next_child->prev_child = task;
              task->prev_child->next_child = task;
            }
          else
            {
              /* Make depender a sibling of child_task.  */
              task->next_child = task;
              task->prev_child = task;
              parent->children = task;
            }
          if (parent->taskwait)
            {
              if (parent->taskwait->in_taskwait)
                {
                  parent->taskwait->in_taskwait = false;
                  gomp_sem_post (&parent->taskwait->taskwait_sem);
                }
              else if (parent->taskwait->in_depend_wait)
                {
                  parent->taskwait->in_depend_wait = false;
                  gomp_sem_post (&parent->taskwait->taskwait_sem);
                }
              if (parent->taskwait->last_parent_depends_on == NULL
                  && task->parent_depends_on)
                parent->taskwait->last_parent_depends_on = task;
            }
        }
      /* If depender is in a taskgroup, put it at the TOP of its
         taskgroup.  */
      if (taskgroup)
        {
          if (taskgroup->children)
            {
              task->next_taskgroup = taskgroup->children;
              task->prev_taskgroup = taskgroup->children->prev_taskgroup;
              task->next_taskgroup->prev_taskgroup = task;
              task->prev_taskgroup->next_taskgroup = task;
            }
          else
            {
              task->next_taskgroup = task;
              task->prev_taskgroup = task;
            }
          taskgroup->children = task;
          if (taskgroup->in_taskgroup_wait)
            {
              taskgroup->in_taskgroup_wait = false;
              gomp_sem_post (&taskgroup->taskgroup_sem);
            }
        }
      /* Put depender of child_task at the END of the team's
         task_queue.  */
      if (team->task_queue)
        {
          task->next_queue = team->task_queue;
          task->prev_queue = team->task_queue->prev_queue;
          task->next_queue->prev_queue = task;
          task->prev_queue->next_queue = task;
        }
      else
        {
          task->next_queue = task;
          task->prev_queue = task;
          team->task_queue = task;
        }
      ++team->task_count;
      ++team->task_queued_count;
      ++ret;
    }
  free (child_task->dependers);
  child_task->dependers = NULL;
  if (ret > 1)
    gomp_team_barrier_set_task_pending (&team->barrier);
  return ret;
}

static inline size_t
gomp_task_run_post_handle_depend (struct gomp_task *child_task,
                                  struct gomp_team *team)
{
  if (child_task->depend_count == 0)
    return 0;

  /* If parent is gone already, the hash table is freed and nothing
     will use the hash table anymore, no need to remove anything from it.  */
  if (child_task->parent != NULL)
    gomp_task_run_post_handle_depend_hash (child_task);

  if (child_task->dependers == NULL)
    return 0;

  return gomp_task_run_post_handle_dependers (child_task, team);
}

/* Remove CHILD_TASK from its parent.  */

static inline void
gomp_task_run_post_remove_parent (struct gomp_task *child_task)
{
  struct gomp_task *parent = child_task->parent;
  if (parent == NULL)
    return;

  /* If this was the last task the parent was depending on,
     synchronize with gomp_task_maybe_wait_for_dependencies so it can
     clean up and return.  */
  if (__builtin_expect (child_task->parent_depends_on, 0)
      && --parent->taskwait->n_depend == 0
      && parent->taskwait->in_depend_wait)
    {
      parent->taskwait->in_depend_wait = false;
      gomp_sem_post (&parent->taskwait->taskwait_sem);
    }

  /* Remove CHILD_TASK from its sibling list.  */
  child_task->prev_child->next_child = child_task->next_child;
  child_task->next_child->prev_child = child_task->prev_child;
  if (parent->children != child_task)
    return;
  if (child_task->next_child != child_task)
    parent->children = child_task->next_child;
  else
    {
      /* We access task->children in GOMP_taskwait
         outside of the task lock mutex region, so
         need a release barrier here to ensure memory
         written by child_task->fn above is flushed
         before the NULL is written.  */
      __atomic_store_n (&parent->children, NULL, MEMMODEL_RELEASE);
      if (parent->taskwait && parent->taskwait->in_taskwait)
        {
          parent->taskwait->in_taskwait = false;
          gomp_sem_post (&parent->taskwait->taskwait_sem);
        }
    }
}

/* Remove CHILD_TASK from its taskgroup.  */

static inline void
gomp_task_run_post_remove_taskgroup (struct gomp_task *child_task)
{
  struct gomp_taskgroup *taskgroup = child_task->taskgroup;
  if (taskgroup == NULL)
    return;
  child_task->prev_taskgroup->next_taskgroup = child_task->next_taskgroup;
  child_task->next_taskgroup->prev_taskgroup = child_task->prev_taskgroup;
  if (taskgroup->num_children > 1)
    --taskgroup->num_children;
  else
    {
      /* We access taskgroup->num_children in GOMP_taskgroup_end
         outside of the task lock mutex region, so
         need a release barrier here to ensure memory
         written by child_task->fn above is flushed
         before the NULL is written.  */
      __atomic_store_n (&taskgroup->num_children, 0, MEMMODEL_RELEASE);
    }
  if (taskgroup->children != child_task)
    return;
  if (child_task->next_taskgroup != child_task)
    taskgroup->children = child_task->next_taskgroup;
  else
    {
      taskgroup->children = NULL;
      if (taskgroup->in_taskgroup_wait)
        {
          taskgroup->in_taskgroup_wait = false;
          gomp_sem_post (&taskgroup->taskgroup_sem);
        }
    }
}

void
gomp_barrier_handle_tasks (gomp_barrier_state_t state)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  gomp_mutex_lock (&team->task_lock);
  if (gomp_barrier_last_thread (state))
    {
      if (team->task_count == 0)
        {
          gomp_team_barrier_done (&team->barrier, state);
          gomp_mutex_unlock (&team->task_lock);
          gomp_team_barrier_wake (&team->barrier, 0);
          return;
        }
      gomp_team_barrier_set_waiting_for_tasks (&team->barrier);
    }

  while (1)
    {
      bool cancelled = false;
      if (team->task_queue != NULL)
        {
          child_task = team->task_queue;
          cancelled = gomp_task_run_pre (child_task, child_task->parent,
                                         team);
          if (__builtin_expect (cancelled, 0))
            {
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                  to_free = NULL;
                }
              goto finish_cancelled;
            }
          team->task_running_count++;
          child_task->in_tied_task = true;
        }
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        {
          gomp_team_barrier_wake (&team->barrier, do_wake);
          do_wake = 0;
        }
      if (to_free)
        {
          gomp_finish_task (to_free);
          free (to_free);
          to_free = NULL;
        }
      if (child_task)
        {
          thr->task = child_task;
          child_task->fn (child_task->fn_data);
          thr->task = task;
        }
      else
        return;
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
        {
         finish_cancelled:;
          size_t new_tasks
            = gomp_task_run_post_handle_depend (child_task, team);
          gomp_task_run_post_remove_parent (child_task);
          gomp_clear_parent (child_task->children);
          gomp_task_run_post_remove_taskgroup (child_task);
          to_free = child_task;
          child_task = NULL;
          if (!cancelled)
            team->task_running_count--;
          if (new_tasks > 1)
            {
              do_wake = team->nthreads - team->task_running_count;
              if (do_wake > new_tasks)
                do_wake = new_tasks;
            }
          if (--team->task_count == 0
              && gomp_team_barrier_waiting_for_tasks (&team->barrier))
            {
              gomp_team_barrier_done (&team->barrier, state);
              gomp_mutex_unlock (&team->task_lock);
              gomp_team_barrier_wake (&team->barrier, 0);
              gomp_mutex_lock (&team->task_lock);
            }
        }
    }
}

/* Called when encountering a taskwait directive.

   Wait for all children of the current task.  */

void
GOMP_taskwait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  struct gomp_taskwait taskwait;
  int do_wake = 0;

  /* The acquire barrier on load of task->children here synchronizes
     with the write of a NULL in gomp_task_run_post_remove_parent.  It is
     not necessary that we synchronize with other non-NULL writes at
     this point, but we must ensure that all writes to memory by a
     child thread task work function are seen before we exit from
     GOMP_taskwait.  */
  if (task == NULL
      || __atomic_load_n (&task->children, MEMMODEL_ACQUIRE) == NULL)
    return;

  memset (&taskwait, 0, sizeof (taskwait));
  gomp_mutex_lock (&team->task_lock);
  while (1)
    {
      bool cancelled = false;
      if (task->children == NULL)
        {
          bool destroy_taskwait = task->taskwait != NULL;
          task->taskwait = NULL;
          gomp_mutex_unlock (&team->task_lock);
          if (to_free)
            {
              gomp_finish_task (to_free);
              free (to_free);
            }
          if (destroy_taskwait)
            gomp_sem_destroy (&taskwait.taskwait_sem);
          return;
        }
      if (task->children->kind == GOMP_TASK_WAITING)
        {
          child_task = task->children;
          cancelled
            = gomp_task_run_pre (child_task, task, team);
          if (__builtin_expect (cancelled, 0))
            {
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                  to_free = NULL;
                }
              goto finish_cancelled;
            }
        }
      else
        {
          /* All tasks we are waiting for are already running
             in other threads.  Wait for them.  */
          if (task->taskwait == NULL)
            {
              taskwait.in_depend_wait = false;
              gomp_sem_init (&taskwait.taskwait_sem, 0);
              task->taskwait = &taskwait;
            }
          taskwait.in_taskwait = true;
        }
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        {
          gomp_team_barrier_wake (&team->barrier, do_wake);
          do_wake = 0;
        }
      if (to_free)
        {
          gomp_finish_task (to_free);
          free (to_free);
          to_free = NULL;
        }
      if (child_task)
        {
          thr->task = child_task;
          child_task->fn (child_task->fn_data);
          thr->task = task;
        }
      else
        gomp_sem_wait (&taskwait.taskwait_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
        {
         finish_cancelled:;
          size_t new_tasks
            = gomp_task_run_post_handle_depend (child_task, team);

          /* Remove child_task from children list, and set up the next
             sibling to be run.  */
          child_task->prev_child->next_child = child_task->next_child;
          child_task->next_child->prev_child = child_task->prev_child;
          if (task->children == child_task)
            {
              if (child_task->next_child != child_task)
                task->children = child_task->next_child;
              else
                task->children = NULL;
            }
          /* Orphan all the children of CHILD_TASK.  */
          gomp_clear_parent (child_task->children);

          /* Remove CHILD_TASK from its taskgroup.  */
          gomp_task_run_post_remove_taskgroup (child_task);

          to_free = child_task;
          child_task = NULL;
          team->task_count--;
          if (new_tasks > 1)
            {
              do_wake = team->nthreads - team->task_running_count
                        - !task->in_tied_task;
              if (do_wake > new_tasks)
                do_wake = new_tasks;
            }
        }
    }
}
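
/* Illustrative sketch, not part of libgomp and kept out of compilation:
   the release store of NULL in gomp_task_run_post_remove_parent pairs
   with the acquire load at the top of GOMP_taskwait, so that once the
   waiter sees the children list empty it also sees everything the child
   task's work function wrote.  The standalone example below mirrors that
   pairing with the raw GCC __atomic built-ins (__ATOMIC_* instead of the
   internal MEMMODEL_* names); the variables are stand-ins.  */
#if 0
#include <stdio.h>

static int payload;             /* stands in for memory written by child_task->fn */
static void *children_list;     /* stands in for task->children */

static void
worker_side (void)
{
  payload = 42;                 /* work done by the child task */
  /* Release: the store above is ordered before the NULL becomes visible,
     mirroring gomp_task_run_post_remove_parent.  */
  __atomic_store_n (&children_list, (void *) 0, __ATOMIC_RELEASE);
}

static void
waiter_side (void)
{
  /* Acquire: if the NULL is observed, payload == 42 is observed too,
     mirroring the load at the top of GOMP_taskwait.  */
  if (__atomic_load_n (&children_list, __ATOMIC_ACQUIRE) == (void *) 0)
    printf ("payload = %d\n", payload);
}

int
main (void)
{
  children_list = &payload;     /* non-NULL: "one child outstanding" */
  worker_side ();
  waiter_side ();
  return 0;
}
#endif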

/* This is like GOMP_taskwait, but we only wait for tasks that the
   upcoming task depends on.

   DEPEND is as in GOMP_task.  */

void
gomp_task_maybe_wait_for_dependencies (void **depend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;
  struct gomp_team *team = thr->ts.team;
  struct gomp_task_depend_entry elem, *ent = NULL;
  struct gomp_taskwait taskwait;
  struct gomp_task *last_parent_depends_on = NULL;
  size_t ndepend = (uintptr_t) depend[0];
  size_t nout = (uintptr_t) depend[1];
  size_t i;
  size_t num_awaited = 0;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  gomp_mutex_lock (&team->task_lock);
  for (i = 0; i < ndepend; i++)
    {
      elem.addr = depend[i + 2];
      ent = htab_find (task->depend_hash, &elem);
      for (; ent; ent = ent->next)
        if (i >= nout && ent->is_in)
          continue;
        else
          {
            struct gomp_task *tsk = ent->task;
            if (!tsk->parent_depends_on)
              {
                tsk->parent_depends_on = true;
                ++num_awaited;
                /* If a task we need to wait for is not already
                   running and is ready to be scheduled, move it to
                   front, so that we run it as soon as possible.

                   We rearrange the children queue such that all
                   parent_depends_on tasks are first, and
                   last_parent_depends_on points to the last such task
                   we rearranged.  For example, given the following
                   children where PD[123] are the parent_depends_on
                   tasks:

                       task->children
                       |
                       V
                       C1 -> C2 -> C3 -> PD1 -> PD2 -> PD3 -> C4

                   We rearrange such that:

                       task->children
                       |              +--- last_parent_depends_on
                       |              |
                       V              V
                       PD1 -> PD2 -> PD3 -> C1 -> C2 -> C3 -> C4
                */

                if (tsk->num_dependees == 0 && tsk->kind == GOMP_TASK_WAITING)
                  {
                    if (last_parent_depends_on)
                      {
                        tsk->prev_child->next_child = tsk->next_child;
                        tsk->next_child->prev_child = tsk->prev_child;
                        tsk->prev_child = last_parent_depends_on;
                        tsk->next_child = last_parent_depends_on->next_child;
                        tsk->prev_child->next_child = tsk;
                        tsk->next_child->prev_child = tsk;
                      }
                    else if (tsk != task->children)
                      {
                        tsk->prev_child->next_child = tsk->next_child;
                        tsk->next_child->prev_child = tsk->prev_child;
                        tsk->prev_child = task->children->prev_child;
                        tsk->next_child = task->children;
                        task->children = tsk;
                        tsk->prev_child->next_child = tsk;
                        tsk->next_child->prev_child = tsk;
                      }
                    last_parent_depends_on = tsk;
                  }
              }
          }
    }
  if (num_awaited == 0)
    {
      gomp_mutex_unlock (&team->task_lock);
      return;
    }

  memset (&taskwait, 0, sizeof (taskwait));
  taskwait.n_depend = num_awaited;
  taskwait.last_parent_depends_on = last_parent_depends_on;
  gomp_sem_init (&taskwait.taskwait_sem, 0);
  task->taskwait = &taskwait;

  while (1)
    {
      bool cancelled = false;
      if (taskwait.n_depend == 0)
        {
          task->taskwait = NULL;
          gomp_mutex_unlock (&team->task_lock);
          if (to_free)
            {
              gomp_finish_task (to_free);
              free (to_free);
            }
          gomp_sem_destroy (&taskwait.taskwait_sem);
          return;
        }
      if (task->children->kind == GOMP_TASK_WAITING)
        {
          child_task = task->children;
          cancelled
            = gomp_task_run_pre (child_task, task, team);
          if (__builtin_expect (cancelled, 0))
            {
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                  to_free = NULL;
                }
              goto finish_cancelled;
            }
        }
      else
        /* All tasks we are waiting for are already running
           in other threads.  Wait for them.  */
        taskwait.in_depend_wait = true;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        {
          gomp_team_barrier_wake (&team->barrier, do_wake);
          do_wake = 0;
        }
      if (to_free)
        {
          gomp_finish_task (to_free);
          free (to_free);
          to_free = NULL;
        }
      if (child_task)
        {
          thr->task = child_task;
          child_task->fn (child_task->fn_data);
          thr->task = task;
        }
      else
        gomp_sem_wait (&taskwait.taskwait_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
        {
         finish_cancelled:;
          size_t new_tasks
            = gomp_task_run_post_handle_depend (child_task, team);
          if (child_task->parent_depends_on)
            --taskwait.n_depend;

          /* Remove child_task from sibling list.  */
          child_task->prev_child->next_child = child_task->next_child;
          child_task->next_child->prev_child = child_task->prev_child;
          if (task->children == child_task)
            {
              if (child_task->next_child != child_task)
                task->children = child_task->next_child;
              else
                task->children = NULL;
            }

          gomp_clear_parent (child_task->children);
          gomp_task_run_post_remove_taskgroup (child_task);
          to_free = child_task;
          child_task = NULL;
          team->task_count--;
          if (new_tasks > 1)
            {
              do_wake = team->nthreads - team->task_running_count
                        - !task->in_tied_task;
              if (do_wake > new_tasks)
                do_wake = new_tasks;
            }
        }
    }
}

/* Called when encountering a taskyield directive.  */

void
GOMP_taskyield (void)
{
  /* Nothing at the moment.  */
}

void
GOMP_taskgroup_start (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_taskgroup *taskgroup;

  /* If team is NULL, all tasks are executed as
     GOMP_TASK_UNDEFERRED tasks and thus all children tasks of
     taskgroup and their descendant tasks will be finished
     by the time GOMP_taskgroup_end is called.  */
  if (team == NULL)
    return;
  taskgroup = gomp_malloc (sizeof (struct gomp_taskgroup));
  taskgroup->prev = task->taskgroup;
  taskgroup->children = NULL;
  taskgroup->in_taskgroup_wait = false;
  taskgroup->cancelled = false;
  taskgroup->num_children = 0;
  gomp_sem_init (&taskgroup->taskgroup_sem, 0);
  task->taskgroup = taskgroup;
}

void
GOMP_taskgroup_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_taskgroup *taskgroup;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  if (team == NULL)
    return;
  taskgroup = task->taskgroup;

  /* The acquire barrier on load of taskgroup->num_children here
     synchronizes with the write of 0 in gomp_task_run_post_remove_taskgroup.
     It is not necessary that we synchronize with other non-0 writes at
     this point, but we must ensure that all writes to memory by a
     child thread task work function are seen before we exit from
     GOMP_taskgroup_end.  */
  if (__atomic_load_n (&taskgroup->num_children, MEMMODEL_ACQUIRE) == 0)
    goto finish;

  gomp_mutex_lock (&team->task_lock);
  while (1)
    {
      bool cancelled = false;
      if (taskgroup->children == NULL)
        {
          if (taskgroup->num_children)
            {
              if (task->children == NULL)
                goto do_wait;
              child_task = task->children;
            }
          else
            {
              gomp_mutex_unlock (&team->task_lock);
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                }
              goto finish;
            }
        }
      else
        child_task = taskgroup->children;
      if (child_task->kind == GOMP_TASK_WAITING)
        {
          cancelled
            = gomp_task_run_pre (child_task, child_task->parent, team);
          if (__builtin_expect (cancelled, 0))
            {
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                  to_free = NULL;
                }
              goto finish_cancelled;
            }
        }
      else
        {
          child_task = NULL;
         do_wait:
          /* All tasks we are waiting for are already running
             in other threads.  Wait for them.  */
          taskgroup->in_taskgroup_wait = true;
        }
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        {
          gomp_team_barrier_wake (&team->barrier, do_wake);
          do_wake = 0;
        }
      if (to_free)
        {
          gomp_finish_task (to_free);
          free (to_free);
          to_free = NULL;
        }
      if (child_task)
        {
          thr->task = child_task;
          child_task->fn (child_task->fn_data);
          thr->task = task;
        }
      else
        gomp_sem_wait (&taskgroup->taskgroup_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
        {
         finish_cancelled:;
          size_t new_tasks
            = gomp_task_run_post_handle_depend (child_task, team);
          gomp_task_run_post_remove_parent (child_task);
          gomp_clear_parent (child_task->children);
          gomp_task_run_post_remove_taskgroup (child_task);
          to_free = child_task;
          child_task = NULL;
          team->task_count--;
          if (new_tasks > 1)
            {
              do_wake = team->nthreads - team->task_running_count
                        - !task->in_tied_task;
              if (do_wake > new_tasks)
                do_wake = new_tasks;
            }
        }
    }

 finish:
  task->taskgroup = taskgroup->prev;
  gomp_sem_destroy (&taskgroup->taskgroup_sem);
  free (taskgroup);
}
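
/* Illustrative sketch, not part of libgomp and kept out of compilation:
   a compiler could bracket a taskgroup region with the two entry points
   defined above roughly as shown below.  The pragma form in the comment
   and the spawn_some_tasks helper are hypothetical stand-ins.  */
#if 0
/* User code such as

     #pragma omp taskgroup
     {
       ... spawn tasks ...
     }

   lowered to paired runtime calls.  */
extern void GOMP_taskgroup_start (void);
extern void GOMP_taskgroup_end (void);

extern void spawn_some_tasks (void);    /* hypothetical task-spawning body */

void
taskgroup_region (void)
{
  GOMP_taskgroup_start ();
  spawn_some_tasks ();
  /* Does not return until every task created in the region, and all of
     their descendants, have completed.  */
  GOMP_taskgroup_end ();
}
#endif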

int
omp_in_final (void)
{
  struct gomp_thread *thr = gomp_thread ();
  return thr->task && thr->task->final_task;
}

ialias (omp_in_final)
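
/* Illustrative sketch, not part of libgomp and kept out of compilation:
   user-side use of omp_in_final () in a recursive task decomposition.
   Once execution is inside (or included in) a final task, the call
   returns nonzero and no further tasks are spawned.  The fib example,
   its cutoff constants and the final() condition are hypothetical; if
   extracted, it needs -fopenmp.  */
#if 0
#include <omp.h>

static long
fib (int n)
{
  long a, b;
  if (n < 2)
    return n;
  if (omp_in_final () || n < 20)        /* serial cutoff, value illustrative */
    return fib (n - 1) + fib (n - 2);
  #pragma omp task shared(a) final(n < 22)
  a = fib (n - 1);
  #pragma omp task shared(b) final(n < 22)
  b = fib (n - 2);
  #pragma omp taskwait
  return a + b;
}
#endif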