gcc/libgomp/task.c
Jakub Jelinek 28567c40e2 builtin-types.def (BT_FN_VOID_BOOL, [...]): New.
* builtin-types.def (BT_FN_VOID_BOOL, BT_FN_VOID_SIZE_SIZE_PTR,
	BT_FN_UINT_UINT_PTR_PTR, BT_FN_UINT_OMPFN_PTR_UINT_UINT,
	BT_FN_BOOL_UINT_LONGPTR_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR,
	BT_FN_BOOL_UINT_ULLPTR_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR,
	BT_FN_BOOL_LONG_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR,
	BT_FN_BOOL_BOOL_ULL_ULL_ULL_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR): New.
	* gengtype.c (open_base_files): Add omp-general.h.
	* gimple.c (gimple_build_omp_critical):
	(gimple_build_omp_taskgroup): Add CLAUSES argument.  Call
	gimple_omp_taskgroup_set_clauses.
	(gimple_build_omp_atomic_load): Add mo argument, call
	gimple_omp_atomic_set_memory_order.
	(gimple_build_omp_atomic_store): Likewise.
	(gimple_copy): Adjust handling of GIMPLE_OMP_TASKGROUP.
	* gimple.def (GIMPLE_OMP_TASKGROUP): Use GSS_OMP_SINGLE_LAYOUT
	instead of GSS_OMP.
	(GIMPLE_OMP_TEAMS): Use GSS_OMP_PARALLEL_LAYOUT instead
	of GSS_OMP_SINGLE_LAYOUT, adjust comments.
	* gimple.h (enum gf_mask): Add GF_OMP_TEAMS_HOST, GF_OMP_TASK_TASKWAIT
	and GF_OMP_ATOMIC_MEMORY_ORDER.  Remove GF_OMP_ATOMIC_SEQ_CST, use
	different value for GF_OMP_ATOMIC_NEED_VALUE.
	(struct gimple_statement_omp_taskreg): Add GIMPLE_OMP_TEAMS to
	comments.
	(struct gimple_statement_omp_single_layout): And remove here.
	(struct gomp_teams): Inherit from gimple_statement_omp_taskreg rather
	than gimple_statement_omp_single_layout.
	(is_a_helper <gimple_statement_omp_taskreg *>::test): Allow
	GIMPLE_OMP_TEAMS.
	(is_a_helper <const gimple_statement_omp_taskreg *>::test): Likewise.
	(gimple_omp_subcode): Formatting fix.
	(gimple_omp_teams_child_fn, gimple_omp_teams_child_fn_ptr,
	gimple_omp_teams_set_child_fn, gimple_omp_teams_data_arg,
	gimple_omp_teams_data_arg_ptr, gimple_omp_teams_set_data_arg,
	gimple_omp_teams_host, gimple_omp_teams_set_host,
	gimple_omp_task_taskwait_p, gimple_omp_task_set_taskwait_p,
	gimple_omp_taskgroup_clauses, gimple_omp_taskgroup_clauses_ptr,
	gimple_omp_taskgroup_set_clauses): New inline functions.
	(gimple_build_omp_atomic_load): Add enum omp_memory_order argument.
	(gimple_build_omp_atomic_store): Likewise.
	(gimple_omp_atomic_seq_cst_p): Remove.
	(gimple_omp_atomic_memory_order): New function.
	(gimple_omp_atomic_set_seq_cst): Remove.
	(gimple_omp_atomic_set_memory_order): New function.
	(gimple_build_omp_taskgroup): Add clauses argument.
	* gimple-pretty-print.c (dump_gimple_omp_taskgroup): New function.
	(dump_gimple_omp_task): Print taskwait with depend clauses.
	(dump_gimple_omp_atomic_load, dump_gimple_omp_atomic_store): Use
	dump_omp_atomic_memory_order.
	(pp_gimple_stmt_1): Handle GIMPLE_OMP_TASKGROUP.
	* gimplify.c (enum gimplify_omp_var_data): Add GOVD_MAP_ALLOC_ONLY,
	GOVD_MAP_FROM_ONLY and GOVD_NONTEMPORAL.
	(enum omp_region_type): Reserve bits 1 and 2 for auxiliary flags,
	renumber values of most of ORT_* enumerators, add ORT_HOST_TEAMS,
	ORT_COMBINED_HOST_TEAMS, ORT_TASKGROUP, ORT_TASKLOOP and
	ORT_UNTIED_TASKLOOP enumerators.
	(enum gimplify_defaultmap_kind): New.
	(struct gimplify_omp_ctx): Remove target_map_scalars_firstprivate and
	target_map_pointers_as_0len_arrays members, add defaultmap.
	(new_omp_context): Initialize defaultmap member.
	(gimple_add_tmp_var): Handle ORT_TASKGROUP like ORT_WORKSHARE.
	(maybe_fold_stmt): Don't fold even in host teams regions.
	(omp_firstprivatize_variable): Handle ORT_TASKGROUP like
	ORT_WORKSHARE.  Test ctx->defaultmap[GDMK_SCALAR] instead of
	ctx->target_map_scalars_firstprivate.
	(omp_add_variable): Don't add private/firstprivate for VLAs in
	ORT_TASKGROUP.
	(omp_default_clause): Print "taskloop" rather than "task" if
	ORT_*TASKLOOP.
	(omp_notice_variable): Handle ORT_TASKGROUP like ORT_WORKSHARE.
	Handle new defaultmap clause kinds.
	(omp_is_private): Handle ORT_TASKGROUP like ORT_WORKSHARE.  Allow simd
	iterator to be lastprivate or private.  Fix up diagnostics if linear
	is used on collapse>1 simd iterator.
	(omp_check_private): Handle ORT_TASKGROUP like ORT_WORKSHARE.
	(gimplify_omp_depend): New function.
	(gimplify_scan_omp_clauses): Add shared clause on parallel for
	combined parallel master taskloop{, simd} if taskloop has
	firstprivate, lastprivate or reduction clause.  Handle
	OMP_CLAUSE_REDUCTION_TASK diagnostics.  Adjust tests for
	ORT_COMBINED_TEAMS.  Gimplify depend clauses with iterators.  Handle
	cancel and simd OMP_CLAUSE_IF_MODIFIERs.  Handle
	OMP_CLAUSE_NONTEMPORAL.  Handle new defaultmap clause kinds.  Handle
	OMP_CLAUSE_{TASK,IN}_REDUCTION.  Diagnose invalid conditional
	lastprivate.
	(gimplify_adjust_omp_clauses_1): Ignore GOVD_NONTEMPORAL.  Handle
	GOVD_MAP_ALLOC_ONLY and GOVD_MAP_FROM_ONLY.  
	(gimplify_adjust_omp_clauses): Handle OMP_CLAUSE_NONTEMPORAL.  Handle
	OMP_CLAUSE_{TASK,IN}_REDUCTION.
	(gimplify_omp_task): Handle taskwait with depend clauses.
	(gimplify_omp_for): Add shared clause on parallel for combined
	parallel master taskloop{, simd} if taskloop has firstprivate,
	lastprivate or reduction clause.  Use ORT_TASKLOOP or
	ORT_UNTIED_TASKLOOP instead of ORT_TASK or ORT_UNTIED_TASK.  Adjust
	tests for ORT_COMBINED_TEAMS.  Handle C++ range for loops with
	NULL TREE_PURPOSE in OMP_FOR_ORIG_DECLS.  Firstprivatize
	__for_end and __for_range temporaries on OMP_PARALLEL for
	distribute parallel for{, simd}.  Move OMP_CLAUSE_REDUCTION
	and OMP_CLAUSE_IN_REDUCTION from taskloop to the task construct
	sandwiched in between two taskloops.
	(computable_teams_clause): Test ctx->defaultmap[GDMK_SCALAR]
	instead of ctx->target_map_scalars_firstprivate.
	(gimplify_omp_workshare): Set ort to ORT_HOST_TEAMS or
	ORT_COMBINED_HOST_TEAMS if not inside of target construct.  If
	host teams, use gimplify_and_return_first etc. for body like
	for target or target data constructs, and at the end call
	gimple_omp_teams_set_host on the GIMPLE_OMP_TEAMS object.
	(gimplify_omp_atomic): Use OMP_ATOMIC_MEMORY_ORDER instead
	of OMP_ATOMIC_SEQ_CST, pass it as new argument to
	gimple_build_omp_atomic_load and gimple_build_omp_atomic_store, remove
	gimple_omp_atomic_set_seq_cst calls.
	(gimplify_expr) <case OMP_TASKGROUP>: Move handling into a separate
	case, handle taskgroup clauses.
	* lto-streamer-out.c (hash_tree): Handle
	OMP_CLAUSE_{TASK,IN}_REDUCTION.
	* Makefile.in (GTFILES): Add omp-general.h.
	* omp-builtins.def (BUILT_IN_GOMP_TASKWAIT_DEPEND,
	BUILT_IN_GOMP_LOOP_NONMONOTONIC_RUNTIME_START,
	BUILT_IN_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START,
	BUILT_IN_GOMP_LOOP_START, BUILT_IN_GOMP_LOOP_ORDERED_START,
	BUILT_IN_GOMP_LOOP_DOACROSS_START,
	BUILT_IN_GOMP_LOOP_NONMONOTONIC_RUNTIME_NEXT,
	BUILT_IN_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_NEXT,
	BUILT_IN_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START,
	BUILT_IN_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START,
	BUILT_IN_GOMP_LOOP_ULL_START, BUILT_IN_GOMP_LOOP_ULL_ORDERED_START,
	BUILT_IN_GOMP_LOOP_ULL_DOACROSS_START,
	BUILT_IN_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_NEXT,
	BUILT_IN_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_NEXT,
	BUILT_IN_GOMP_PARALLEL_LOOP_NONMONOTONIC_RUNTIME,
	BUILT_IN_GOMP_PARALLEL_LOOP_MAYBE_NONMONOTONIC_RUNTIME,
	BUILT_IN_GOMP_PARALLEL_REDUCTIONS, BUILT_IN_GOMP_SECTIONS2_START,
	BUILT_IN_GOMP_TEAMS_REG, BUILT_IN_GOMP_TASKGROUP_REDUCTION_REGISTER,
	BUILT_IN_GOMP_TASKGROUP_REDUCTION_UNREGISTER,
	BUILT_IN_GOMP_TASK_REDUCTION_REMAP,
	BUILT_IN_GOMP_WORKSHARE_TASK_REDUCTION_UNREGISTER): New builtins.
	* omp-expand.c (workshare_safe_to_combine_p): Return false for
	non-worksharing loops.
	(omp_adjust_chunk_size): Don't adjust anything if chunk_size is zero.
	(determine_parallel_type): Don't combine parallel with worksharing
	which has _reductemp_ clause.
	(expand_parallel_call): Emit the GOMP_*nonmonotonic_runtime* or
	GOMP_*maybe_nonmonotonic_runtime* builtins instead of GOMP_*runtime*
	if there is nonmonotonic modifier or if there is no modifier and no
	ordered clause.  For dynamic and guided schedule without monotonic
	and nonmonotonic modifier, default to nonmonotonic.
	(expand_omp_for): Likewise.  Adjust expand_omp_for_generic caller, use
	GOMP_loop{,_ull}{,_ordered,_doacross}_start builtins if there are
	task reductions.
	(expand_task_call): Add GOMP_TASK_FLAG_REDUCTION flag to flags if
	there are any reduction clauses.
	(expand_taskwait_call): New function.
	(expand_teams_call): New function.
	(expand_omp_taskreg): Allow GIMPLE_OMP_TEAMS and call
	expand_teams_call for it.  Formatting fix.  Handle taskwait with
	depend clauses.
	(expand_omp_for_generic): Add SCHED_ARG argument.  Handle expansion
	of worksharing loops with task reductions.
	(expand_omp_for_static_nochunk, expand_omp_for_static_chunk): Handle
	expansion of worksharing loops with task reductions.
	(expand_omp_sections): Handle expansion of sections with task
	reductions.
	(expand_omp_synch): For host teams call expand_omp_taskreg.
	(omp_memory_order_to_memmodel): New function.
	(expand_omp_atomic_load, expand_omp_atomic_store,
	expand_omp_atomic_fetch_op): Use it and gimple_omp_atomic_memory_order
	instead of gimple_omp_atomic_seq_cst_p.
	(build_omp_regions_1, omp_make_gimple_edges): Treat taskwait with
	depend clauses as a standalone directive.
	* omp-general.c (omp_requires_mask): New variable.
	(omp_extract_for_data): Initialize have_reductemp member.  Allow
	NE_EXPR even in OpenMP loops, transform them into LT_EXPR or
	GT_EXPR loops depending on incr sign.  Formatting fixes.
	* omp-general.h (struct omp_for_data): Add have_reductemp member.
	(enum omp_requires): New enum.
	(omp_requires_mask): Declare.
	* omp-grid.c (grid_eliminate_combined_simd_part): Formatting fix.
	Fix comment typos.
	* omp-low.c (struct omp_context): Add task_reductions and
	task_reduction_map fields.
	(is_host_teams_ctx): New function.
	(is_taskreg_ctx): Return true also if is_host_teams_ctx.
	(use_pointer_for_field): Use is_global_var instead of
	TREE_STATIC || DECL_EXTERNAL, and apply only if not privatized
	in outer contexts.
	(build_outer_var_ref): Ignore taskgroup outer contexts.
	(delete_omp_context): Release task_reductions and task_reduction_map.
	(scan_sharing_clauses): Don't add any fields for reduction clause on
	taskloop.  Handle OMP_CLAUSE__REDUCTEMP_.  Handle
	OMP_CLAUSE_{IN,TASK}_REDUCTION and OMP_CLAUSE_REDUCTION with task
	modifier.  Don't ignore shared clauses in is_host_teams_ctx contexts.
	Handle OMP_CLAUSE_NONTEMPORAL.
	(add_taskreg_looptemp_clauses): Add OMP_CLAUSE__REDUCTEMP_ clause if
	needed.
	(scan_omp_parallel): Add _reductemp_ clause if there are any reduction
	clauses with task modifier.
	(scan_omp_task): Handle taskwait with depend clauses.
	(finish_taskreg_scan): Move field corresponding to _reductemp_ clause
	first.  Move also OMP_CLAUSE__REDUCTEMP_ clause in front if present.
	Handle GIMPLE_OMP_TEAMS like GIMPLE_OMP_PARALLEL.
	(scan_omp_for): Fix comment formatting.
	(scan_omp_teams): Handle host teams constructs.
	(check_omp_nesting_restrictions): Allow teams with no outer
	OpenMP context.  Adjust diagnostics for teams strictly nested into
	some explicit OpenMP construct other than target.  Allow OpenMP atomics
	inside of simd regions.
	(scan_omp_1_stmt): Call scan_sharing_clauses for taskgroups.
	(scan_omp_1_stmt) <case GIMPLE_OMP_TEAMS>: Temporarily bump
	taskreg_nesting_level while scanning host teams construct.
	(task_reduction_read): New function.
	(lower_rec_input_clauses): Handle OMP_CLAUSE_REDUCTION on taskloop
	construct.  Handle OMP_CLAUSE_IN_REDUCTION and OMP_CLAUSE__REDUCTEMP_
	clauses.  Handle OMP_CLAUSE_REDUCTION with task modifier.  Remove
	second argument to create_tmp_var if it is NULL.  Don't ignore shared
	clauses in is_host_teams_ctx contexts.  Handle
	OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE on OMP_CLAUSE_FIRSTPRIVATE
	clauses.
	(lower_reduction_clauses): Ignore reduction clauses with task
	modifier.  Remove second argument to create_tmp_var if it is NULL.
	Initialize OMP_ATOMIC_MEMORY_ORDER to relaxed.
	(lower_send_clauses): Ignore reduction clauses with task modifier.
	Handle OMP_CLAUSE__REDUCTEMP_.  Don't send anything for
	OMP_CLAUSE_REDUCTION on taskloop.  Handle OMP_CLAUSE_IN_REDUCTION.
	(maybe_add_implicit_barrier_cancel): Add OMP_RETURN argument, don't
	rely that it is the last stmt in body so far.  Ignore outer taskgroup
	contexts.
	(omp_task_reductions_find_first, omp_task_reduction_iterate,
	lower_omp_task_reductions): New functions.
	(lower_omp_sections): Handle reduction clauses with taskgroup
	modifiers.  Adjust maybe_add_implicit_barrier_cancel caller.
	(lower_omp_single): Adjust maybe_add_implicit_barrier_cancel caller.
	(lower_omp_for): Likewise.  Handle reduction clauses with taskgroup
	modifiers.
	(lower_omp_taskgroup): Handle taskgroup reductions.
	(create_task_copyfn): Copy over OMP_CLAUSE__REDUCTEMP_ pointer.
	Handle OMP_CLAUSE_IN_REDUCTION and OMP_CLAUSE_REDUCTION clauses.
	(lower_depend_clauses): If there are any
	OMP_CLAUSE_DEPEND_DEPOBJ or OMP_CLAUSE_DEPEND_MUTEXINOUTSET
	depend clauses, use a new array format.  If OMP_CLAUSE_DEPEND_LAST is
	seen, assume lowering is done already and return early.  Set kind
	on artificial depend clause to OMP_CLAUSE_DEPEND_LAST.
	(lower_omp_taskreg): Handle reduction clauses with task modifier on
	parallel construct.  Handle reduction clause on taskloop construct.
	Handle taskwait with depend clauses.
	(lower_omp_1): Use lower_omp_taskreg instead of lower_omp_teams
	for host teams constructs.
	* tree.c (omp_clause_num_ops): Add in_reduction, task_reduction,
	nontemporal and _reductemp_ clause entries.
	(omp_clause_code_name): Likewise.
	(walk_tree_1): Handle OMP_CLAUSE_{IN,TASK}_REDUCTION,
	OMP_CLAUSE_NONTEMPORAL and OMP_CLAUSE__REDUCTEMP_.
	* tree-core.h (enum omp_clause_code): Add
	OMP_CLAUSE_{{IN,TASK}_REDUCTION,NONTEMPORAL,_REDUCTEMP_}.
	(enum omp_clause_defaultmap_kind, enum omp_memory_order): New.
	(struct tree_base): Add omp_atomic_memory_order field into union.
	Remove OMP_ATOMIC_SEQ_CST comment.
	(enum omp_clause_depend_kind): Add OMP_CLAUSE_DEPEND_MUTEXINOUTSET
	and OMP_CLAUSE_DEPEND_DEPOBJ.
	(struct tree_omp_clause): Add subcode.defaultmap_kind.
	* tree.def (OMP_TASKGROUP): Add another operand, move next to other
	OpenMP constructs with body and clauses operands.
	* tree.h (OMP_BODY): Use OMP_MASTER instead of OMP_TASKGROUP.
	(OMP_CLAUSES): Use OMP_TASKGROUP instead of OMP_SINGLE.
	(OMP_TASKGROUP_CLAUSES): Define.
	(OMP_CLAUSE_DECL): Use OMP_CLAUSE__REDUCTEMP_ instead of
	OMP_CLAUSE__LOOPTEMP_.
	(OMP_ATOMIC_SEQ_CST): Remove.
	(OMP_ATOMIC_MEMORY_ORDER, OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE,
	OMP_CLAUSE_LASTPRIVATE_CONDITIONAL): Define.
	(OMP_CLAUSE_REDUCTION_CODE, OMP_CLAUSE_REDUCTION_INIT,
	OMP_CLAUSE_REDUCTION_MERGE, OMP_CLAUSE_REDUCTION_PLACEHOLDER,
	OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER,
	OMP_CLAUSE_REDUCTION_OMP_ORIG_REF): Handle
	OMP_CLAUSE_{,IN_,TASK_}REDUCTION.
	(OMP_CLAUSE_REDUCTION_TASK, OMP_CLAUSE_REDUCTION_INSCAN,
	OMP_CLAUSE_DEFAULTMAP_KIND, OMP_CLAUSE_DEFAULTMAP_CATEGORY,
	OMP_CLAUSE_DEFAULTMAP_BEHAVIOR, OMP_CLAUSE_DEFAULTMAP_SET_KIND):
	Define.
	* tree-inline.c (remap_gimple_stmt): Remap taskgroup clauses.
	* tree-nested.c (convert_nonlocal_omp_clauses): Handle
	OMP_CLAUSE__REDUCTEMP_, OMP_CLAUSE_NONTEMPORAL.
	(convert_local_omp_clauses): Likewise.  Remove useless test.
	* tree-parloops.c (create_call_for_reduction_1): Pass
	OMP_MEMORY_ORDER_RELAXED as new argument to
	gimple_build_omp_atomic_load and gimple_build_omp_atomic_store.
	* tree-pretty-print.c (dump_omp_iterators): New function.
	(dump_omp_clause): Handle OMP_CLAUSE__REDUCTEMP_,
	OMP_CLAUSE_NONTEMPORAL, OMP_CLAUSE_{TASK,IN}_REDUCTION.  Print
	reduction modifiers.  Handle OMP_CLAUSE_DEPEND_DEPOBJ and
	OMP_CLAUSE_DEPEND_MUTEXINOUTSET.  Print iterators in depend clauses.
	Print __internal__ for OMP_CLAUSE_DEPEND_LAST.  Handle cancel and
	simd OMP_CLAUSE_IF_MODIFIERs.  Handle new kinds of
	OMP_CLAUSE_DEFAULTMAP. Print conditional: for
	OMP_CLAUSE_LASTPRIVATE_CONDITIONAL.
	(dump_omp_atomic_memory_order): New function.
	(dump_generic_node): Use it.  Print taskgroup clauses.  Print
	taskwait with depend clauses.
	* tree-pretty-print.h (dump_omp_atomic_memory_order): Declare.
	* tree-streamer-in.c (unpack_ts_omp_clause_value_fields):
	Handle OMP_CLAUSE_{TASK,IN}_REDUCTION.
	* tree-streamer-out.c (pack_ts_omp_clause_value_fields,
	write_ts_omp_clause_tree_pointers): Likewise.
gcc/c-family/
	* c-common.h (c_finish_omp_taskgroup): Add CLAUSES argument.
	(c_finish_omp_atomic): Replace bool SEQ_CST argument with
	enum omp_memory_order MEMORY_ORDER.
	(c_finish_omp_flush): Add MO argument.
	(c_omp_depend_t_p, c_finish_omp_depobj): Declare.
	(c_finish_omp_for): Add FINAL_P argument.
	* c-omp.c: Include memmodel.h.
	(c_finish_omp_taskgroup): Add CLAUSES argument.  Set
	OMP_TASKGROUP_CLAUSES to it.
	(c_finish_omp_atomic): Replace bool SEQ_CST argument with
	enum omp_memory_order MEMORY_ORDER.  Set OMP_ATOMIC_MEMORY_ORDER
	instead of OMP_ATOMIC_SEQ_CST.
	(c_omp_depend_t_p, c_finish_omp_depobj): New functions.
	(c_finish_omp_flush): Add MO argument, if not MEMMODEL_LAST, emit
	__atomic_thread_fence call with the given value.
	(check_omp_for_incr_expr): Formatting fixes.
	(c_finish_omp_for): Add FINAL_P argument.  Allow NE_EXPR
	even in OpenMP loops, diagnose if NE_EXPR and incr expression
	is not constant expression 1 or -1.  Transform NE_EXPR loops
	with iterators pointers to VLA into LT_EXPR or GT_EXPR loops.
	(c_omp_check_loop_iv_r): Look for orig decl of C++ range for
	loops too.
	(c_omp_split_clauses): Add support for combined
	#pragma omp parallel master and
	#pragma omp {,parallel }master taskloop{, simd} constructs.
	Handle OMP_CLAUSE_IN_REDUCTION.  Handle OMP_CLAUSE_REDUCTION_TASK.
	Handle OMP_CLAUSE_NONTEMPORAL.  Handle splitting OMP_CLAUSE_IF
	also to OMP_SIMD.  Copy OMP_CLAUSE_LASTPRIVATE_CONDITIONAL.
	(c_omp_predetermined_sharing): Don't return
	OMP_CLAUSE_DEFAULT_SHARED for const qualified decls.
	* c-pragma.c (omp_pragmas): Add PRAGMA_OMP_DEPOBJ and
	PRAGMA_OMP_REQUIRES.
	* c-pragma.h (enum pragma_kind): Likewise.
	(enum pragma_omp_clause): Add PRAGMA_OMP_CLAUSE_NONTEMPORAL
	and PRAGMA_OMP_CLAUSE_{IN,TASK}_REDUCTION.
gcc/c/
	* c-parser.c: Include memmodel.h.
	(c_parser_omp_depobj, c_parser_omp_requires): New functions.
	(c_parser_pragma): Handle PRAGMA_OMP_DEPOBJ and PRAGMA_OMP_REQUIRES.
	(c_parser_omp_clause_name): Handle nontemporal, in_reduction and
	task_reduction clauses.
	(c_parser_omp_variable_list): Handle OMP_CLAUSE_{IN,TASK}_REDUCTION.
	For OMP_CLAUSE_DEPEND, parse clause operands as either an array
	section, or lvalue assignment expression.
	(c_parser_omp_clause_if): Handle cancel and simd modifiers.
	(c_parser_omp_clause_lastprivate): Parse optional
	conditional: modifier.
	(c_parser_omp_clause_hint): Require constant integer expression rather
	than just integer expression.
	(c_parser_omp_clause_defaultmap): Parse new kinds of defaultmap
	clause.
	(c_parser_omp_clause_reduction): Add IS_OMP and KIND arguments.
	Parse reduction modifiers.  Pass KIND to c_parser_omp_variable_list.
	(c_parser_omp_clause_nontemporal, c_parser_omp_iterators): New
	functions.
	(c_parser_omp_clause_depend): Parse iterator modifier and handle
	iterators.  Parse mutexinoutset and depobj kinds.
	(c_parser_oacc_all_clauses): Adjust c_parser_omp_clause_reduction
	callers.
	(c_parser_omp_all_clauses): Likewise.  Handle
	PRAGMA_OMP_CLAUSE_NONTEMPORAL and
	PRAGMA_OMP_CLAUSE_{IN,TASK}_REDUCTION.
	(c_parser_omp_atomic): Parse hint and memory order clauses.  Handle
	default memory order from requires directive if any.  Adjust
	c_finish_omp_atomic caller.
	(c_parser_omp_critical): Allow comma in between (name) and hint clause.
	(c_parser_omp_flush): Parse flush with memory-order-clause.
	(c_parser_omp_for_loop): Allow NE_EXPR even in
	OpenMP loops, adjust c_finish_omp_for caller.
	(OMP_SIMD_CLAUSE_MASK): Add if and nontemporal clauses.
	(c_parser_omp_master): Add p_name, mask and cclauses arguments.
	Allow it to be called while parsing combined parallel master.
	Parse combined master taskloop{, simd}.
	(c_parser_omp_parallel): Parse combined
	parallel master{, taskloop{, simd}} constructs.
	(OMP_TASK_CLAUSE_MASK): Add in_reduction clause.
	(OMP_TASKGROUP_CLAUSE_MASK): Define.
	(c_parser_omp_taskgroup): Add LOC argument.  Parse taskgroup clauses.
	(OMP_TASKWAIT_CLAUSE_MASK): Define.
	(c_parser_omp_taskwait): Handle taskwait with depend clauses.
	(c_parser_omp_teams): Force a BIND_EXPR with BLOCK
	around teams body.  Use SET_EXPR_LOCATION.
	(c_parser_omp_target_data): Allow target data
	with only use_device_ptr clauses.
	(c_parser_omp_target): Use SET_EXPR_LOCATION.  Set
	OMP_REQUIRES_TARGET_USED bit in omp_requires_mask.
	(c_parser_omp_requires): New function.
	(c_finish_taskloop_clauses): New function.
	(OMP_TASKLOOP_CLAUSE_MASK): Add reduction and in_reduction clauses.
	(c_parser_omp_taskloop): Use c_finish_taskloop_clauses.  Add forward
	declaration.  Disallow in_reduction clause when combined with parallel
	master.
	(c_parser_omp_construct): Adjust c_parser_omp_master and
	c_parser_omp_taskgroup callers.
	* c-typeck.c (c_finish_omp_cancel): Diagnose if clause with modifier
	other than cancel.
	(handle_omp_array_sections_1): Handle OMP_CLAUSE_{IN,TASK}_REDUCTION
	like OMP_CLAUSE_REDUCTION.
	(handle_omp_array_sections): Likewise.  Call save_expr on array
	reductions before calling build_index_type.  Handle depend clauses
	with iterators.
	(struct c_find_omp_var_s): New type.
	(c_find_omp_var_r, c_omp_finish_iterators): New functions.
	(c_finish_omp_clauses): Don't diagnose nonmonotonic clause
	with static, runtime or auto schedule kinds.  Call save_expr for whole
	array reduction sizes.  Diagnose reductions with zero sized elements
	or variable length structures.  Diagnose nogroup clause used with
	reduction clause(s).  Handle depend clause with
	OMP_CLAUSE_DEPEND_DEPOBJ.  Diagnose bit-fields.  Require
	omp_depend_t type for OMP_CLAUSE_DEPEND_DEPOBJ kinds and
	some different type for other kinds.  Use build_unary_op with
	ADDR_EXPR and build_indirect_ref instead of c_mark_addressable.
	Handle depend clauses with iterators.  Remove no longer needed special
	case that predetermined const qualified vars may be specified in
	firstprivate clause.  Complain if const qualified vars are mentioned
	in data-sharing clauses other than firstprivate or shared.  Use
	error_at with OMP_CLAUSE_LOCATION (c) as first argument instead of
	error.  Formatting fix.  Handle OMP_CLAUSE_NONTEMPORAL and
	OMP_CLAUSE_{IN,TASK}_REDUCTION.  Allow any lvalue as
	OMP_CLAUSE_DEPEND operand (besides array section), adjust diagnostics.
gcc/cp/
	* constexpr.c (potential_constant_expression_1): Handle OMP_DEPOBJ.
	* cp-gimplify.c (cp_genericize_r): Handle
	OMP_CLAUSE_{IN,TASK}_REDUCTION.
	(cxx_omp_predetermined_sharing_1): Don't return
	OMP_CLAUSE_DEFAULT_SHARED for const qualified decls with no mutable
	member.  Return OMP_CLAUSE_DEFAULT_FIRSTPRIVATE for this pointer.
	* cp-objcp-common.c (cp_common_init_ts): Handle OMP_DEPOBJ.
	* cp-tree.def (OMP_DEPOBJ): New tree code.
	* cp-tree.h (OMP_ATOMIC_DEPENDENT_P): Return true also for first
	argument being OMP_CLAUSE.
	(OMP_DEPOBJ_DEPOBJ, OMP_DEPOBJ_CLAUSES): Define.
	(cp_convert_omp_range_for, cp_finish_omp_range_for): Declare.
	(finish_omp_atomic): Add LOC, CLAUSES and MO arguments.  Remove
	SEQ_CST argument.
	(finish_omp_for_block): Declare.
	(finish_omp_flush): Add MO argument.
	(finish_omp_depobj): Declare.
	* cxx-pretty-print.c (cxx_pretty_printer::statement): Handle
	OMP_DEPOBJ.
	* dump.c (cp_dump_tree): Likewise.
	* lex.c (cxx_init): Likewise.
	* parser.c: Include memmodel.h.
	(cp_parser_for): Pass false as new is_omp argument to
	cp_parser_range_for.
	(cp_parser_range_for): Add IS_OMP argument, return before finalizing
	if it is true.
	(cp_parser_omp_clause_name): Handle nontemporal, in_reduction and
	task_reduction clauses.
	(cp_parser_omp_var_list_no_open): Handle
	OMP_CLAUSE_{IN,TASK}_REDUCTION.  For OMP_CLAUSE_DEPEND, parse clause
	operands as either an array section, or lvalue assignment expression.
	(cp_parser_omp_clause_if): Handle cancel and simd modifiers.
	(cp_parser_omp_clause_defaultmap): Parse new kinds of defaultmap
	clause.
	(cp_parser_omp_clause_reduction): Add IS_OMP and KIND arguments.
	Parse reduction modifiers.  Pass KIND to c_parser_omp_variable_list.
	(cp_parser_omp_clause_lastprivate, cp_parser_omp_iterators): New
	functions.
	(cp_parser_omp_clause_depend): Parse iterator modifier and handle
	iterators.  Parse mutexinoutset and depobj kinds.
	(cp_parser_oacc_all_clauses): Adjust cp_parser_omp_clause_reduction
	callers.
	(cp_parser_omp_all_clauses): Likewise.  Handle
	PRAGMA_OMP_CLAUSE_NONTEMPORAL and
	PRAGMA_OMP_CLAUSE_{IN,TASK}_REDUCTION.  Call
	cp_parser_omp_clause_lastprivate for OpenMP lastprivate clause.
	(cp_parser_omp_atomic): Pass pragma_tok->location as
	LOC to finish_omp_atomic.  Parse hint and memory order clauses.
	Handle default memory order from requires directive if any.  Adjust
	finish_omp_atomic caller.
	(cp_parser_omp_critical): Allow comma in between (name) and hint
	clause.
	(cp_parser_omp_depobj): New function.
	(cp_parser_omp_flush): Parse flush with memory-order-clause.
	(cp_parser_omp_for_cond): Allow NE_EXPR even in OpenMP loops.
	(cp_convert_omp_range_for, cp_finish_omp_range_for): New functions.
	(cp_parser_omp_for_loop): Parse C++11 range for loops among omp
	loops.  Handle OMP_CLAUSE_IN_REDUCTION like OMP_CLAUSE_REDUCTION.
	(OMP_SIMD_CLAUSE_MASK): Add if and nontemporal clauses.
	(cp_parser_omp_simd, cp_parser_omp_for): Call keep_next_level before
	begin_omp_structured_block and call finish_omp_for_block on
	finish_omp_structured_block result.
	(cp_parser_omp_master): Add p_name, mask and cclauses arguments.
	Allow it to be called while parsing combined parallel master.
	Parse combined master taskloop{, simd}.
	(cp_parser_omp_parallel): Parse combined
	parallel master{, taskloop{, simd}} constructs.
	(cp_parser_omp_single): Use SET_EXPR_LOCATION.
	(OMP_TASK_CLAUSE_MASK): Add in_reduction clause.
	(OMP_TASKWAIT_CLAUSE_MASK): Define.
	(cp_parser_omp_taskwait): Handle taskwait with depend clauses.
	(OMP_TASKGROUP_CLAUSE_MASK): Define.
	(cp_parser_omp_taskgroup): Parse taskgroup clauses, adjust
	c_finish_omp_taskgroup caller.
	(cp_parser_omp_distribute): Call keep_next_level before
	begin_omp_structured_block and call finish_omp_for_block on
	finish_omp_structured_block result.
	(cp_parser_omp_teams): Force a BIND_EXPR with BLOCK around teams
	body.
	(cp_parser_omp_target_data): Allow target data with only
	use_device_ptr clauses.
	(cp_parser_omp_target): Set OMP_REQUIRES_TARGET_USED bit in
	omp_requires_mask.
	(cp_parser_omp_requires): New function.
	(OMP_TASKLOOP_CLAUSE_MASK): Add reduction and in_reduction clauses.
	(cp_parser_omp_taskloop): Add forward declaration.  Disallow
	in_reduction clause when combined with parallel master.  Call
	keep_next_level before begin_omp_structured_block and call
	finish_omp_for_block on finish_omp_structured_block result.
	(cp_parser_omp_construct): Adjust cp_parser_omp_master caller.
	(cp_parser_pragma): Handle PRAGMA_OMP_DEPOBJ and PRAGMA_OMP_REQUIRES.
	* pt.c (tsubst_omp_clause_decl): Add iterators_cache argument.
	Adjust recursive calls.  Handle iterators.
	(tsubst_omp_clauses): Handle OMP_CLAUSE_{IN,TASK}_REDUCTION and
	OMP_CLAUSE_NONTEMPORAL.  Adjust tsubst_omp_clause_decl callers.
	(tsubst_decomp_names):
	(tsubst_omp_for_iterator): Change orig_declv into a reference.
	Handle range for loops.  Move orig_declv handling after declv/initv
	handling.
	(tsubst_expr): Force a BIND_EXPR with BLOCK around teams body.
	Adjust finish_omp_atomic caller.  Call keep_next_level before
	begin_omp_structured_block.  Call cp_finish_omp_range_for for range
	for loops and use {begin,finish}_omp_structured_block instead of
	{push,pop}_stmt_list if there are any range for loops.  Call
	finish_omp_for_block on finish_omp_structured_block result.
	Handle OMP_DEPOBJ.  Handle taskwait with depend clauses.  For
	OMP_ATOMIC call tsubst_omp_clauses on clauses if any, adjust
	finish_omp_atomic caller.  Use OMP_ATOMIC_MEMORY_ORDER rather
	than OMP_ATOMIC_SEQ_CST.  Handle clauses on OMP_TASKGROUP.
	(dependent_omp_for_p): Always return true for range for loops if
	processing_template_decl.  Return true if class type iterator
	does not have INTEGER_CST increment.
	* semantics.c: Include memmodel.h.
	(handle_omp_array_sections_1): Handle OMP_CLAUSE_{IN,TASK}_REDUCTION
	like OMP_CLAUSE_REDUCTION.
	(handle_omp_array_sections): Likewise.  Call save_expr on array
	reductions before calling build_index_type.  Handle depend clauses
	with iterators.
	(finish_omp_reduction_clause): Call save_expr for whole array
	reduction sizes.  Don't mark OMP_CLAUSE_DECL addressable if it has
	reference type.  Do mark decl_placeholder addressable if needed.
	Use error_at with OMP_CLAUSE_LOCATION (c) as first argument instead
	of error.
	(cp_omp_finish_iterators): New function.
	(finish_omp_clauses): Don't diagnose nonmonotonic clause with static,
	runtime or auto schedule kinds.  Diagnose nogroup clause used with
	reduction clause(s).  Handle depend clause with
	OMP_CLAUSE_DEPEND_DEPOBJ.  Diagnose bit-fields.  Require
	omp_depend_t type for OMP_CLAUSE_DEPEND_DEPOBJ kinds and
	some different type for other kinds.  Use cp_build_addr_expr
	and cp_build_indirect_ref instead of cxx_mark_addressable.
	Handle depend clauses with iterators.  Only handle static data members
	in the special case that const qualified vars may be specified in
	firstprivate clause.  Complain if const qualified vars without mutable
	members are mentioned in data-sharing clauses other than firstprivate
	or shared.  Use error_at with OMP_CLAUSE_LOCATION (c) as first
	argument instead of error.  Diagnose more than one nontemporal clause
	referring to the same variable.  Use error_at rather than error for
	priority and hint clause diagnostics.  Fix pasto for hint clause.
	Diagnose hint expression that doesn't fold into INTEGER_CST.
	Diagnose if clause with modifier other than cancel.  Handle
	OMP_CLAUSE_{IN,TASK}_REDUCTION like OMP_CLAUSE_REDUCTION.  Allow any
	lvalue as OMP_CLAUSE_DEPEND operand (besides array section), adjust
	diagnostics.
	(handle_omp_for_class_iterator): Don't create a new TREE_LIST if one
	has been created already for range for, just fill TREE_PURPOSE and
	TREE_VALUE.  Call cp_fully_fold on incr.
	(finish_omp_for): Don't check cond/incr if cond is global_namespace.
	Pass to c_omp_check_loop_iv_exprs orig_declv if non-NULL.  Don't
	use IS_EMPTY_STMT on NULL pre_body.  Adjust c_finish_omp_for caller.
	(finish_omp_for_block): New function.
	(finish_omp_atomic): Add LOC argument, pass it through
	to c_finish_omp_atomic and set it as location of OMP_ATOMIC* trees.
	Remove SEQ_CST argument.  Add CLAUSES and MO arguments.  Adjust
	c_finish_omp_atomic caller.  Stick clauses if any into first argument
	of wrapping OMP_ATOMIC.
	(finish_omp_depobj): New function.
	(finish_omp_flush): Add MO argument, if not
	MEMMODEL_LAST, emit __atomic_thread_fence call with the given value.
	(finish_omp_cancel): Diagnose if clause with modifier other than
	cancel.
gcc/fortran/
	* trans-openmp.c (gfc_trans_omp_clauses): Use
	OMP_CLAUSE_DEFAULTMAP_SET_KIND.
	(gfc_trans_omp_atomic): Set OMP_ATOMIC_MEMORY_ORDER
	rather than OMP_ATOMIC_SEQ_CST.
	(gfc_trans_omp_taskgroup): Build OMP_TASKGROUP using
	make_node instead of build1_loc.
	* types.def (BT_FN_VOID_BOOL, BT_FN_VOID_SIZE_SIZE_PTR,
	BT_FN_UINT_UINT_PTR_PTR, BT_FN_UINT_OMPFN_PTR_UINT_UINT,
	BT_FN_BOOL_UINT_LONGPTR_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR,
	BT_FN_BOOL_UINT_ULLPTR_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR,
	BT_FN_BOOL_LONG_LONG_LONG_LONG_LONG_LONGPTR_LONGPTR_PTR_PTR,
	BT_FN_BOOL_BOOL_ULL_ULL_ULL_LONG_ULL_ULLPTR_ULLPTR_PTR_PTR): New.
	(BT_FN_VOID_INT_OMPFN_SIZE_PTR_PTR_PTR_UINT_PTR_PTR): Formatting fix.
gcc/testsuite/
	* c-c++-common/gomp/atomic-17.c: New test.
	* c-c++-common/gomp/atomic-18.c: New test.
	* c-c++-common/gomp/atomic-19.c: New test.
	* c-c++-common/gomp/atomic-20.c: New test.
	* c-c++-common/gomp/atomic-21.c: New test.
	* c-c++-common/gomp/atomic-22.c: New test.
	* c-c++-common/gomp/clauses-1.c (r2): New variable.
	(foo): Add ntm argument and test if and nontemporal clauses on
	constructs with simd.
	(bar): Put taskloop simd inside of taskgroup with task_reduction,
	use in_reduction clause instead of reduction.  Add another
	taskloop simd without nogroup clause, but with reduction clause and
	a new in_reduction.  Add ntm and i3 arguments.  Test if and
	nontemporal clauses on constructs with simd.  Change if clauses on
	some constructs from specific to the particular constituents to one
	without a modifier.  Add new tests for combined host teams and for
	new parallel master and {,parallel }master taskloop{, simd} combined
	constructs.
	(baz): New function with host teams tests.
	* gcc.dg/gomp/combined-1.c: Moved to ...
	* c-c++-common/gomp/combined-1.c: ... here.  Adjust expected library
	call.
	* c-c++-common/gomp/combined-2.c: New test.
	* c-c++-common/gomp/combined-3.c: New test.
	* c-c++-common/gomp/critical-1.c: New test.
	* c-c++-common/gomp/critical-2.c: New test.
	* c-c++-common/gomp/default-1.c: New test.
	* c-c++-common/gomp/defaultmap-1.c: New test.
	* c-c++-common/gomp/defaultmap-2.c: New test.
	* c-c++-common/gomp/defaultmap-3.c: New test.
	* c-c++-common/gomp/depend-5.c: New test.
	* c-c++-common/gomp/depend-6.c: New test.
	* c-c++-common/gomp/depend-iterator-1.c: New test.
	* c-c++-common/gomp/depend-iterator-2.c: New test.
	* c-c++-common/gomp/depobj-1.c: New test.
	* c-c++-common/gomp/flush-1.c: New test.
	* c-c++-common/gomp/flush-2.c: New test.
	* c-c++-common/gomp/for-1.c: New test.
	* c-c++-common/gomp/for-2.c: New test.
	* c-c++-common/gomp/for-3.c: New test.
	* c-c++-common/gomp/for-4.c: New test.
	* c-c++-common/gomp/for-5.c: New test.
	* c-c++-common/gomp/for-6.c: New test.
	* c-c++-common/gomp/for-7.c: New test.
	* c-c++-common/gomp/if-1.c (foo): Add some further tests.
	* c-c++-common/gomp/if-2.c (foo): Likewise.  Expect slightly different
	diagnostics wording in one case.
	* c-c++-common/gomp/if-3.c: New test.
	* c-c++-common/gomp/master-combined-1.c: New test.
	* c-c++-common/gomp/master-combined-2.c: New test.
	* c-c++-common/gomp/nontemporal-1.c: New test.
	* c-c++-common/gomp/nontemporal-2.c: New test.
	* c-c++-common/gomp/reduction-task-1.c: New test.
	* c-c++-common/gomp/reduction-task-2.c: New test.
	* c-c++-common/gomp/requires-1.c: New test.
	* c-c++-common/gomp/requires-2.c: New test.
	* c-c++-common/gomp/requires-3.c: New test.
	* c-c++-common/gomp/requires-4.c: New test.
	* c-c++-common/gomp/schedule-modifiers-1.c (bar): Don't expect
	diagnostics for nonmonotonic modifier with static, runtime or auto
	schedule kinds.
	* c-c++-common/gomp/simd7.c: New test.
	* c-c++-common/gomp/target-data-1.c: New test.
	* c-c++-common/gomp/taskloop-reduction-1.c: New test.
	* c-c++-common/gomp/taskwait-depend-1.c: New test.
	* c-c++-common/gomp/teams-1.c: New test.
	* c-c++-common/gomp/teams-2.c: New test.
	* gcc.dg/gomp/appendix-a/a.24.1.c: Update from OpenMP examples.  Add
	shared(c) clause.
	* gcc.dg/gomp/atomic-5.c (f1): Add another expected error.
	* gcc.dg/gomp/clause-1.c: Adjust expected diagnostics for const
	qualified vars without mutable member no longer being predetermined
	shared.
	* gcc.dg/gomp/sharing-1.c: Likewise.
	* g++.dg/gomp/clause-3.C: Likewise.
	* g++.dg/gomp/member-2.C: Likewise.
	* g++.dg/gomp/predetermined-1.C: Likewise.
	* g++.dg/gomp/private-1.C: Likewise.
	* g++.dg/gomp/sharing-1.C: Likewise.
	* g++.dg/gomp/sharing-2.C: Likewise.  Add a few tests with aggregate
	const static data member without mutable elements.
	* gcc.dg/gomp/for-4.c: Expect nonmonotonic functions in the dumps.
	* gcc.dg/gomp/for-5.c: Likewise.
	* gcc.dg/gomp/for-6.c: Change expected library call.
	* gcc.dg/gomp/pr39495-2.c (foo): Don't expect errors on !=.
	* gcc.dg/gomp/reduction-2.c: New test.
	* gcc.dg/gomp/simd-1.c: New test.
	* gcc.dg/gomp/teams-1.c: Adjust expected diagnostic lines.
	* g++.dg/gomp/atomic-18.C: New test.
	* g++.dg/gomp/atomic-19.C: New test.
	* g++.dg/gomp/atomic-5.C (f1): Adjust expected lines of read-only
	variable messages.  Add another expected error.
	* g++.dg/gomp/critical-3.C: New test.
	* g++.dg/gomp/depend-iterator-1.C: New test.
	* g++.dg/gomp/depend-iterator-2.C: New test.
	* g++.dg/gomp/depobj-1.C: New test.
	* g++.dg/gomp/doacross-1.C: New test.
	* g++.dg/gomp/for-21.C: New test.
	* g++.dg/gomp/for-4.C: Expect nonmonotonic functions in the dumps.
	* g++.dg/gomp/for-5.C: Likewise.
	* g++.dg/gomp/for-6.C: Change expected library call.
	* g++.dg/gomp/loop-4.C: New test.
	* g++.dg/gomp/pr33372-1.C: Adjust location of the expected
	diagnostics.
	* g++.dg/gomp/pr33372-3.C: Likewise.
	* g++.dg/gomp/pr39495-2.C (foo): Don't expect errors on !=.
	* g++.dg/gomp/simd-2.C: New test.
	* g++.dg/gomp/tpl-atomic-2.C: Adjust expected diagnostic lines.
include/
	* gomp-constants.h (GOMP_TASK_FLAG_REDUCTION,
	GOMP_DEPEND_IN, GOMP_DEPEND_OUT, GOMP_DEPEND_INOUT,
	GOMP_DEPEND_MUTEXINOUTSET): Define.
libgomp/
	* affinity.c (gomp_display_affinity_place): New function.
	* affinity-fmt.c: New file.
	* alloc.c (gomp_aligned_alloc, gomp_aligned_free): New functions.
	* config/linux/affinity.c (gomp_display_affinity_place): New function.
	* config/nvptx/icv-device.c (omp_get_num_teams, omp_get_team_num):
	Move these functions to ...
	* config/nvptx/teams.c: ... here.  New file.
	* config/nvptx/target.c (omp_pause_resource, omp_pause_resource_all):
	New functions.
	* config/nvptx/team.c (gomp_team_start, gomp_pause_host): New
	functions.
	* configure.ac: Check for aligned_alloc, posix_memalign, memalign
	and _aligned_malloc.
	(HAVE_UNAME, HAVE_GETHOSTNAME, HAVE_GETPID): Add new tests.
	* configure.tgt: Add -DUSING_INITIAL_EXEC_TLS to XCFLAGS for Linux.
	* env.c (gomp_display_affinity_var, gomp_affinity_format_var,
	gomp_affinity_format_len): New variables.
	(parse_schedule): Parse monotonic and nonmonotonic modifiers in
	OMP_SCHEDULE variable.  Set GFS_MONOTONIC for monotonic schedules.
	(handle_omp_display_env): Display monotonic/nonmonotonic schedule
	modifiers.  Display (non-default) chunk sizes.  Print
	OMP_DISPLAY_AFFINITY and OMP_AFFINITY_FORMAT.
	(initialize_env): Don't call pthread_attr_setdetachstate.  Handle
	OMP_DISPLAY_AFFINITY and OMP_AFFINITY_FORMAT env vars.
	* fortran.c: Include stdio.h and string.h.
	(omp_pause_resource, omp_pause_resource_all): Add ialias_redirect.
	(omp_get_schedule_, omp_get_schedule_8_): Mask off GFS_MONOTONIC bit.
	(omp_set_affinity_format_, omp_get_affinity_format_,
	omp_display_affinity_, omp_capture_affinity_, omp_pause_resource_,
	omp_pause_resource_all_): New functions.
	* icv.c (omp_set_schedule): Mask off omp_sched_monotonic bit in
	switch.
	* icv-device.c (omp_get_num_teams, omp_get_team_num): Move these
	functions to ...
	* teams.c: ... here.  New file.
	* libgomp_g.h: Include gstdint.h.
	(GOMP_loop_nonmonotonic_runtime_start,
	GOMP_loop_maybe_nonmonotonic_runtime_start, GOMP_loop_start,
	GOMP_loop_ordered_start, GOMP_loop_nonmonotonic_runtime_next,
	GOMP_loop_maybe_nonmonotonic_runtime_next, GOMP_loop_doacross_start,
	GOMP_parallel_loop_nonmonotonic_runtime,
	GOMP_parallel_loop_maybe_nonmonotonic_runtime,
	GOMP_loop_ull_nonmonotonic_runtime_start,
	GOMP_loop_ull_maybe_nonmonotonic_runtime_start, GOMP_loop_ull_start,
	GOMP_loop_ull_ordered_start, GOMP_loop_ull_nonmonotonic_runtime_next,
	GOMP_loop_ull_maybe_nonmonotonic_runtime_next,
	GOMP_loop_ull_doacross_start, GOMP_parallel_reductions,
	GOMP_taskwait_depend, GOMP_taskgroup_reduction_register,
	GOMP_taskgroup_reduction_unregister, GOMP_task_reduction_remap,
	GOMP_workshare_task_reduction_unregister, GOMP_sections2_start,
	GOMP_teams_reg): Declare.
	* libgomp.h (GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC): Define unless
	gomp_aligned_alloc uses fallback implementation.
	(gomp_aligned_alloc, gomp_aligned_free): Declare.
	(enum gomp_schedule_type): Add GFS_MONOTONIC.
	(struct gomp_doacross_work_share): Add extra field.
	(struct gomp_work_share): Add task_reductions field.
	(struct gomp_taskgroup): Add workshare and reductions fields.
	(GOMP_NEEDS_THREAD_HANDLE): Define if needed.
	(gomp_thread_handle): New typedef.
	(gomp_display_affinity_place, gomp_set_affinity_format,
	gomp_display_string, gomp_display_affinity,
	gomp_display_affinity_thread): Declare.
	(gomp_doacross_init, gomp_doacross_ull_init): Add size_t argument.
	(gomp_parallel_reduction_register, gomp_workshare_taskgroup_start,
	gomp_workshare_task_reduction_register): Declare.
	(gomp_team_start): Add taskgroup argument.
	(gomp_pause_host): Declare.
	(gomp_init_work_share, gomp_work_share_start): Change bool argument
	to size_t.
	(gomp_thread_self, gomp_thread_to_pthread_t): New inline functions.
	* libgomp.map (GOMP_5.0): Export GOMP_loop_start,
	GOMP_loop_ordered_start, GOMP_loop_doacross_start,
	GOMP_loop_ull_start, GOMP_loop_ull_ordered_start,
	GOMP_loop_ull_doacross_start,
	GOMP_workshare_task_reduction_unregister, GOMP_sections2_start,
	GOMP_loop_maybe_nonmonotonic_runtime_next,
	GOMP_loop_maybe_nonmonotonic_runtime_start,
	GOMP_loop_nonmonotonic_runtime_next,
	GOMP_loop_nonmonotonic_runtime_start,
	GOMP_loop_ull_maybe_nonmonotonic_runtime_next,
	GOMP_loop_ull_maybe_nonmonotonic_runtime_start,
	GOMP_loop_ull_nonmonotonic_runtime_next,
	GOMP_loop_ull_nonmonotonic_runtime_start,
	GOMP_parallel_loop_maybe_nonmonotonic_runtime,
	GOMP_parallel_loop_nonmonotonic_runtime, GOMP_parallel_reductions,
	GOMP_taskgroup_reduction_register,
	GOMP_taskgroup_reduction_unregister, GOMP_task_reduction_remap,
	GOMP_teams_reg and GOMP_taskwait_depend.
	(OMP_5.0): Export omp_pause_resource{,_all}{,_},
	omp_{capture,display}_affinity{,_}, and
	omp_[gs]et_affinity_format{,_}.
	* loop.c: Include string.h.
	(GOMP_loop_runtime_next): Add ialias.
	(GOMP_taskgroup_reduction_register): Add ialias_redirect.
	(gomp_loop_static_start, gomp_loop_dynamic_start,
	gomp_loop_guided_start, gomp_loop_ordered_static_start,
	gomp_loop_ordered_dynamic_start, gomp_loop_ordered_guided_start,
	gomp_loop_doacross_static_start, gomp_loop_doacross_dynamic_start,
	gomp_loop_doacross_guided_start): Adjust gomp_work_share_start
	or gomp_doacross_init callers.
	(gomp_adjust_sched, GOMP_loop_start, GOMP_loop_ordered_start,
	GOMP_loop_doacross_start): New functions.
	(GOMP_loop_runtime_start, GOMP_loop_ordered_runtime_start,
	GOMP_loop_doacross_runtime_start, GOMP_parallel_loop_runtime_start):
	Mask off GFS_MONOTONIC bit.
	(GOMP_loop_maybe_nonmonotonic_runtime_next,
	GOMP_loop_maybe_nonmonotonic_runtime_start,
	GOMP_loop_nonmonotonic_runtime_next,
	GOMP_loop_nonmonotonic_runtime_start,
	GOMP_parallel_loop_maybe_nonmonotonic_runtime,
	GOMP_parallel_loop_nonmonotonic_runtime): New aliases or wrapper
	functions.
	(gomp_parallel_loop_start): Pass NULL as taskgroup to
	gomp_team_start.
	* loop_ull.c: Include string.h.
	(GOMP_loop_ull_runtime_next): Add ialias.
	(GOMP_taskgroup_reduction_register): Add ialias_redirect.
	(gomp_loop_ull_static_start, gomp_loop_ull_dynamic_start,
	gomp_loop_ull_guided_start, gomp_loop_ull_ordered_static_start,
	gomp_loop_ull_ordered_dynamic_start,
	gomp_loop_ull_ordered_guided_start,
	gomp_loop_ull_doacross_static_start,
	gomp_loop_ull_doacross_dynamic_start,
	gomp_loop_ull_doacross_guided_start): Adjust gomp_work_share_start
	and gomp_doacross_ull_init callers.
	(gomp_adjust_sched, GOMP_loop_ull_start, GOMP_loop_ull_ordered_start,
	GOMP_loop_ull_doacross_start): New functions.
	(GOMP_loop_ull_runtime_start,
	GOMP_loop_ull_ordered_runtime_start,
	GOMP_loop_ull_doacross_runtime_start): Mask off GFS_MONOTONIC bit.
	(GOMP_loop_ull_maybe_nonmonotonic_runtime_next,
	GOMP_loop_ull_maybe_nonmonotonic_runtime_start,
	GOMP_loop_ull_nonmonotonic_runtime_next,
	GOMP_loop_ull_nonmonotonic_runtime_start): Likewise.
	* Makefile.am (libgomp_la_SOURCES): Add teams.c and affinity-fmt.c.
	* omp.h.in (enum omp_sched_t): Add omp_sched_monotonic.
	(omp_pause_resource_t, omp_depend_t): New typedefs.
	(enum omp_lock_hint_t): Renamed to ...
	(enum omp_sync_hint_t): ... this.  Define omp_sync_hint_*
	enumerators using numbers and omp_lock_hint_* as their aliases.
	(omp_lock_hint_t): New typedef.  Rename to ...
	(omp_sync_hint_t): ... this.
	(omp_init_lock_with_hint, omp_init_nest_lock_with_hint): Use
	omp_sync_hint_t instead of omp_lock_hint_t.
	(omp_pause_resource, omp_pause_resource_all, omp_set_affinity_format,
	omp_get_affinity_format, omp_display_affinity, omp_capture_affinity):
	Declare.
	(omp_target_is_present, omp_target_disassociate_ptr):
	Change first argument from void * to const void *.
	(omp_target_memcpy, omp_target_memcpy_rect): Change second argument
	from void * to const void *.
	(omp_target_associate_ptr): Change first and second arguments from
	void * to const void *.
	* omp_lib.f90.in (omp_pause_resource_kind, omp_pause_soft,
	omp_pause_hard): New parameters.
	(omp_pause_resource, omp_pause_resource_all, omp_set_affinity_format,
	omp_get_affinity_format, omp_display_affinity, omp_capture_affinity):
	New interfaces.
	* omp_lib.h.in (omp_pause_resource_kind, omp_pause_soft,
	omp_pause_hard): New parameters.
	(omp_pause_resource, omp_pause_resource_all, omp_set_affinity_format,
	omp_get_affinity_format, omp_display_affinity, omp_capture_affinity):
	New externals.
	* ordered.c (gomp_doacross_init, gomp_doacross_ull_init): Add
	EXTRA argument.  If not needed to prepare array, if extra is 0,
	clear ws->doacross, otherwise allocate just doacross structure and
	extra payload.  If array is needed, allocate also extra payload.
	(GOMP_doacross_post, GOMP_doacross_wait, GOMP_doacross_ull_post,
	GOMP_doacross_ull_wait): Handle doacross->array == NULL like
	doacross == NULL.
	* parallel.c (GOMP_parallel_start): Pass NULL as taskgroup to
	gomp_team_start.
	(GOMP_parallel): Likewise.  Formatting fix.
	(GOMP_parallel_reductions): New function.
	(GOMP_cancellation_point): If taskgroup has workshare
	flag set, check cancelled of prev taskgroup if any.
	(GOMP_cancel): If taskgroup has workshare flag set, set cancelled
	on prev taskgroup if any.
	* sections.c: Include string.h.
	(GOMP_taskgroup_reduction_register): Add ialias_redirect.
	(GOMP_sections_start): Adjust gomp_work_share_start caller.
	(GOMP_sections2_start): New function.
	(GOMP_parallel_sections_start, GOMP_parallel_sections):
	Pass NULL as taskgroup to gomp_team_start.
	* single.c (GOMP_single_start, GOMP_single_copy_start): Adjust
	gomp_work_share_start callers.
	* target.c (GOMP_target_update_ext, GOMP_target_enter_exit_data):
	If taskgroup has workshare flag set, check cancelled on prev
	taskgroup if any.  Guard all cancellation tests with
	gomp_cancel_var test.
	(omp_target_is_present, omp_target_disassociate_ptr):
	Change ptr argument from void * to const void *.
	(omp_target_memcpy): Change src argument from void * to const void *.
	(omp_target_memcpy_rect): Likewise.
	(omp_target_memcpy_rect_worker): Likewise.  Use const char * casts
	instead of char * where needed.
	(omp_target_associate_ptr): Change host_ptr and device_ptr arguments
	from void * to const void *.
	(omp_pause_resource, omp_pause_resource_all): New functions.
	* task.c (gomp_task_handle_depend): Handle new depend array format
	in addition to the old.  Handle mutexinoutset kinds the same as
	inout for now, handle unspecified kinds.
	(gomp_create_target_task): If taskgroup has workshare flag set, check
	cancelled on prev taskgroup if any.  Guard all cancellation tests with
	gomp_cancel_var test.  Handle new depend array format count in
	addition to the old.
	(GOMP_task): Likewise.  Adjust function comment.
	(gomp_task_run_pre): If taskgroup has workshare flag set, check
	cancelled on prev taskgroup if any.  Guard all cancellation tests with
	gomp_cancel_var test.
	(GOMP_taskwait_depend): New function.
	(gomp_task_maybe_wait_for_dependencies): Handle new depend array
	format in addition to the old.  Handle mutexinoutset kinds the same as
	inout for now, handle unspecified kinds.  Fix a function comment typo.
	(gomp_taskgroup_init): New function.
	(GOMP_taskgroup_start): Use it.
	(gomp_reduction_register, gomp_create_artificial_team,
	GOMP_taskgroup_reduction_register,
	GOMP_taskgroup_reduction_unregister, GOMP_task_reduction_remap,
	gomp_parallel_reduction_register,
	gomp_workshare_task_reduction_register,
	gomp_workshare_taskgroup_start,
	GOMP_workshare_task_reduction_unregister): New functions.
	* taskloop.c (GOMP_taskloop): If taskgroup has workshare flag set,
	check cancelled on prev taskgroup if any.  Guard all cancellation
	tests with gomp_cancel_var test.  Handle GOMP_TASK_FLAG_REDUCTION flag
	by calling GOMP_taskgroup_reduction_register.
	* team.c (gomp_thread_attr): Remove comment.
	(struct gomp_thread_start_data): Add handle field.
	(gomp_thread_start): Call pthread_detach.
	(gomp_new_team): Adjust gomp_init_work_share caller.
	(gomp_free_pool_helper): Call pthread_detach.
	(gomp_team_start): Add taskgroup argument, initialize implicit
	tasks' taskgroup field to that.  Don't call
	pthread_attr_setdetachstate.  Handle OMP_DISPLAY_AFFINITY env var.
	(gomp_team_end): Determine nesting by thr->ts.level != 0
	rather than thr->ts.team != NULL.
	(gomp_pause_pool_helper, gomp_pause_host): New functions.
	* work.c (alloc_work_share): Use gomp_aligned_alloc instead of
	gomp_malloc if GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC is defined.
	(gomp_init_work_share): Change ORDERED argument from bool to size_t,
	if more than 1 allocate also extra payload at the end of array.  Never
	keep ordered_team_ids NULL, set it to inline_ordered_team_ids instead.
	(gomp_work_share_start): Change ORDERED argument from bool to size_t,
	return true instead of ws.
	* Makefile.in: Regenerated.
	* configure: Regenerated.
	* config.h.in: Regenerated.
	* testsuite/libgomp.c/cancel-for-2.c (foo): Use cancel modifier
	in some cases.
	* testsuite/libgomp.c-c++-common/cancel-parallel-1.c: New test.
	* testsuite/libgomp.c-c++-common/cancel-taskgroup-3.c: New test.
	* testsuite/libgomp.c-c++-common/depend-iterator-1.c: New test.
	* testsuite/libgomp.c-c++-common/depend-iterator-2.c: New test.
	* testsuite/libgomp.c-c++-common/depend-mutexinout-1.c: New test.
	* testsuite/libgomp.c-c++-common/depend-mutexinout-2.c: New test.
	* testsuite/libgomp.c-c++-common/depobj-1.c: New test.
	* testsuite/libgomp.c-c++-common/display-affinity-1.c: New test.
	* testsuite/libgomp.c-c++-common/for-10.c: New test.
	* testsuite/libgomp.c-c++-common/for-11.c: New test.
	* testsuite/libgomp.c-c++-common/for-12.c: New test.
	* testsuite/libgomp.c-c++-common/for-13.c: New test.
	* testsuite/libgomp.c-c++-common/for-14.c: New test.
	* testsuite/libgomp.c-c++-common/for-15.c: New test.
	* testsuite/libgomp.c-c++-common/for-2.h: If CONDNE macro is defined,
	define a different N(test), don't define N(f0) to N(f14), but instead
	define N(f20) to N(f34) using != comparisons.
	* testsuite/libgomp.c-c++-common/for-7.c: New test.
	* testsuite/libgomp.c-c++-common/for-8.c: New test.
	* testsuite/libgomp.c-c++-common/for-9.c: New test.
	* testsuite/libgomp.c-c++-common/master-combined-1.c: New test.
	* testsuite/libgomp.c-c++-common/pause-1.c: New test.
	* testsuite/libgomp.c-c++-common/pause-2.c: New test.
	* testsuite/libgomp.c-c++-common/pr66199-10.c: New test.
	* testsuite/libgomp.c-c++-common/pr66199-11.c: New test.
	* testsuite/libgomp.c-c++-common/pr66199-12.c: New test.
	* testsuite/libgomp.c-c++-common/pr66199-13.c: New test.
	* testsuite/libgomp.c-c++-common/pr66199-14.c: New test.
	* testsuite/libgomp.c-c++-common/simd-1.c: New test.
	* testsuite/libgomp.c-c++-common/taskloop-reduction-1.c: New test.
	* testsuite/libgomp.c-c++-common/taskloop-reduction-2.c: New test.
	* testsuite/libgomp.c-c++-common/taskloop-reduction-3.c: New test.
	* testsuite/libgomp.c-c++-common/taskloop-reduction-4.c: New test.
	* testsuite/libgomp.c-c++-common/task-reduction-11.c: New test.
	* testsuite/libgomp.c-c++-common/task-reduction-12.c: New test.
	* testsuite/libgomp.c-c++-common/task-reduction-1.c: New test.
	* testsuite/libgomp.c-c++-common/task-reduction-2.c: New test.
	* testsuite/libgomp.c-c++-common/task-reduction-3.c: New test.
	* testsuite/libgomp.c-c++-common/task-reduction-4.c: New test.
	* testsuite/libgomp.c-c++-common/task-reduction-5.c: New test.
	* testsuite/libgomp.c-c++-common/task-reduction-6.c: New test.
	* testsuite/libgomp.c-c++-common/task-reduction-7.c: New test.
	* testsuite/libgomp.c-c++-common/task-reduction-8.c: New test.
	* testsuite/libgomp.c-c++-common/task-reduction-9.c: New test.
	* testsuite/libgomp.c-c++-common/taskwait-depend-1.c: New test.
	* testsuite/libgomp.c++/depend-1.C: New test.
	* testsuite/libgomp.c++/depend-iterator-1.C: New test.
	* testsuite/libgomp.c++/depobj-1.C: New test.
	* testsuite/libgomp.c++/for-16.C: New test.
	* testsuite/libgomp.c++/for-21.C: New test.
	* testsuite/libgomp.c++/for-22.C: New test.
	* testsuite/libgomp.c++/for-23.C: New test.
	* testsuite/libgomp.c++/for-24.C: New test.
	* testsuite/libgomp.c++/for-25.C: New test.
	* testsuite/libgomp.c++/for-26.C: New test.
	* testsuite/libgomp.c++/taskloop-reduction-1.C: New test.
	* testsuite/libgomp.c++/taskloop-reduction-2.C: New test.
	* testsuite/libgomp.c++/taskloop-reduction-3.C: New test.
	* testsuite/libgomp.c++/taskloop-reduction-4.C: New test.
	* testsuite/libgomp.c++/task-reduction-10.C: New test.
	* testsuite/libgomp.c++/task-reduction-11.C: New test.
	* testsuite/libgomp.c++/task-reduction-12.C: New test.
	* testsuite/libgomp.c++/task-reduction-13.C: New test.
	* testsuite/libgomp.c++/task-reduction-14.C: New test.
	* testsuite/libgomp.c++/task-reduction-15.C: New test.
	* testsuite/libgomp.c++/task-reduction-16.C: New test.
	* testsuite/libgomp.c++/task-reduction-17.C: New test.
	* testsuite/libgomp.c++/task-reduction-18.C: New test.
	* testsuite/libgomp.c++/task-reduction-19.C: New test.
	* testsuite/libgomp.c/task-reduction-1.c: New test.
	* testsuite/libgomp.c++/task-reduction-1.C: New test.
	* testsuite/libgomp.c/task-reduction-2.c: New test.
	* testsuite/libgomp.c++/task-reduction-2.C: New test.
	* testsuite/libgomp.c++/task-reduction-3.C: New test.
	* testsuite/libgomp.c++/task-reduction-4.C: New test.
	* testsuite/libgomp.c++/task-reduction-5.C: New test.
	* testsuite/libgomp.c++/task-reduction-6.C: New test.
	* testsuite/libgomp.c++/task-reduction-7.C: New test.
	* testsuite/libgomp.c++/task-reduction-8.C: New test.
	* testsuite/libgomp.c++/task-reduction-9.C: New test.
	* testsuite/libgomp.c/teams-1.c: New test.
	* testsuite/libgomp.c/teams-2.c: New test.
	* testsuite/libgomp.c/thread-limit-4.c: New test.
	* testsuite/libgomp.c/thread-limit-5.c: New test.
	* testsuite/libgomp.fortran/display-affinity-1.f90: New test.

From-SVN: r265930
2018-11-08 18:13:04 +01:00


/* Copyright (C) 2007-2018 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the maintenance of tasks in response to task
   creation and termination.  */

#include "libgomp.h"
#include <stdlib.h>
#include <string.h>
#include "gomp-constants.h"

typedef struct gomp_task_depend_entry *hash_entry_type;

static inline void *
htab_alloc (size_t size)
{
  return gomp_malloc (size);
}

static inline void
htab_free (void *ptr)
{
  free (ptr);
}

#include "hashtab.h"

static inline hashval_t
htab_hash (hash_entry_type element)
{
  return hash_pointer (element->addr);
}

static inline bool
htab_eq (hash_entry_type x, hash_entry_type y)
{
  return x->addr == y->addr;
}
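
/* These four definitions instantiate the generic hash table from
   hashtab.h for dependence tracking: entries are
   gomp_task_depend_entry pointers, hashed and compared by the
   depended-on address, with table memory obtained through
   gomp_malloc.  */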

/* Create a new task data structure.  */

void
gomp_init_task (struct gomp_task *task, struct gomp_task *parent_task,
                struct gomp_task_icv *prev_icv)
{
  /* It would seem that using memset here would be a win, but it turns
     out that partially filling gomp_task allows us to keep the
     overhead of task creation low.  In the nqueens-1.c test, for a
     sufficiently large N, we drop the overhead from 5-6% to 1%.

     Note, the nqueens-1.c test in serial mode is a good test to
     benchmark the overhead of creating tasks as there are millions of
     tiny tasks created that all run undeferred.  */
  task->parent = parent_task;
  task->icv = *prev_icv;
  task->kind = GOMP_TASK_IMPLICIT;
  task->taskwait = NULL;
  task->in_tied_task = false;
  task->final_task = false;
  task->copy_ctors_done = false;
  task->parent_depends_on = false;
  priority_queue_init (&task->children_queue);
  task->taskgroup = NULL;
  task->dependers = NULL;
  task->depend_hash = NULL;
  task->depend_count = 0;
}
/* Clean up a task, after completing it. */
void
gomp_end_task (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_task *task = thr->task;
gomp_finish_task (task);
thr->task = task->parent;
}
/* Clear the parent field of every task in LIST. */
static inline void
gomp_clear_parent_in_list (struct priority_list *list)
{
struct priority_node *p = list->tasks;
if (p)
do
{
priority_node_to_task (PQ_CHILDREN, p)->parent = NULL;
p = p->next;
}
while (p != list->tasks);
}
/* Splay tree version of gomp_clear_parent_in_list.
Clear the parent field of every task in NODE within SP, and free
the node when done. */
static void
gomp_clear_parent_in_tree (prio_splay_tree sp, prio_splay_tree_node node)
{
if (!node)
return;
prio_splay_tree_node left = node->left, right = node->right;
gomp_clear_parent_in_list (&node->key.l);
#if _LIBGOMP_CHECKING_
memset (node, 0xaf, sizeof (*node));
#endif
/* No need to remove the node from the tree. We're nuking
everything, so just free the nodes and our caller can clear the
entire splay tree. */
free (node);
gomp_clear_parent_in_tree (sp, left);
gomp_clear_parent_in_tree (sp, right);
}
/* Clear the parent field of every task in Q and remove every task
from Q. */
static inline void
gomp_clear_parent (struct priority_queue *q)
{
if (priority_queue_multi_p (q))
{
gomp_clear_parent_in_tree (&q->t, q->t.root);
/* All the nodes have been cleared in gomp_clear_parent_in_tree.
No need to remove anything. We can just nuke everything. */
q->t.root = NULL;
}
else
gomp_clear_parent_in_list (&q->l);
}
/* Helper function for GOMP_task and gomp_create_target_task.
For a TASK with in/out dependencies, fill in the various dependency
queues. PARENT is the parent of said task. DEPEND is as in
GOMP_task. */
static void
gomp_task_handle_depend (struct gomp_task *task, struct gomp_task *parent,
void **depend)
{
size_t ndepend = (uintptr_t) depend[0];
size_t i;
hash_entry_type ent;
if (ndepend)
{
/* depend[0] is total # */
size_t nout = (uintptr_t) depend[1]; /* # of out: and inout: */
/* ndepend - nout is # of in: */
for (i = 0; i < ndepend; i++)
{
task->depend[i].addr = depend[2 + i];
task->depend[i].is_in = i >= nout;
}
}
else
{
ndepend = (uintptr_t) depend[1]; /* total # */
size_t nout = (uintptr_t) depend[2]; /* # of out: and inout: */
size_t nmutexinoutset = (uintptr_t) depend[3]; /* # of mutexinoutset: */
/* For now we treat mutexinoutset like out, which is compliant, but
inefficient. */
size_t nin = (uintptr_t) depend[4]; /* # of in: */
/* ndepend - nout - nmutexinoutset - nin is # of depobjs */
size_t normal = nout + nmutexinoutset + nin;
size_t n = 0;
for (i = normal; i < ndepend; i++)
{
void **d = (void **) (uintptr_t) depend[5 + i];
switch ((uintptr_t) d[1])
{
case GOMP_DEPEND_OUT:
case GOMP_DEPEND_INOUT:
case GOMP_DEPEND_MUTEXINOUTSET:
break;
case GOMP_DEPEND_IN:
continue;
default:
gomp_fatal ("unknown omp_depend_t dependence type %d",
(int) (uintptr_t) d[1]);
}
task->depend[n].addr = d[0];
task->depend[n++].is_in = 0;
}
for (i = 0; i < normal; i++)
{
task->depend[n].addr = depend[5 + i];
task->depend[n++].is_in = i >= nout + nmutexinoutset;
}
for (i = normal; i < ndepend; i++)
{
void **d = (void **) (uintptr_t) depend[5 + i];
if ((uintptr_t) d[1] != GOMP_DEPEND_IN)
continue;
task->depend[n].addr = d[0];
task->depend[n++].is_in = 1;
}
}
task->depend_count = ndepend;
task->num_dependees = 0;
if (parent->depend_hash == NULL)
parent->depend_hash = htab_create (2 * ndepend > 12 ? 2 * ndepend : 12);
for (i = 0; i < ndepend; i++)
{
task->depend[i].next = NULL;
task->depend[i].prev = NULL;
task->depend[i].task = task;
task->depend[i].redundant = false;
task->depend[i].redundant_out = false;
hash_entry_type *slot = htab_find_slot (&parent->depend_hash,
&task->depend[i], INSERT);
hash_entry_type out = NULL, last = NULL;
if (*slot)
{
/* If multiple depend clauses of the same task reference the same
address, all but the first one are redundant. As inout/out come
first, if any of them is inout/out, it will win, which is the right
semantics. */
if ((*slot)->task == task)
{
task->depend[i].redundant = true;
continue;
}
for (ent = *slot; ent; ent = ent->next)
{
if (ent->redundant_out)
break;
last = ent;
/* depend(in:...) doesn't depend on earlier depend(in:...). */
if (task->depend[i].is_in && ent->is_in)
continue;
if (!ent->is_in)
out = ent;
struct gomp_task *tsk = ent->task;
if (tsk->dependers == NULL)
{
tsk->dependers
= gomp_malloc (sizeof (struct gomp_dependers_vec)
+ 6 * sizeof (struct gomp_task *));
tsk->dependers->n_elem = 1;
tsk->dependers->allocated = 6;
tsk->dependers->elem[0] = task;
task->num_dependees++;
continue;
}
/* We already have some other dependency on tsk from an earlier
depend clause. */
else if (tsk->dependers->n_elem
&& (tsk->dependers->elem[tsk->dependers->n_elem - 1]
== task))
continue;
else if (tsk->dependers->n_elem == tsk->dependers->allocated)
{
tsk->dependers->allocated
= tsk->dependers->allocated * 2 + 2;
tsk->dependers
= gomp_realloc (tsk->dependers,
sizeof (struct gomp_dependers_vec)
+ (tsk->dependers->allocated
* sizeof (struct gomp_task *)));
}
tsk->dependers->elem[tsk->dependers->n_elem++] = task;
task->num_dependees++;
}
task->depend[i].next = *slot;
(*slot)->prev = &task->depend[i];
}
*slot = &task->depend[i];
/* There is no need to store more than one depend({,in}out:) task per
address in the hash table chain for the purpose of creation of
deferred tasks, because each out depends on all earlier outs, thus it
is enough to record just the last depend({,in}out:). For depend(in:),
we need to keep all of the previous ones not terminated yet, because
a later depend({,in}out:) might need to depend on all of them. So, if
the new task's clause is depend({,in}out:), we know there is at most
one other depend({,in}out:) clause in the list (out). For
non-deferred tasks we want to see all outs, so they are moved to the
end of the chain; after the first redundant_out entry, all following
entries should be redundant_out. */
if (!task->depend[i].is_in && out)
{
if (out != last)
{
out->next->prev = out->prev;
out->prev->next = out->next;
out->next = last->next;
out->prev = last;
last->next = out;
if (out->next)
out->next->prev = out;
}
out->redundant_out = true;
}
}
}
/* Called when encountering an explicit task directive. If IF_CLAUSE is
false, then we must not delay in executing the task. If UNTIED is true,
then the task may be executed by any member of the team.
DEPEND is an array containing:
if depend[0] is non-zero, then:
depend[0]: number of depend elements.
depend[1]: number of depend elements of type "out/inout".
depend[2..N+1]: address of [1..N]th depend element.
otherwise, when depend[0] is zero, then:
depend[1]: number of depend elements.
depend[2]: number of depend elements of type "out/inout".
depend[3]: number of depend elements of type "mutexinoutset".
depend[4]: number of depend elements of type "in".
depend[5..4+depend[2]+depend[3]+depend[4]]: address of depend elements
depend[5+depend[2]+depend[3]+depend[4]..4+depend[1]]: address of
omp_depend_t objects. */
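/* Illustrative sketch, not part of the libgomp sources: for a construct
such as "#pragma omp task depend(out: x) depend(in: y, z)" a compiler
would be expected to build the first (depend[0] non-zero) layout above
roughly as
void *depend[] = { (void *) 3, /* three depend elements in total */
(void *) 1, /* one of them is out/inout */
&x, /* out/inout addresses come first */
&y, &z }; /* in addresses follow */
and pass it to GOMP_task with GOMP_TASK_FLAG_DEPEND set in FLAGS.
The variables x, y and z are hypothetical. */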
void
GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
long arg_size, long arg_align, bool if_clause, unsigned flags,
void **depend, int priority)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
#ifdef HAVE_BROKEN_POSIX_SEMAPHORES
/* If pthread_mutex_* is used for omp_*lock*, then each task must be
tied to one thread all the time. This means UNTIED tasks must be
tied and, if CPYFN is non-NULL, IF(0) must be forced, as CPYFN
might be running on a different thread than FN. */
if (cpyfn)
if_clause = false;
flags &= ~GOMP_TASK_FLAG_UNTIED;
#endif
/* If parallel or taskgroup has been cancelled, don't start new tasks. */
if (__builtin_expect (gomp_cancel_var, 0) && team)
{
if (gomp_team_barrier_cancelled (&team->barrier))
return;
if (thr->task->taskgroup)
{
if (thr->task->taskgroup->cancelled)
return;
if (thr->task->taskgroup->workshare
&& thr->task->taskgroup->prev
&& thr->task->taskgroup->prev->cancelled)
return;
}
}
if ((flags & GOMP_TASK_FLAG_PRIORITY) == 0)
priority = 0;
else if (priority > gomp_max_task_priority_var)
priority = gomp_max_task_priority_var;
if (!if_clause || team == NULL
|| (thr->task && thr->task->final_task)
|| team->task_count > 64 * team->nthreads)
{
struct gomp_task task;
/* If there are depend clauses and earlier deferred sibling tasks
with depend clauses, check whether there is a dependency on any of
them; if there is, we need to wait for them. There is no need to
handle depend clauses for non-deferred tasks other than this, because
the parent task is suspended until the child task finishes and thus
it can't start further child tasks. */
if ((flags & GOMP_TASK_FLAG_DEPEND)
&& thr->task && thr->task->depend_hash)
gomp_task_maybe_wait_for_dependencies (depend);
gomp_init_task (&task, thr->task, gomp_icv (false));
task.kind = GOMP_TASK_UNDEFERRED;
task.final_task = (thr->task && thr->task->final_task)
|| (flags & GOMP_TASK_FLAG_FINAL);
task.priority = priority;
if (thr->task)
{
task.in_tied_task = thr->task->in_tied_task;
task.taskgroup = thr->task->taskgroup;
}
thr->task = &task;
if (__builtin_expect (cpyfn != NULL, 0))
{
char buf[arg_size + arg_align - 1];
char *arg = (char *) (((uintptr_t) buf + arg_align - 1)
& ~(uintptr_t) (arg_align - 1));
cpyfn (arg, data);
fn (arg);
}
else
fn (data);
/* Access to "children" is normally done inside a task_lock
mutex region, but the only way this particular task.children
can be set is if this thread's task work function (fn)
creates children. So since the setter is *this* thread, we
need no barriers here when testing for non-NULL. We can have
task.children set by the current thread then changed by a
child thread, but seeing a stale non-NULL value is not a
problem. Once past the task_lock acquisition, this thread
will see the real value of task.children. */
if (!priority_queue_empty_p (&task.children_queue, MEMMODEL_RELAXED))
{
gomp_mutex_lock (&team->task_lock);
gomp_clear_parent (&task.children_queue);
gomp_mutex_unlock (&team->task_lock);
}
gomp_end_task ();
}
else
{
struct gomp_task *task;
struct gomp_task *parent = thr->task;
struct gomp_taskgroup *taskgroup = parent->taskgroup;
char *arg;
bool do_wake;
size_t depend_size = 0;
if (flags & GOMP_TASK_FLAG_DEPEND)
depend_size = ((uintptr_t) (depend[0] ? depend[0] : depend[1])
* sizeof (struct gomp_task_depend_entry));
task = gomp_malloc (sizeof (*task) + depend_size
+ arg_size + arg_align - 1);
arg = (char *) (((uintptr_t) (task + 1) + depend_size + arg_align - 1)
& ~(uintptr_t) (arg_align - 1));
gomp_init_task (task, parent, gomp_icv (false));
task->priority = priority;
task->kind = GOMP_TASK_UNDEFERRED;
task->in_tied_task = parent->in_tied_task;
task->taskgroup = taskgroup;
thr->task = task;
if (cpyfn)
{
cpyfn (arg, data);
task->copy_ctors_done = true;
}
else
memcpy (arg, data, arg_size);
thr->task = parent;
task->kind = GOMP_TASK_WAITING;
task->fn = fn;
task->fn_data = arg;
task->final_task = (flags & GOMP_TASK_FLAG_FINAL) >> 1;
gomp_mutex_lock (&team->task_lock);
/* If parallel or taskgroup has been cancelled, don't start new
tasks. */
if (__builtin_expect (gomp_cancel_var, 0)
&& !task->copy_ctors_done)
{
if (gomp_team_barrier_cancelled (&team->barrier))
{
do_cancel:
gomp_mutex_unlock (&team->task_lock);
gomp_finish_task (task);
free (task);
return;
}
if (taskgroup)
{
if (taskgroup->cancelled)
goto do_cancel;
if (taskgroup->workshare
&& taskgroup->prev
&& taskgroup->prev->cancelled)
goto do_cancel;
}
}
if (taskgroup)
taskgroup->num_children++;
if (depend_size)
{
gomp_task_handle_depend (task, parent, depend);
if (task->num_dependees)
{
/* Tasks that depend on other tasks are not put into the
various waiting queues, so we are done for now. Said
tasks are instead put into the queues via
gomp_task_run_post_handle_dependers () after their
dependencies have been satisfied, at which point they
can be picked up by the various scheduling
points. */
gomp_mutex_unlock (&team->task_lock);
return;
}
}
priority_queue_insert (PQ_CHILDREN, &parent->children_queue,
task, priority,
PRIORITY_INSERT_BEGIN,
/*adjust_parent_depends_on=*/false,
task->parent_depends_on);
if (taskgroup)
priority_queue_insert (PQ_TASKGROUP, &taskgroup->taskgroup_queue,
task, priority,
PRIORITY_INSERT_BEGIN,
/*adjust_parent_depends_on=*/false,
task->parent_depends_on);
priority_queue_insert (PQ_TEAM, &team->task_queue,
task, priority,
PRIORITY_INSERT_END,
/*adjust_parent_depends_on=*/false,
task->parent_depends_on);
++team->task_count;
++team->task_queued_count;
gomp_team_barrier_set_task_pending (&team->barrier);
do_wake = team->task_running_count + !parent->in_tied_task
< team->nthreads;
gomp_mutex_unlock (&team->task_lock);
if (do_wake)
gomp_team_barrier_wake (&team->barrier, 1);
}
}
ialias (GOMP_taskgroup_start)
ialias (GOMP_taskgroup_end)
ialias (GOMP_taskgroup_reduction_register)
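/* taskloop.c below is used as a textual template: the first inclusion
instantiates GOMP_taskloop for signed long iteration variables, while
the second, with GOMP_taskloop redefined to GOMP_taskloop_ull,
instantiates it for unsigned long long ones. */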
#define TYPE long
#define UTYPE unsigned long
#define TYPE_is_long 1
#include "taskloop.c"
#undef TYPE
#undef UTYPE
#undef TYPE_is_long
#define TYPE unsigned long long
#define UTYPE TYPE
#define GOMP_taskloop GOMP_taskloop_ull
#include "taskloop.c"
#undef TYPE
#undef UTYPE
#undef GOMP_taskloop
static void inline
priority_queue_move_task_first (enum priority_queue_type type,
struct priority_queue *head,
struct gomp_task *task)
{
#if _LIBGOMP_CHECKING_
if (!priority_queue_task_in_queue_p (type, head, task))
gomp_fatal ("Attempt to move first missing task %p", task);
#endif
struct priority_list *list;
if (priority_queue_multi_p (head))
{
list = priority_queue_lookup_priority (head, task->priority);
#if _LIBGOMP_CHECKING_
if (!list)
gomp_fatal ("Unable to find priority %d", task->priority);
#endif
}
else
list = &head->l;
priority_list_remove (list, task_to_priority_node (type, task), 0);
priority_list_insert (type, list, task, task->priority,
PRIORITY_INSERT_BEGIN, type == PQ_CHILDREN,
task->parent_depends_on);
}
/* Actual body of GOMP_PLUGIN_target_task_completion that is executed
with team->task_lock held, or is executed in the thread that called
gomp_target_task_fn if GOMP_PLUGIN_target_task_completion has been
run before it acquires team->task_lock. */
static void
gomp_target_task_completion (struct gomp_team *team, struct gomp_task *task)
{
struct gomp_task *parent = task->parent;
if (parent)
priority_queue_move_task_first (PQ_CHILDREN, &parent->children_queue,
task);
struct gomp_taskgroup *taskgroup = task->taskgroup;
if (taskgroup)
priority_queue_move_task_first (PQ_TASKGROUP, &taskgroup->taskgroup_queue,
task);
priority_queue_insert (PQ_TEAM, &team->task_queue, task, task->priority,
PRIORITY_INSERT_BEGIN, false,
task->parent_depends_on);
task->kind = GOMP_TASK_WAITING;
if (parent && parent->taskwait)
{
if (parent->taskwait->in_taskwait)
{
/* One more task has had its dependencies met.
Inform any waiters. */
parent->taskwait->in_taskwait = false;
gomp_sem_post (&parent->taskwait->taskwait_sem);
}
else if (parent->taskwait->in_depend_wait)
{
/* One more task has had its dependencies met.
Inform any waiters. */
parent->taskwait->in_depend_wait = false;
gomp_sem_post (&parent->taskwait->taskwait_sem);
}
}
if (taskgroup && taskgroup->in_taskgroup_wait)
{
/* One more task has had its dependencies met.
Inform any waiters. */
taskgroup->in_taskgroup_wait = false;
gomp_sem_post (&taskgroup->taskgroup_sem);
}
++team->task_queued_count;
gomp_team_barrier_set_task_pending (&team->barrier);
/* I'm afraid this can't be done after releasing team->task_lock,
as gomp_target_task_completion is run from an unrelated thread and
therefore in between gomp_mutex_unlock and gomp_team_barrier_wake
the team could be gone already. */
if (team->nthreads > team->task_running_count)
gomp_team_barrier_wake (&team->barrier, 1);
}
/* Signal that a target task TTASK has completed the asynchronously
running phase and should be requeued as a task to handle the
variable unmapping. */
void
GOMP_PLUGIN_target_task_completion (void *data)
{
struct gomp_target_task *ttask = (struct gomp_target_task *) data;
struct gomp_task *task = ttask->task;
struct gomp_team *team = ttask->team;
gomp_mutex_lock (&team->task_lock);
if (ttask->state == GOMP_TARGET_TASK_READY_TO_RUN)
{
ttask->state = GOMP_TARGET_TASK_FINISHED;
gomp_mutex_unlock (&team->task_lock);
return;
}
ttask->state = GOMP_TARGET_TASK_FINISHED;
gomp_target_task_completion (team, task);
gomp_mutex_unlock (&team->task_lock);
}
static void gomp_task_run_post_handle_depend_hash (struct gomp_task *);
/* Called for nowait target tasks. */
bool
gomp_create_target_task (struct gomp_device_descr *devicep,
void (*fn) (void *), size_t mapnum, void **hostaddrs,
size_t *sizes, unsigned short *kinds,
unsigned int flags, void **depend, void **args,
enum gomp_target_task_state state)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
/* If parallel or taskgroup has been cancelled, don't start new tasks. */
if (__builtin_expect (gomp_cancel_var, 0) && team)
{
if (gomp_team_barrier_cancelled (&team->barrier))
return true;
if (thr->task->taskgroup)
{
if (thr->task->taskgroup->cancelled)
return true;
if (thr->task->taskgroup->workshare
&& thr->task->taskgroup->prev
&& thr->task->taskgroup->prev->cancelled)
return true;
}
}
struct gomp_target_task *ttask;
struct gomp_task *task;
struct gomp_task *parent = thr->task;
struct gomp_taskgroup *taskgroup = parent->taskgroup;
bool do_wake;
size_t depend_size = 0;
uintptr_t depend_cnt = 0;
size_t tgt_align = 0, tgt_size = 0;
if (depend != NULL)
{
depend_cnt = (uintptr_t) (depend[0] ? depend[0] : depend[1]);
depend_size = depend_cnt * sizeof (struct gomp_task_depend_entry);
}
if (fn)
{
/* GOMP_MAP_FIRSTPRIVATE mappings need to be copied first, as they
are firstprivate on the target task. */
size_t i;
for (i = 0; i < mapnum; i++)
if ((kinds[i] & 0xff) == GOMP_MAP_FIRSTPRIVATE)
{
size_t align = (size_t) 1 << (kinds[i] >> 8);
if (tgt_align < align)
tgt_align = align;
tgt_size = (tgt_size + align - 1) & ~(align - 1);
tgt_size += sizes[i];
}
if (tgt_align)
tgt_size += tgt_align - 1;
else
tgt_size = 0;
}
task = gomp_malloc (sizeof (*task) + depend_size
+ sizeof (*ttask)
+ mapnum * (sizeof (void *) + sizeof (size_t)
+ sizeof (unsigned short))
+ tgt_size);
gomp_init_task (task, parent, gomp_icv (false));
task->priority = 0;
task->kind = GOMP_TASK_WAITING;
task->in_tied_task = parent->in_tied_task;
task->taskgroup = taskgroup;
ttask = (struct gomp_target_task *) &task->depend[depend_cnt];
ttask->devicep = devicep;
ttask->fn = fn;
ttask->mapnum = mapnum;
ttask->args = args;
memcpy (ttask->hostaddrs, hostaddrs, mapnum * sizeof (void *));
ttask->sizes = (size_t *) &ttask->hostaddrs[mapnum];
memcpy (ttask->sizes, sizes, mapnum * sizeof (size_t));
ttask->kinds = (unsigned short *) &ttask->sizes[mapnum];
memcpy (ttask->kinds, kinds, mapnum * sizeof (unsigned short));
if (tgt_align)
{
char *tgt = (char *) &ttask->kinds[mapnum];
size_t i;
uintptr_t al = (uintptr_t) tgt & (tgt_align - 1);
if (al)
tgt += tgt_align - al;
tgt_size = 0;
for (i = 0; i < mapnum; i++)
if ((kinds[i] & 0xff) == GOMP_MAP_FIRSTPRIVATE)
{
size_t align = (size_t) 1 << (kinds[i] >> 8);
tgt_size = (tgt_size + align - 1) & ~(align - 1);
memcpy (tgt + tgt_size, hostaddrs[i], sizes[i]);
ttask->hostaddrs[i] = tgt + tgt_size;
tgt_size = tgt_size + sizes[i];
}
}
ttask->flags = flags;
ttask->state = state;
ttask->task = task;
ttask->team = team;
task->fn = NULL;
task->fn_data = ttask;
task->final_task = 0;
gomp_mutex_lock (&team->task_lock);
/* If parallel or taskgroup has been cancelled, don't start new tasks. */
if (__builtin_expect (gomp_cancel_var, 0))
{
if (gomp_team_barrier_cancelled (&team->barrier))
{
do_cancel:
gomp_mutex_unlock (&team->task_lock);
gomp_finish_task (task);
free (task);
return true;
}
if (taskgroup)
{
if (taskgroup->cancelled)
goto do_cancel;
if (taskgroup->workshare
&& taskgroup->prev
&& taskgroup->prev->cancelled)
goto do_cancel;
}
}
if (depend_size)
{
gomp_task_handle_depend (task, parent, depend);
if (task->num_dependees)
{
if (taskgroup)
taskgroup->num_children++;
gomp_mutex_unlock (&team->task_lock);
return true;
}
}
if (state == GOMP_TARGET_TASK_DATA)
{
gomp_task_run_post_handle_depend_hash (task);
gomp_mutex_unlock (&team->task_lock);
gomp_finish_task (task);
free (task);
return false;
}
if (taskgroup)
taskgroup->num_children++;
/* For async offloading, if we don't need to wait for dependencies,
run the gomp_target_task_fn right away, essentially schedule the
mapping part of the task in the current thread. */
if (devicep != NULL
&& (devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
{
priority_queue_insert (PQ_CHILDREN, &parent->children_queue, task, 0,
PRIORITY_INSERT_END,
/*adjust_parent_depends_on=*/false,
task->parent_depends_on);
if (taskgroup)
priority_queue_insert (PQ_TASKGROUP, &taskgroup->taskgroup_queue,
task, 0, PRIORITY_INSERT_END,
/*adjust_parent_depends_on=*/false,
task->parent_depends_on);
task->pnode[PQ_TEAM].next = NULL;
task->pnode[PQ_TEAM].prev = NULL;
task->kind = GOMP_TASK_TIED;
++team->task_count;
gomp_mutex_unlock (&team->task_lock);
thr->task = task;
gomp_target_task_fn (task->fn_data);
thr->task = parent;
gomp_mutex_lock (&team->task_lock);
task->kind = GOMP_TASK_ASYNC_RUNNING;
/* If GOMP_PLUGIN_target_task_completion has run already
in between gomp_target_task_fn and the mutex lock,
perform the requeuing here. */
if (ttask->state == GOMP_TARGET_TASK_FINISHED)
gomp_target_task_completion (team, task);
else
ttask->state = GOMP_TARGET_TASK_RUNNING;
gomp_mutex_unlock (&team->task_lock);
return true;
}
priority_queue_insert (PQ_CHILDREN, &parent->children_queue, task, 0,
PRIORITY_INSERT_BEGIN,
/*adjust_parent_depends_on=*/false,
task->parent_depends_on);
if (taskgroup)
priority_queue_insert (PQ_TASKGROUP, &taskgroup->taskgroup_queue, task, 0,
PRIORITY_INSERT_BEGIN,
/*adjust_parent_depends_on=*/false,
task->parent_depends_on);
priority_queue_insert (PQ_TEAM, &team->task_queue, task, 0,
PRIORITY_INSERT_END,
/*adjust_parent_depends_on=*/false,
task->parent_depends_on);
++team->task_count;
++team->task_queued_count;
gomp_team_barrier_set_task_pending (&team->barrier);
do_wake = team->task_running_count + !parent->in_tied_task
< team->nthreads;
gomp_mutex_unlock (&team->task_lock);
if (do_wake)
gomp_team_barrier_wake (&team->barrier, 1);
return true;
}
/* Given a parent_depends_on task in LIST, move it to the front of its
priority so it is run as soon as possible.
Care is taken to update the list's LAST_PARENT_DEPENDS_ON field.
We rearrange the queue such that all parent_depends_on tasks are
first, and last_parent_depends_on points to the last such task we
rearranged. For example, given the following tasks in a queue
where PD[123] are the parent_depends_on tasks:
task->children
|
V
C1 -> C2 -> C3 -> PD1 -> PD2 -> PD3 -> C4
We rearrange such that:
task->children
| +--- last_parent_depends_on
| |
V V
PD1 -> PD2 -> PD3 -> C1 -> C2 -> C3 -> C4. */
static void inline
priority_list_upgrade_task (struct priority_list *list,
struct priority_node *node)
{
struct priority_node *last_parent_depends_on
= list->last_parent_depends_on;
if (last_parent_depends_on)
{
node->prev->next = node->next;
node->next->prev = node->prev;
node->prev = last_parent_depends_on;
node->next = last_parent_depends_on->next;
node->prev->next = node;
node->next->prev = node;
}
else if (node != list->tasks)
{
node->prev->next = node->next;
node->next->prev = node->prev;
node->prev = list->tasks->prev;
node->next = list->tasks;
list->tasks = node;
node->prev->next = node;
node->next->prev = node;
}
list->last_parent_depends_on = node;
}
/* Given a parent_depends_on TASK in its parent's children_queue, move
it to the front of its priority so it is run as soon as possible.
PARENT is passed as an optimization.
(This function could be defined in priority_queue.c, but we want it
inlined, and putting it in priority_queue.h is not an option, given
that gomp_task has not been properly defined at that point). */
static void inline
priority_queue_upgrade_task (struct gomp_task *task,
struct gomp_task *parent)
{
struct priority_queue *head = &parent->children_queue;
struct priority_node *node = &task->pnode[PQ_CHILDREN];
#if _LIBGOMP_CHECKING_
if (!task->parent_depends_on)
gomp_fatal ("priority_queue_upgrade_task: task must be a "
"parent_depends_on task");
if (!priority_queue_task_in_queue_p (PQ_CHILDREN, head, task))
gomp_fatal ("priority_queue_upgrade_task: cannot find task=%p", task);
#endif
if (priority_queue_multi_p (head))
{
struct priority_list *list
= priority_queue_lookup_priority (head, task->priority);
priority_list_upgrade_task (list, node);
}
else
priority_list_upgrade_task (&head->l, node);
}
/* Given a CHILD_TASK in LIST that is about to be executed, move it out of
the way in LIST so that other tasks can be considered for
execution. LIST contains tasks of type TYPE.
Care is taken to update the queue's LAST_PARENT_DEPENDS_ON field
if applicable. */
static void inline
priority_list_downgrade_task (enum priority_queue_type type,
struct priority_list *list,
struct gomp_task *child_task)
{
struct priority_node *node = task_to_priority_node (type, child_task);
if (list->tasks == node)
list->tasks = node->next;
else if (node->next != list->tasks)
{
/* The task in NODE is about to become TIED and TIED tasks
cannot come before WAITING tasks. If we're about to
leave the queue in such an indeterminate state, rewire
things appropriately. However, a TIED task at the end is
perfectly fine. */
struct gomp_task *next_task = priority_node_to_task (type, node->next);
if (next_task->kind == GOMP_TASK_WAITING)
{
/* Remove from list. */
node->prev->next = node->next;
node->next->prev = node->prev;
/* Rewire at the end. */
node->next = list->tasks;
node->prev = list->tasks->prev;
list->tasks->prev->next = node;
list->tasks->prev = node;
}
}
/* If the current task is the last_parent_depends_on for its
priority, adjust last_parent_depends_on appropriately. */
if (__builtin_expect (child_task->parent_depends_on, 0)
&& list->last_parent_depends_on == node)
{
struct gomp_task *prev_child = priority_node_to_task (type, node->prev);
if (node->prev != node
&& prev_child->kind == GOMP_TASK_WAITING
&& prev_child->parent_depends_on)
list->last_parent_depends_on = node->prev;
else
{
/* There are no more parent_depends_on entries waiting
to run, clear the list. */
list->last_parent_depends_on = NULL;
}
}
}
/* Given a TASK in HEAD that is about to be executed, move it out of
the way so that other tasks can be considered for execution. HEAD
contains tasks of type TYPE.
Care is taken to update the queue's LAST_PARENT_DEPENDS_ON field
if applicable.
(This function could be defined in priority_queue.c, but we want it
inlined, and putting it in priority_queue.h is not an option, given
that gomp_task has not been properly defined at that point). */
static void inline
priority_queue_downgrade_task (enum priority_queue_type type,
struct priority_queue *head,
struct gomp_task *task)
{
#if _LIBGOMP_CHECKING_
if (!priority_queue_task_in_queue_p (type, head, task))
gomp_fatal ("Attempt to downgrade missing task %p", task);
#endif
if (priority_queue_multi_p (head))
{
struct priority_list *list
= priority_queue_lookup_priority (head, task->priority);
priority_list_downgrade_task (type, list, task);
}
else
priority_list_downgrade_task (type, &head->l, task);
}
/* Set up CHILD_TASK to execute. This is done by setting the task to
TIED, and updating all relevant queues so that CHILD_TASK is no
longer chosen for scheduling. Also, remove CHILD_TASK from the
overall team task queue entirely.
Return TRUE if task or its containing taskgroup has been
cancelled. */
static inline bool
gomp_task_run_pre (struct gomp_task *child_task, struct gomp_task *parent,
struct gomp_team *team)
{
#if _LIBGOMP_CHECKING_
if (child_task->parent)
priority_queue_verify (PQ_CHILDREN,
&child_task->parent->children_queue, true);
if (child_task->taskgroup)
priority_queue_verify (PQ_TASKGROUP,
&child_task->taskgroup->taskgroup_queue, false);
priority_queue_verify (PQ_TEAM, &team->task_queue, false);
#endif
/* Task is about to go tied, move it out of the way. */
if (parent)
priority_queue_downgrade_task (PQ_CHILDREN, &parent->children_queue,
child_task);
/* Task is about to go tied, move it out of the way. */
struct gomp_taskgroup *taskgroup = child_task->taskgroup;
if (taskgroup)
priority_queue_downgrade_task (PQ_TASKGROUP, &taskgroup->taskgroup_queue,
child_task);
priority_queue_remove (PQ_TEAM, &team->task_queue, child_task,
MEMMODEL_RELAXED);
child_task->pnode[PQ_TEAM].next = NULL;
child_task->pnode[PQ_TEAM].prev = NULL;
child_task->kind = GOMP_TASK_TIED;
if (--team->task_queued_count == 0)
gomp_team_barrier_clear_task_pending (&team->barrier);
if (__builtin_expect (gomp_cancel_var, 0)
&& !child_task->copy_ctors_done)
{
if (gomp_team_barrier_cancelled (&team->barrier))
return true;
if (taskgroup)
{
if (taskgroup->cancelled)
return true;
if (taskgroup->workshare
&& taskgroup->prev
&& taskgroup->prev->cancelled)
return true;
}
}
return false;
}
static void
gomp_task_run_post_handle_depend_hash (struct gomp_task *child_task)
{
struct gomp_task *parent = child_task->parent;
size_t i;
for (i = 0; i < child_task->depend_count; i++)
if (!child_task->depend[i].redundant)
{
if (child_task->depend[i].next)
child_task->depend[i].next->prev = child_task->depend[i].prev;
if (child_task->depend[i].prev)
child_task->depend[i].prev->next = child_task->depend[i].next;
else
{
hash_entry_type *slot
= htab_find_slot (&parent->depend_hash, &child_task->depend[i],
NO_INSERT);
if (*slot != &child_task->depend[i])
abort ();
if (child_task->depend[i].next)
*slot = child_task->depend[i].next;
else
htab_clear_slot (parent->depend_hash, slot);
}
}
}
/* After a CHILD_TASK has been run, adjust the dependency queue for
each task that depends on CHILD_TASK, to record the fact that there
is one less dependency to worry about. If a task that depended on
CHILD_TASK now has no dependencies, place it in the various queues
so it gets scheduled to run.
TEAM is the team to which CHILD_TASK belongs. */
static size_t
gomp_task_run_post_handle_dependers (struct gomp_task *child_task,
struct gomp_team *team)
{
struct gomp_task *parent = child_task->parent;
size_t i, count = child_task->dependers->n_elem, ret = 0;
for (i = 0; i < count; i++)
{
struct gomp_task *task = child_task->dependers->elem[i];
/* CHILD_TASK satisfies a dependency for TASK. Keep track of
TASK's remaining dependencies. Once TASK has no other
dependencies, put it into the various queues so it will get
scheduled for execution. */
if (--task->num_dependees != 0)
continue;
struct gomp_taskgroup *taskgroup = task->taskgroup;
if (parent)
{
priority_queue_insert (PQ_CHILDREN, &parent->children_queue,
task, task->priority,
PRIORITY_INSERT_BEGIN,
/*adjust_parent_depends_on=*/true,
task->parent_depends_on);
if (parent->taskwait)
{
if (parent->taskwait->in_taskwait)
{
/* One more task has had its dependencies met.
Inform any waiters. */
parent->taskwait->in_taskwait = false;
gomp_sem_post (&parent->taskwait->taskwait_sem);
}
else if (parent->taskwait->in_depend_wait)
{
/* One more task has had its dependencies met.
Inform any waiters. */
parent->taskwait->in_depend_wait = false;
gomp_sem_post (&parent->taskwait->taskwait_sem);
}
}
}
if (taskgroup)
{
priority_queue_insert (PQ_TASKGROUP, &taskgroup->taskgroup_queue,
task, task->priority,
PRIORITY_INSERT_BEGIN,
/*adjust_parent_depends_on=*/false,
task->parent_depends_on);
if (taskgroup->in_taskgroup_wait)
{
/* One more task has had its dependencies met.
Inform any waiters. */
taskgroup->in_taskgroup_wait = false;
gomp_sem_post (&taskgroup->taskgroup_sem);
}
}
priority_queue_insert (PQ_TEAM, &team->task_queue,
task, task->priority,
PRIORITY_INSERT_END,
/*adjust_parent_depends_on=*/false,
task->parent_depends_on);
++team->task_count;
++team->task_queued_count;
++ret;
}
free (child_task->dependers);
child_task->dependers = NULL;
if (ret > 1)
gomp_team_barrier_set_task_pending (&team->barrier);
return ret;
}
static inline size_t
gomp_task_run_post_handle_depend (struct gomp_task *child_task,
struct gomp_team *team)
{
if (child_task->depend_count == 0)
return 0;
/* If parent is gone already, the hash table is freed and nothing
will use it anymore, so there is no need to remove anything from it. */
if (child_task->parent != NULL)
gomp_task_run_post_handle_depend_hash (child_task);
if (child_task->dependers == NULL)
return 0;
return gomp_task_run_post_handle_dependers (child_task, team);
}
/* Remove CHILD_TASK from its parent. */
static inline void
gomp_task_run_post_remove_parent (struct gomp_task *child_task)
{
struct gomp_task *parent = child_task->parent;
if (parent == NULL)
return;
/* If this was the last task the parent was depending on,
synchronize with gomp_task_maybe_wait_for_dependencies so it can
clean up and return. */
if (__builtin_expect (child_task->parent_depends_on, 0)
&& --parent->taskwait->n_depend == 0
&& parent->taskwait->in_depend_wait)
{
parent->taskwait->in_depend_wait = false;
gomp_sem_post (&parent->taskwait->taskwait_sem);
}
if (priority_queue_remove (PQ_CHILDREN, &parent->children_queue,
child_task, MEMMODEL_RELEASE)
&& parent->taskwait && parent->taskwait->in_taskwait)
{
parent->taskwait->in_taskwait = false;
gomp_sem_post (&parent->taskwait->taskwait_sem);
}
child_task->pnode[PQ_CHILDREN].next = NULL;
child_task->pnode[PQ_CHILDREN].prev = NULL;
}
/* Remove CHILD_TASK from its taskgroup. */
static inline void
gomp_task_run_post_remove_taskgroup (struct gomp_task *child_task)
{
struct gomp_taskgroup *taskgroup = child_task->taskgroup;
if (taskgroup == NULL)
return;
bool empty = priority_queue_remove (PQ_TASKGROUP,
&taskgroup->taskgroup_queue,
child_task, MEMMODEL_RELAXED);
child_task->pnode[PQ_TASKGROUP].next = NULL;
child_task->pnode[PQ_TASKGROUP].prev = NULL;
if (taskgroup->num_children > 1)
--taskgroup->num_children;
else
{
/* We access taskgroup->num_children in GOMP_taskgroup_end
outside of the task lock mutex region, so we
need a release barrier here to ensure memory
written by child_task->fn above is flushed
before the 0 is written. */
__atomic_store_n (&taskgroup->num_children, 0, MEMMODEL_RELEASE);
}
if (empty && taskgroup->in_taskgroup_wait)
{
taskgroup->in_taskgroup_wait = false;
gomp_sem_post (&taskgroup->taskgroup_sem);
}
}
void
gomp_barrier_handle_tasks (gomp_barrier_state_t state)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
struct gomp_task *task = thr->task;
struct gomp_task *child_task = NULL;
struct gomp_task *to_free = NULL;
int do_wake = 0;
gomp_mutex_lock (&team->task_lock);
if (gomp_barrier_last_thread (state))
{
if (team->task_count == 0)
{
gomp_team_barrier_done (&team->barrier, state);
gomp_mutex_unlock (&team->task_lock);
gomp_team_barrier_wake (&team->barrier, 0);
return;
}
gomp_team_barrier_set_waiting_for_tasks (&team->barrier);
}
while (1)
{
bool cancelled = false;
if (!priority_queue_empty_p (&team->task_queue, MEMMODEL_RELAXED))
{
bool ignored;
child_task
= priority_queue_next_task (PQ_TEAM, &team->task_queue,
PQ_IGNORED, NULL,
&ignored);
cancelled = gomp_task_run_pre (child_task, child_task->parent,
team);
if (__builtin_expect (cancelled, 0))
{
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
goto finish_cancelled;
}
team->task_running_count++;
child_task->in_tied_task = true;
}
gomp_mutex_unlock (&team->task_lock);
if (do_wake)
{
gomp_team_barrier_wake (&team->barrier, do_wake);
do_wake = 0;
}
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
if (child_task)
{
thr->task = child_task;
if (__builtin_expect (child_task->fn == NULL, 0))
{
if (gomp_target_task_fn (child_task->fn_data))
{
thr->task = task;
gomp_mutex_lock (&team->task_lock);
child_task->kind = GOMP_TASK_ASYNC_RUNNING;
team->task_running_count--;
struct gomp_target_task *ttask
= (struct gomp_target_task *) child_task->fn_data;
/* If GOMP_PLUGIN_target_task_completion has run already
in between gomp_target_task_fn and the mutex lock,
perform the requeuing here. */
if (ttask->state == GOMP_TARGET_TASK_FINISHED)
gomp_target_task_completion (team, child_task);
else
ttask->state = GOMP_TARGET_TASK_RUNNING;
child_task = NULL;
continue;
}
}
else
child_task->fn (child_task->fn_data);
thr->task = task;
}
else
return;
gomp_mutex_lock (&team->task_lock);
if (child_task)
{
finish_cancelled:;
size_t new_tasks
= gomp_task_run_post_handle_depend (child_task, team);
gomp_task_run_post_remove_parent (child_task);
gomp_clear_parent (&child_task->children_queue);
gomp_task_run_post_remove_taskgroup (child_task);
to_free = child_task;
child_task = NULL;
if (!cancelled)
team->task_running_count--;
if (new_tasks > 1)
{
do_wake = team->nthreads - team->task_running_count;
if (do_wake > new_tasks)
do_wake = new_tasks;
}
if (--team->task_count == 0
&& gomp_team_barrier_waiting_for_tasks (&team->barrier))
{
gomp_team_barrier_done (&team->barrier, state);
gomp_mutex_unlock (&team->task_lock);
gomp_team_barrier_wake (&team->barrier, 0);
gomp_mutex_lock (&team->task_lock);
}
}
}
}
/* Called when encountering a taskwait directive.
Wait for all children of the current task. */
void
GOMP_taskwait (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
struct gomp_task *task = thr->task;
struct gomp_task *child_task = NULL;
struct gomp_task *to_free = NULL;
struct gomp_taskwait taskwait;
int do_wake = 0;
/* The acquire barrier on load of task->children here synchronizes
with the write of a NULL in gomp_task_run_post_remove_parent. It is
not necessary that we synchronize with other non-NULL writes at
this point, but we must ensure that all writes to memory by a
child thread task work function are seen before we exit from
GOMP_taskwait. */
if (task == NULL
|| priority_queue_empty_p (&task->children_queue, MEMMODEL_ACQUIRE))
return;
memset (&taskwait, 0, sizeof (taskwait));
bool child_q = false;
gomp_mutex_lock (&team->task_lock);
while (1)
{
bool cancelled = false;
if (priority_queue_empty_p (&task->children_queue, MEMMODEL_RELAXED))
{
bool destroy_taskwait = task->taskwait != NULL;
task->taskwait = NULL;
gomp_mutex_unlock (&team->task_lock);
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
}
if (destroy_taskwait)
gomp_sem_destroy (&taskwait.taskwait_sem);
return;
}
struct gomp_task *next_task
= priority_queue_next_task (PQ_CHILDREN, &task->children_queue,
PQ_TEAM, &team->task_queue, &child_q);
if (next_task->kind == GOMP_TASK_WAITING)
{
child_task = next_task;
cancelled
= gomp_task_run_pre (child_task, task, team);
if (__builtin_expect (cancelled, 0))
{
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
goto finish_cancelled;
}
}
else
{
/* All tasks we are waiting for are either running in other
threads, or they are tasks that have not had their
dependencies met (so they're not even in the queue). Wait
for them. */
if (task->taskwait == NULL)
{
taskwait.in_depend_wait = false;
gomp_sem_init (&taskwait.taskwait_sem, 0);
task->taskwait = &taskwait;
}
taskwait.in_taskwait = true;
}
gomp_mutex_unlock (&team->task_lock);
if (do_wake)
{
gomp_team_barrier_wake (&team->barrier, do_wake);
do_wake = 0;
}
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
if (child_task)
{
thr->task = child_task;
if (__builtin_expect (child_task->fn == NULL, 0))
{
if (gomp_target_task_fn (child_task->fn_data))
{
thr->task = task;
gomp_mutex_lock (&team->task_lock);
child_task->kind = GOMP_TASK_ASYNC_RUNNING;
struct gomp_target_task *ttask
= (struct gomp_target_task *) child_task->fn_data;
/* If GOMP_PLUGIN_target_task_completion has run already
in between gomp_target_task_fn and the mutex lock,
perform the requeuing here. */
if (ttask->state == GOMP_TARGET_TASK_FINISHED)
gomp_target_task_completion (team, child_task);
else
ttask->state = GOMP_TARGET_TASK_RUNNING;
child_task = NULL;
continue;
}
}
else
child_task->fn (child_task->fn_data);
thr->task = task;
}
else
gomp_sem_wait (&taskwait.taskwait_sem);
gomp_mutex_lock (&team->task_lock);
if (child_task)
{
finish_cancelled:;
size_t new_tasks
= gomp_task_run_post_handle_depend (child_task, team);
if (child_q)
{
priority_queue_remove (PQ_CHILDREN, &task->children_queue,
child_task, MEMMODEL_RELAXED);
child_task->pnode[PQ_CHILDREN].next = NULL;
child_task->pnode[PQ_CHILDREN].prev = NULL;
}
gomp_clear_parent (&child_task->children_queue);
gomp_task_run_post_remove_taskgroup (child_task);
to_free = child_task;
child_task = NULL;
team->task_count--;
if (new_tasks > 1)
{
do_wake = team->nthreads - team->task_running_count
- !task->in_tied_task;
if (do_wake > new_tasks)
do_wake = new_tasks;
}
}
}
}
/* Called when encountering a taskwait directive with depend clause(s).
Wait as if it were a mergeable included task construct with an empty
body. */
void
GOMP_taskwait_depend (void **depend)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
/* If parallel or taskgroup has been cancelled, return early. */
if (__builtin_expect (gomp_cancel_var, 0) && team)
{
if (gomp_team_barrier_cancelled (&team->barrier))
return;
if (thr->task->taskgroup)
{
if (thr->task->taskgroup->cancelled)
return;
if (thr->task->taskgroup->workshare
&& thr->task->taskgroup->prev
&& thr->task->taskgroup->prev->cancelled)
return;
}
}
if (thr->task && thr->task->depend_hash)
gomp_task_maybe_wait_for_dependencies (depend);
}
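/* Illustrative sketch, not part of the libgomp sources: a directive
such as "#pragma omp taskwait depend(in: x)" would be expected to be
lowered to something like
void *depend[] = { (void *) 1, /* one depend element in total */
(void *) 0, /* none of them out/inout */
&x };
GOMP_taskwait_depend (depend);
where x is a hypothetical shared variable. */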
/* An undeferred task is about to run. Wait for all tasks that this
undeferred task depends on.
This is done by first putting all known ready dependencies
(dependencies that have their own dependencies met) at the top of
the scheduling queues. Then we iterate through these imminently
ready tasks (and possibly other high priority tasks), and run them.
If we run out of ready dependencies to execute, we either wait for
the remaining dependencies to finish, or wait for them to get
scheduled so we can run them.
DEPEND is as in GOMP_task. */
void
gomp_task_maybe_wait_for_dependencies (void **depend)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_task *task = thr->task;
struct gomp_team *team = thr->ts.team;
struct gomp_task_depend_entry elem, *ent = NULL;
struct gomp_taskwait taskwait;
size_t orig_ndepend = (uintptr_t) depend[0];
size_t nout = (uintptr_t) depend[1];
size_t ndepend = orig_ndepend;
size_t normal = ndepend;
size_t n = 2;
size_t i;
size_t num_awaited = 0;
struct gomp_task *child_task = NULL;
struct gomp_task *to_free = NULL;
int do_wake = 0;
if (ndepend == 0)
{
ndepend = nout;
nout = (uintptr_t) depend[2] + (uintptr_t) depend[3];
normal = nout + (uintptr_t) depend[4];
n = 5;
}
gomp_mutex_lock (&team->task_lock);
for (i = 0; i < ndepend; i++)
{
elem.addr = depend[i + n];
elem.is_in = i >= nout;
if (__builtin_expect (i >= normal, 0))
{
void **d = (void **) elem.addr;
switch ((uintptr_t) d[1])
{
case GOMP_DEPEND_IN:
break;
case GOMP_DEPEND_OUT:
case GOMP_DEPEND_INOUT:
case GOMP_DEPEND_MUTEXINOUTSET:
elem.is_in = 0;
break;
default:
gomp_fatal ("unknown omp_depend_t dependence type %d",
(int) (uintptr_t) d[1]);
}
elem.addr = d[0];
}
ent = htab_find (task->depend_hash, &elem);
for (; ent; ent = ent->next)
if (elem.is_in && ent->is_in)
continue;
else
{
struct gomp_task *tsk = ent->task;
if (!tsk->parent_depends_on)
{
tsk->parent_depends_on = true;
++num_awaited;
/* If dependency TSK itself has no dependencies and is
ready to run, move it up front so that we run it as
soon as possible. */
if (tsk->num_dependees == 0 && tsk->kind == GOMP_TASK_WAITING)
priority_queue_upgrade_task (tsk, task);
}
}
}
if (num_awaited == 0)
{
gomp_mutex_unlock (&team->task_lock);
return;
}
memset (&taskwait, 0, sizeof (taskwait));
taskwait.n_depend = num_awaited;
gomp_sem_init (&taskwait.taskwait_sem, 0);
task->taskwait = &taskwait;
while (1)
{
bool cancelled = false;
if (taskwait.n_depend == 0)
{
task->taskwait = NULL;
gomp_mutex_unlock (&team->task_lock);
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
}
gomp_sem_destroy (&taskwait.taskwait_sem);
return;
}
/* Theoretically when we have multiple priorities, we should
choose between the highest priority item in
task->children_queue and team->task_queue here, so we should
use priority_queue_next_task(). However, since we are
running an undeferred task, perhaps that makes all tasks it
depends on undeferred, thus a priority of INF? This would
make it unnecessary to take anything but the dependencies
into account here.
On the other hand, if we want to use priority_queue_next_task(),
care should be taken to only use priority_queue_remove()
below if the task was actually removed from the children
queue. */
bool ignored;
struct gomp_task *next_task
= priority_queue_next_task (PQ_CHILDREN, &task->children_queue,
PQ_IGNORED, NULL, &ignored);
if (next_task->kind == GOMP_TASK_WAITING)
{
child_task = next_task;
cancelled
= gomp_task_run_pre (child_task, task, team);
if (__builtin_expect (cancelled, 0))
{
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
goto finish_cancelled;
}
}
else
/* All tasks we are waiting for are either running in other
threads, or they are tasks that have not had their
dependencies met (so they're not even in the queue). Wait
for them. */
taskwait.in_depend_wait = true;
gomp_mutex_unlock (&team->task_lock);
if (do_wake)
{
gomp_team_barrier_wake (&team->barrier, do_wake);
do_wake = 0;
}
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
if (child_task)
{
thr->task = child_task;
if (__builtin_expect (child_task->fn == NULL, 0))
{
if (gomp_target_task_fn (child_task->fn_data))
{
thr->task = task;
gomp_mutex_lock (&team->task_lock);
child_task->kind = GOMP_TASK_ASYNC_RUNNING;
struct gomp_target_task *ttask
= (struct gomp_target_task *) child_task->fn_data;
/* If GOMP_PLUGIN_target_task_completion has run already
in between gomp_target_task_fn and the mutex lock,
perform the requeuing here. */
if (ttask->state == GOMP_TARGET_TASK_FINISHED)
gomp_target_task_completion (team, child_task);
else
ttask->state = GOMP_TARGET_TASK_RUNNING;
child_task = NULL;
continue;
}
}
else
child_task->fn (child_task->fn_data);
thr->task = task;
}
else
gomp_sem_wait (&taskwait.taskwait_sem);
gomp_mutex_lock (&team->task_lock);
if (child_task)
{
finish_cancelled:;
size_t new_tasks
= gomp_task_run_post_handle_depend (child_task, team);
if (child_task->parent_depends_on)
--taskwait.n_depend;
priority_queue_remove (PQ_CHILDREN, &task->children_queue,
child_task, MEMMODEL_RELAXED);
child_task->pnode[PQ_CHILDREN].next = NULL;
child_task->pnode[PQ_CHILDREN].prev = NULL;
gomp_clear_parent (&child_task->children_queue);
gomp_task_run_post_remove_taskgroup (child_task);
to_free = child_task;
child_task = NULL;
team->task_count--;
if (new_tasks > 1)
{
do_wake = team->nthreads - team->task_running_count
- !task->in_tied_task;
if (do_wake > new_tasks)
do_wake = new_tasks;
}
}
}
}
/* Called when encountering a taskyield directive. */
void
GOMP_taskyield (void)
{
/* Nothing at the moment. */
}
static inline struct gomp_taskgroup *
gomp_taskgroup_init (struct gomp_taskgroup *prev)
{
struct gomp_taskgroup *taskgroup
= gomp_malloc (sizeof (struct gomp_taskgroup));
taskgroup->prev = prev;
priority_queue_init (&taskgroup->taskgroup_queue);
taskgroup->reductions = prev ? prev->reductions : NULL;
taskgroup->in_taskgroup_wait = false;
taskgroup->cancelled = false;
taskgroup->workshare = false;
taskgroup->num_children = 0;
gomp_sem_init (&taskgroup->taskgroup_sem, 0);
return taskgroup;
}
void
GOMP_taskgroup_start (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
struct gomp_task *task = thr->task;
/* If team is NULL, all tasks are executed as
GOMP_TASK_UNDEFERRED tasks and thus all child tasks of the
taskgroup and their descendant tasks will be finished
by the time GOMP_taskgroup_end is called. */
if (team == NULL)
return;
task->taskgroup = gomp_taskgroup_init (task->taskgroup);
}
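/* Illustrative sketch, not part of the libgomp sources: a compiler
would be expected to bracket the body of "#pragma omp taskgroup"
with these entry points, roughly
GOMP_taskgroup_start ();
... body, typically creating tasks via GOMP_task ...
GOMP_taskgroup_end ();
so that GOMP_taskgroup_end below waits for the taskgroup's children
and their descendant tasks. */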
void
GOMP_taskgroup_end (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
struct gomp_task *task = thr->task;
struct gomp_taskgroup *taskgroup;
struct gomp_task *child_task = NULL;
struct gomp_task *to_free = NULL;
int do_wake = 0;
if (team == NULL)
return;
taskgroup = task->taskgroup;
if (__builtin_expect (taskgroup == NULL, 0)
&& thr->ts.level == 0)
{
/* This can happen if GOMP_taskgroup_start is called when
thr->ts.team == NULL, but inside of the taskgroup there
is #pragma omp target nowait that creates an implicit
team with a single thread. In this case, we want to wait
for all outstanding tasks in this team. */
gomp_team_barrier_wait (&team->barrier);
return;
}
/* The acquire barrier on load of taskgroup->num_children here
synchronizes with the write of 0 in gomp_task_run_post_remove_taskgroup.
It is not necessary that we synchronize with other non-0 writes at
this point, but we must ensure that all writes to memory by a
child thread task work function are seen before we exit from
GOMP_taskgroup_end. */
if (__atomic_load_n (&taskgroup->num_children, MEMMODEL_ACQUIRE) == 0)
goto finish;
bool unused;
gomp_mutex_lock (&team->task_lock);
while (1)
{
bool cancelled = false;
if (priority_queue_empty_p (&taskgroup->taskgroup_queue,
MEMMODEL_RELAXED))
{
if (taskgroup->num_children)
{
if (priority_queue_empty_p (&task->children_queue,
MEMMODEL_RELAXED))
goto do_wait;
child_task
= priority_queue_next_task (PQ_CHILDREN, &task->children_queue,
PQ_TEAM, &team->task_queue,
&unused);
}
else
{
gomp_mutex_unlock (&team->task_lock);
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
}
goto finish;
}
}
else
child_task
= priority_queue_next_task (PQ_TASKGROUP, &taskgroup->taskgroup_queue,
PQ_TEAM, &team->task_queue, &unused);
if (child_task->kind == GOMP_TASK_WAITING)
{
cancelled
= gomp_task_run_pre (child_task, child_task->parent, team);
if (__builtin_expect (cancelled, 0))
{
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
goto finish_cancelled;
}
}
else
{
child_task = NULL;
do_wait:
/* All tasks we are waiting for are either running in other
threads, or they are tasks that have not had their
dependencies met (so they're not even in the queue). Wait
for them. */
taskgroup->in_taskgroup_wait = true;
}
gomp_mutex_unlock (&team->task_lock);
if (do_wake)
{
gomp_team_barrier_wake (&team->barrier, do_wake);
do_wake = 0;
}
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
if (child_task)
{
thr->task = child_task;
if (__builtin_expect (child_task->fn == NULL, 0))
{
if (gomp_target_task_fn (child_task->fn_data))
{
thr->task = task;
gomp_mutex_lock (&team->task_lock);
child_task->kind = GOMP_TASK_ASYNC_RUNNING;
struct gomp_target_task *ttask
= (struct gomp_target_task *) child_task->fn_data;
/* If GOMP_PLUGIN_target_task_completion has run already
in between gomp_target_task_fn and the mutex lock,
perform the requeuing here. */
if (ttask->state == GOMP_TARGET_TASK_FINISHED)
gomp_target_task_completion (team, child_task);
else
ttask->state = GOMP_TARGET_TASK_RUNNING;
child_task = NULL;
continue;
}
}
else
child_task->fn (child_task->fn_data);
thr->task = task;
}
else
gomp_sem_wait (&taskgroup->taskgroup_sem);
gomp_mutex_lock (&team->task_lock);
if (child_task)
{
finish_cancelled:;
size_t new_tasks
= gomp_task_run_post_handle_depend (child_task, team);
gomp_task_run_post_remove_parent (child_task);
gomp_clear_parent (&child_task->children_queue);
gomp_task_run_post_remove_taskgroup (child_task);
to_free = child_task;
child_task = NULL;
team->task_count--;
if (new_tasks > 1)
{
do_wake = team->nthreads - team->task_running_count
- !task->in_tied_task;
if (do_wake > new_tasks)
do_wake = new_tasks;
}
}
}
finish:
task->taskgroup = taskgroup->prev;
gomp_sem_destroy (&taskgroup->taskgroup_sem);
free (taskgroup);
}
static inline __attribute__((always_inline)) void
gomp_reduction_register (uintptr_t *data, uintptr_t *old, uintptr_t *orig,
unsigned nthreads)
{
size_t total_cnt = 0;
uintptr_t *d = data;
struct htab *old_htab = NULL, *new_htab;
do
{
if (__builtin_expect (orig != NULL, 0))
{
/* For worksharing task reductions, memory has been allocated
already by some other thread that encountered the construct
earlier. */
d[2] = orig[2];
d[6] = orig[6];
orig = (uintptr_t *) orig[4];
}
else
{
size_t sz = d[1] * nthreads;
/* Should use omp_alloc if d[3] is not -1. */
void *ptr = gomp_aligned_alloc (d[2], sz);
memset (ptr, '\0', sz);
d[2] = (uintptr_t) ptr;
d[6] = d[2] + sz;
}
d[5] = 0;
total_cnt += d[0];
if (d[4] == 0)
{
d[4] = (uintptr_t) old;
break;
}
else
d = (uintptr_t *) d[4];
}
while (1);
if (old && old[5])
{
old_htab = (struct htab *) old[5];
total_cnt += htab_elements (old_htab);
}
new_htab = htab_create (total_cnt);
if (old_htab)
{
/* Copy old hash table, like in htab_expand. */
hash_entry_type *p, *olimit;
new_htab->n_elements = htab_elements (old_htab);
olimit = old_htab->entries + old_htab->size;
p = old_htab->entries;
do
{
hash_entry_type x = *p;
if (x != HTAB_EMPTY_ENTRY && x != HTAB_DELETED_ENTRY)
*find_empty_slot_for_expand (new_htab, htab_hash (x)) = x;
p++;
}
while (p < olimit);
}
d = data;
do
{
size_t j;
for (j = 0; j < d[0]; ++j)
{
uintptr_t *p = d + 7 + j * 3;
p[2] = (uintptr_t) d;
/* Ugly hack: hash_entry_type is defined for the task dependencies,
which hash on the first element, a pointer. Here we need to hash
also on the first sizeof (uintptr_t) bytes, which contain a pointer.
Hide the cast from the compiler. */
hash_entry_type n;
__asm ("" : "=g" (n) : "0" (p));
*htab_find_slot (&new_htab, n, INSERT) = n;
}
if (d[4] == (uintptr_t) old)
break;
else
d = (uintptr_t *) d[4];
}
while (1);
d[5] = (uintptr_t) new_htab;
}
static void
gomp_create_artificial_team (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_task_icv *icv;
struct gomp_team *team = gomp_new_team (1);
struct gomp_task *task = thr->task;
icv = task ? &task->icv : &gomp_global_icv;
team->prev_ts = thr->ts;
thr->ts.team = team;
thr->ts.team_id = 0;
thr->ts.work_share = &team->work_shares[0];
thr->ts.last_work_share = NULL;
#ifdef HAVE_SYNC_BUILTINS
thr->ts.single_count = 0;
#endif
thr->ts.static_trip = 0;
thr->task = &team->implicit_task[0];
gomp_init_task (thr->task, NULL, icv);
if (task)
{
thr->task = task;
gomp_end_task ();
free (task);
thr->task = &team->implicit_task[0];
}
#ifdef LIBGOMP_USE_PTHREADS
else
pthread_setspecific (gomp_thread_destructor, thr);
#endif
}
/* The format of data is:
data[0] cnt
data[1] size
data[2] alignment (on output array pointer)
data[3] allocator (-1 if malloc allocator)
data[4] next pointer
data[5] used internally (htab pointer)
data[6] used internally (end of array)
cnt times
ent[0] address
ent[1] offset
ent[2] used internally (pointer to data[0])
The entries are sorted by increasing offset, so that a binary
search can be performed. Normally, data[8] is 0; the exception is
worksharing construct task reductions in cancellable parallel,
where at offset 0 there should be space for a pointer and an integer
which are used internally. */
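/* Illustrative sketch, not part of the libgomp sources: for a single
hypothetical int reduction variable r, DATA could be laid out as
uintptr_t data[7 + 3 * 1];
data[0] = 1; /* one reduction entry */
data[1] = sizeof (int); /* per-thread chunk size */
data[2] = __alignof__ (int); /* alignment in, array pointer out */
data[3] = (uintptr_t) -1; /* malloc allocator */
data[4] = 0; /* no next item */
data[5] = data[6] = 0; /* used internally */
data[7] = (uintptr_t) &r; /* ent[0]: address */
data[8] = 0; /* ent[1]: offset */
data[9] = 0; /* ent[2]: used internally */
GOMP_taskgroup_reduction_register (data);
In practice the whole block is emitted by the compiler. */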
void
GOMP_taskgroup_reduction_register (uintptr_t *data)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
struct gomp_task *task;
unsigned nthreads;
if (__builtin_expect (team == NULL, 0))
{
/* The task reduction code needs a team and task, so for
orphaned taskgroups just create the implicit team. */
gomp_create_artificial_team ();
ialias_call (GOMP_taskgroup_start) ();
team = thr->ts.team;
}
nthreads = team->nthreads;
task = thr->task;
gomp_reduction_register (data, task->taskgroup->reductions, NULL, nthreads);
task->taskgroup->reductions = data;
}
void
GOMP_taskgroup_reduction_unregister (uintptr_t *data)
{
uintptr_t *d = data;
htab_free ((struct htab *) data[5]);
do
{
gomp_aligned_free ((void *) d[2]);
d = (uintptr_t *) d[4];
}
while (d && !d[5]);
}
ialias (GOMP_taskgroup_reduction_unregister)
/* For i = 0 to cnt-1, remap ptrs[i] which is either address of the
original list item or address of previously remapped original list
item to address of the private copy, store that to ptrs[i].
For i < cntorig, additionally set ptrs[cnt+i] to the address of
the original list item. */
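/* Illustrative sketch, not part of the libgomp sources: reusing the
hypothetical reduction variable r registered above, a task body could
remap it with
void *ptrs[2] = { &r }; /* cnt = 1, cntorig = 1 */
GOMP_task_reduction_remap (1, 1, ptrs);
after which ptrs[0] points to this thread's private copy of r and
ptrs[1] to the original r. */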
void
GOMP_task_reduction_remap (size_t cnt, size_t cntorig, void **ptrs)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_task *task = thr->task;
unsigned id = thr->ts.team_id;
uintptr_t *data = task->taskgroup->reductions;
uintptr_t *d;
struct htab *reduction_htab = (struct htab *) data[5];
size_t i;
for (i = 0; i < cnt; ++i)
{
hash_entry_type ent, n;
__asm ("" : "=g" (ent) : "0" (ptrs + i));
n = htab_find (reduction_htab, ent);
if (n)
{
uintptr_t *p;
__asm ("" : "=g" (p) : "0" (n));
/* At this point, p[0] should be equal to (uintptr_t) ptrs[i],
p[1] is the offset within the allocated chunk for each
thread, p[2] is the array registered with
GOMP_taskgroup_reduction_register, d[2] is the base of the
allocated memory and d[1] is the size of the allocated chunk
for one thread. */
d = (uintptr_t *) p[2];
ptrs[i] = (void *) (d[2] + id * d[1] + p[1]);
if (__builtin_expect (i < cntorig, 0))
ptrs[cnt + i] = (void *) p[0];
continue;
}
d = data;
while (d != NULL)
{
if ((uintptr_t) ptrs[i] >= d[2] && (uintptr_t) ptrs[i] < d[6])
break;
d = (uintptr_t *) d[4];
}
if (d == NULL)
gomp_fatal ("couldn't find matching task_reduction or reduction with "
"task modifier for %p", ptrs[i]);
uintptr_t off = ((uintptr_t) ptrs[i] - d[2]) % d[1];
ptrs[i] = (void *) (d[2] + id * d[1] + off);
if (__builtin_expect (i < cntorig, 0))
{
size_t lo = 0, hi = d[0] - 1;
while (lo <= hi)
{
size_t m = (lo + hi) / 2;
if (d[7 + 3 * m + 1] < off)
lo = m + 1;
else if (d[7 + 3 * m + 1] == off)
{
ptrs[cnt + i] = (void *) d[7 + 3 * m];
break;
}
else
hi = m - 1;
}
if (lo > hi)
gomp_fatal ("couldn't find matching task_reduction or reduction "
"with task modifier for %p", ptrs[i]);
}
}
}
struct gomp_taskgroup *
gomp_parallel_reduction_register (uintptr_t *data, unsigned nthreads)
{
struct gomp_taskgroup *taskgroup = gomp_taskgroup_init (NULL);
gomp_reduction_register (data, NULL, NULL, nthreads);
taskgroup->reductions = data;
return taskgroup;
}
void
gomp_workshare_task_reduction_register (uintptr_t *data, uintptr_t *orig)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
struct gomp_task *task = thr->task;
unsigned nthreads = team->nthreads;
gomp_reduction_register (data, task->taskgroup->reductions, orig, nthreads);
task->taskgroup->reductions = data;
}
void
gomp_workshare_taskgroup_start (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
struct gomp_task *task;
if (team == NULL)
{
gomp_create_artificial_team ();
team = thr->ts.team;
}
task = thr->task;
task->taskgroup = gomp_taskgroup_init (task->taskgroup);
task->taskgroup->workshare = true;
}
void
GOMP_workshare_task_reduction_unregister (bool cancelled)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_task *task = thr->task;
struct gomp_team *team = thr->ts.team;
uintptr_t *data = task->taskgroup->reductions;
ialias_call (GOMP_taskgroup_end) ();
if (thr->ts.team_id == 0)
ialias_call (GOMP_taskgroup_reduction_unregister) (data);
else
htab_free ((struct htab *) data[5]);
if (!cancelled)
gomp_team_barrier_wait (&team->barrier);
}
int
omp_in_final (void)
{
struct gomp_thread *thr = gomp_thread ();
return thr->task && thr->task->final_task;
}
ialias (omp_in_final)