gcc/libgomp/task.c
commit acf0174b6f by Jakub Jelinek ("target.c: New file.")
libgomp/
	* target.c: New file.
	* Makefile.am (libgomp_la_SOURCES): Add target.c.
	* Makefile.in: Regenerated.
	* libgomp_g.h (GOMP_task): Add depend argument.
	(GOMP_barrier_cancel, GOMP_loop_end_cancel,
	GOMP_sections_end_cancel, GOMP_target, GOMP_target_data,
	GOMP_target_end_data, GOMP_target_update, GOMP_teams,
	GOMP_parallel_loop_static, GOMP_parallel_loop_dynamic,
	GOMP_parallel_loop_guided, GOMP_parallel_loop_runtime,
	GOMP_parallel, GOMP_cancel, GOMP_cancellation_point,
	GOMP_taskgroup_start, GOMP_taskgroup_end,
	GOMP_parallel_sections): New prototypes.
	* fortran.c (omp_is_initial_device): Add ialias_redirect.
	(omp_is_initial_device_): New function.
	(ULP, STR1, STR2, ialias_redirect): Removed.
	(omp_get_cancellation_, omp_get_proc_bind_, omp_set_default_device_,
	omp_set_default_device_8_, omp_get_default_device_,
	omp_get_num_devices_, omp_get_num_teams_, omp_get_team_num_): New
	functions.
	* libgomp.map (GOMP_barrier_cancel, GOMP_loop_end_cancel,
	GOMP_sections_end_cancel, GOMP_target, GOMP_target_data,
	GOMP_target_end_data, GOMP_target_update, GOMP_teams): Export
	@@GOMP_4.0.
	(omp_is_initial_device, omp_is_initial_device_, omp_get_cancellation,
	omp_get_cancellation_, omp_get_proc_bind, omp_get_proc_bind_,
	omp_set_default_device, omp_set_default_device_,
	omp_set_default_device_8_, omp_get_default_device,
	omp_get_default_device_, omp_get_num_devices, omp_get_num_devices_,
	omp_get_num_teams, omp_get_num_teams_, omp_get_team_num,
	omp_get_team_num_): Export @@OMP_4.0.
	* team.c (struct gomp_thread_start_data): Add place field.
	(gomp_thread_start): Clear thr->thread_pool and
	thr->task before returning.  Use gomp_team_barrier_wait_final
	instead of gomp_team_barrier_wait.  Initialize thr->place.
	(gomp_new_team): Initialize work_shares_to_free, work_share_cancelled,
	team_cancelled and task_queued_count fields.
	(gomp_free_pool_helper): Clear thr->thread_pool and thr->task
	before calling pthread_exit.
	(gomp_free_thread): No longer static.  Use
	gomp_managed_threads_lock instead of gomp_remaining_threads_lock.
	(gomp_team_start): Add flags argument.  Set
	thr->thread_pool->threads_busy to nthreads immediately after creating
	new pool.  Use gomp_managed_threads_lock instead of
	gomp_remaining_threads_lock.  Handle OpenMP 4.0 affinity.
	(gomp_team_end): Use gomp_managed_threads_lock instead of
	gomp_remaining_threads_lock.  Use gomp_team_barrier_wait_final instead
	of gomp_team_barrier_wait.  If team->team_cancelled, call
	gomp_fini_work_share on ws chain starting at team->work_shares_to_free
	rather than thr->ts.work_share.
	(initialize_team): Don't call gomp_sem_init here.
	* sections.c (GOMP_parallel_sections_start): Adjust gomp_team_start
	caller.
	(GOMP_parallel_sections, GOMP_sections_end_cancel): New functions.
	* env.c (gomp_global_icv): Add default_device_var, target_data and
	bind_var initializers.
	(gomp_cpu_affinity, gomp_cpu_affinity_len): Remove.
	(gomp_bind_var_list, gomp_bind_var_list_len, gomp_places_list,
	gomp_places_list_len): New variables.
	(parse_bind_var, parse_one_place, parse_places_var): New functions.
	(parse_affinity): Rewritten to construct OMP_PLACES list with unit
	sized places.
	(gomp_cancel_var): New global variable.
	(parse_int): New function.
	(handle_omp_display_env): New function.
	(initialize_env): Use it.  Initialize default_device_var.
	Parse OMP_CANCELLATION env var.  Use parse_bind_var to parse
	OMP_PROC_BIND instead of parse_boolean.  Use parse_places_var for
	OMP_PLACES parsing.  Don't call parse_affinity if OMP_PLACES has
	been successfully parsed (and call gomp_init_affinity in that case).
	(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
	omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
	omp_get_team_num, omp_is_initial_device): New functions.
	* libgomp.h: Include stdlib.h.
	(ialias_ulp, ialias_str1, ialias_str2, ialias_redirect, ialias_call):
	Define.
	(struct target_mem_desc): Forward declare.
	(struct gomp_task_icv): Add default_device_var, target_data, bind_var
	and thread_limit_var fields.
	(gomp_get_num_devices): New prototype.
	(gomp_cancel_var): New extern decl.
	(struct gomp_team): Add work_shares_to_free, work_share_cancelled,
	team_cancelled and task_queued_count fields.  Add comments about
	task_{,queued_,running_}count.
	(gomp_cancel_kind): New enum.
	(gomp_work_share_end_cancel): New prototype.
	(struct gomp_task): Add next_taskgroup, prev_taskgroup, taskgroup,
	copy_ctors_done, dependers, depend_hash, depend_count, num_dependees
	and depend fields.
	(struct gomp_taskgroup): New type.
	(struct gomp_task_depend_entry,
	struct gomp_dependers_vec): New types.
	(gomp_finish_task): Free depend_hash if non-NULL.
	(struct gomp_team_state): Add place_partition_off
	and place_partition_len fields.
	(gomp_bind_var_list, gomp_bind_var_list_len, gomp_places_list,
	gomp_places_list_len): New extern decls.
	(struct gomp_thread): Add place field.
	(gomp_cpu_affinity, gomp_cpu_affinity_len): Remove.
	(gomp_init_thread_affinity): Add place argument.
	(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus,
	gomp_affinity_remove_cpu, gomp_affinity_copy_place,
	gomp_affinity_same_place, gomp_affinity_finalize_place_list,
	gomp_affinity_init_level, gomp_affinity_print_place): New
	prototypes.
	(gomp_team_start): Add flags argument.
	(gomp_thread_limit_var, gomp_remaining_threads_count,
	gomp_remaining_threads_lock): Remove.
	(gomp_managed_threads_lock): New variable.
	(struct gomp_thread_pool): Add threads_busy field.
	(gomp_free_thread): New prototype.
	* task.c: Include hashtab.h.
	(hash_entry_type): New typedef.
	(htab_alloc, htab_free, htab_hash, htab_eq): New inlines.
	(gomp_init_task): Clear dependers, depend_hash, depend_count,
	copy_ctors_done and taskgroup fields.
	(GOMP_task): Add depend argument, handle depend clauses.  If
	gomp_team_barrier_cancelled or if its taskgroup has been
	cancelled, don't queue or start new tasks.  Set copy_ctors_done
	field if needed.  Initialize taskgroup field.  If copy_ctors_done
	and already cancelled, don't discard the task.  If taskgroup is
	non-NULL, enqueue the task into taskgroup queue.  Increment
	num_children field in taskgroup.  Increment task_queued_count.
	(gomp_task_run_pre, gomp_task_run_post_remove_parent,
	gomp_task_run_post_remove_taskgroup): New inline functions.
	(gomp_task_run_post_handle_depend_hash,
	gomp_task_run_post_handle_dependers,
	gomp_task_run_post_handle_depend): New functions.
	(GOMP_taskwait): Use them.  If more than one new task has
	been queued, wake other threads if needed.
	(gomp_barrier_handle_tasks): Likewise.  If
	gomp_team_barrier_cancelled, don't start any new tasks, just free
	all tasks.
	(GOMP_taskgroup_start, GOMP_taskgroup_end): New functions.
	* omp_lib.f90.in
	(omp_proc_bind_kind, omp_proc_bind_false,
	omp_proc_bind_true, omp_proc_bind_master, omp_proc_bind_close,
	omp_proc_bind_spread): New params.
	(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
	omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
	omp_get_team_num, omp_is_initial_device): New interfaces.
	(omp_get_dynamic, omp_get_nested, omp_in_parallel,
	omp_get_max_threads, omp_get_num_procs, omp_get_num_threads,
	omp_get_thread_num, omp_get_thread_limit, omp_set_max_active_levels,
	omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num,
	omp_get_team_size, omp_get_active_level, omp_in_final): Remove
	useless use omp_lib_kinds.
	* omp.h.in (omp_proc_bind_t): New typedef.
	(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
	omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
	omp_get_team_num, omp_is_initial_device): New prototypes.
	* loop.c (gomp_parallel_loop_start): Add flags argument, pass it
	through to gomp_team_start.
	(GOMP_parallel_loop_static_start, GOMP_parallel_loop_dynamic_start,
	GOMP_parallel_loop_guided_start, GOMP_parallel_loop_runtime_start):
	Adjust gomp_parallel_loop_start callers.
	(GOMP_parallel_loop_static, GOMP_parallel_loop_dynamic,
	GOMP_parallel_loop_guided, GOMP_parallel_loop_runtime,
	GOMP_loop_end_cancel): New functions.
	(GOMP_parallel_end): Add ialias_redirect.
	* hashtab.h: New file.
	* libgomp.texi (Environment Variables): Minor cleanup,
	update section refs to OpenMP 4.0rc2.
	(OMP_DISPLAY_ENV, GOMP_SPINCOUNT): Document these
	environment variables.
	* work.c (gomp_work_share_end, gomp_work_share_end_nowait): Set
	team->work_shares_to_free to thr->ts.work_share before calling
	free_work_share.
	(gomp_work_share_end_cancel): New function.
	* config/linux/proc.c: Include errno.h.
	(gomp_get_cpuset_size, gomp_cpuset_size, gomp_cpusetp): New variables.
	(gomp_cpuset_popcount): Add cpusetsize argument, use it instead of
	sizeof (cpu_set_t) to determine number of iterations.  Fix up check
	extern decl.  Use CPU_COUNT_S if available, or CPU_COUNT if
	gomp_cpuset_size is sizeof (cpu_set_t).
	(gomp_init_num_threads): Initialize gomp_cpuset_size,
	gomp_get_cpuset_size and gomp_cpusetp here, use gomp_cpusetp instead
	of &cpuset and pass gomp_cpuset_size instead of sizeof (cpu_set_t)
	to pthread_getaffinity_np.  Free and clear gomp_cpusetp if it didn't
	contain any logical CPUs.
	(get_num_procs): Don't call pthread_getaffinity_np if gomp_cpusetp
	is NULL.  Use gomp_cpusetp instead of &cpuset and pass
	gomp_get_cpuset_size instead of sizeof (cpu_set_t) to
	pthread_getaffinity_np.  Check gomp_places_list instead of
	gomp_cpu_affinity.  Adjust gomp_cpuset_popcount caller.
	* config/linux/bar.c (gomp_barrier_wait_end,
	gomp_barrier_wait_last): Use BAR_* defines.
	(gomp_team_barrier_wait_end): Likewise.  Clear BAR_CANCELLED
	from state where needed.  Set work_share_cancelled to 0 on last
	thread.
	(gomp_team_barrier_wait_final, gomp_team_barrier_wait_cancel_end,
	gomp_team_barrier_wait_cancel, gomp_team_barrier_cancel): New
	functions.
	* config/linux/proc.h (gomp_cpuset_popcount): Add attribute_hidden.
	Add cpusetsize argument.
	(gomp_cpuset_size, gomp_cpusetp): Declare.
	* config/linux/affinity.c: Include errno.h, stdio.h and string.h.
	(affinity_counter): Remove.
	(CPU_ISSET_S, CPU_ZERO_S, CPU_SET_S, CPU_CLR_S): Define
	if CPU_ALLOC_SIZE isn't defined.
	(gomp_init_affinity): Rewritten.  If gomp_places_list is NULL, try to
	silently create OMP_PLACES=threads; if it is non-NULL afterwards,
	bind the current thread to the first place.
	(gomp_init_thread_affinity): Rewritten.  Add place argument; just
	call pthread_setaffinity_np with gomp_places_list[place].
	(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus,
	gomp_affinity_remove_cpu, gomp_affinity_copy_place,
	gomp_affinity_same_place, gomp_affinity_finalize_place_list,
	gomp_affinity_init_level, gomp_affinity_print_place): New functions.
	* config/linux/bar.h (BAR_TASK_PENDING, BAR_WAS_LAST,
	BAR_WAITING_FOR_TASK, BAR_INCR, BAR_CANCELLED): Define.
	(gomp_barrier_t): Add awaited_final field.
	(gomp_barrier_init): Initialize awaited_final field.
	(gomp_team_barrier_wait_final, gomp_team_barrier_wait_cancel,
	gomp_team_barrier_wait_cancel_end, gomp_team_barrier_cancel): New
	prototypes.
	(gomp_barrier_wait_start): Preserve BAR_CANCELLED bit.  Use BAR_*
	defines.
	(gomp_barrier_wait_cancel_start, gomp_team_barrier_wait_final_start,
	gomp_team_barrier_cancelled): New inline functions.
	(gomp_barrier_last_thread,
	gomp_team_barrier_set_task_pending,
	gomp_team_barrier_clear_task_pending,
	gomp_team_barrier_set_waiting_for_tasks,
	gomp_team_barrier_waiting_for_tasks,
	gomp_team_barrier_done): Use BAR_* defines.
	* config/posix/bar.c (gomp_barrier_init): Clear cancellable field.
	(gomp_barrier_wait_end): Use BAR_* defines.
	(gomp_team_barrier_wait_end): Clear BAR_CANCELLED from state.
	Set work_share_cancelled to 0 on last thread, use __atomic_load_n.
	Use BAR_* defines.
	(gomp_team_barrier_wait_cancel_end, gomp_team_barrier_wait_cancel,
	gomp_team_barrier_cancel): New functions.
	* config/posix/affinity.c (gomp_init_thread_affinity): Add place
	argument.
	(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus,
	gomp_affinity_remove_cpu, gomp_affinity_copy_place,
	gomp_affinity_same_place, gomp_affinity_finalize_place_list,
	gomp_affinity_init_level, gomp_affinity_print_place): New stubs.
	* config/posix/bar.h (BAR_TASK_PENDING, BAR_WAS_LAST,
	BAR_WAITING_FOR_TASK, BAR_INCR, BAR_CANCELLED): Define.
	(gomp_barrier_t): Add cancellable field.
	(gomp_team_barrier_wait_cancel, gomp_team_barrier_wait_cancel_end,
	gomp_team_barrier_cancel): New prototypes.
	(gomp_barrier_wait_start): Preserve BAR_CANCELLED bit.
	(gomp_barrier_wait_cancel_start, gomp_team_barrier_wait_final,
	gomp_team_barrier_cancelled): New inline functions.
	(gomp_barrier_wait_start, gomp_barrier_last_thread,
	gomp_team_barrier_set_task_pending,
	gomp_team_barrier_clear_task_pending,
	gomp_team_barrier_set_waiting_for_tasks,
	gomp_team_barrier_waiting_for_tasks,
	gomp_team_barrier_done): Use BAR_* defines.
	* barrier.c (GOMP_barrier_cancel): New function.
	* omp_lib.h.in (omp_proc_bind_kind, omp_proc_bind_false,
	omp_proc_bind_true, omp_proc_bind_master, omp_proc_bind_close,
	omp_proc_bind_spread): New params.
	(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
	omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
	omp_get_team_num, omp_is_initial_device): New externals.
	* parallel.c (GOMP_parallel, GOMP_cancel, GOMP_cancellation_point):
	New functions.
	(gomp_resolve_num_threads): Adjust for thread_limit now being in
	icv->thread_limit_var.  Use UINT_MAX instead of ULONG_MAX as
	infinity.  If not nested, just return minimum of max_num_threads
	and icv->thread_limit_var and if thr->thread_pool, set threads_busy
	to the returned value.  Otherwise, atomically update
	thr->thread_pool->threads_busy instead of gomp_remaining_threads_count.
	(GOMP_parallel_end): Adjust for thread_limit now being in
	icv->thread_limit_var.  Use UINT_MAX instead of ULONG_MAX as
	infinity.  Adjust threads_busy in the pool rather than
	gomp_remaining_threads_count.  Remember team->nthreads and call
	gomp_team_end before adjusting threads_busy, if not nested
	afterwards, just set it to 1 non-atomically.  Add ialias.
	(GOMP_parallel_start): Adjust gomp_team_start caller.
	* testsuite/libgomp.c/atomic-14.c: Add parens to make it valid.
	* testsuite/libgomp.c/affinity-1.c: New test.
	* testsuite/libgomp.c/atomic-15.c: New test.
	* testsuite/libgomp.c/atomic-16.c: New test.
	* testsuite/libgomp.c/atomic-17.c: New test.
	* testsuite/libgomp.c/cancel-for-1.c: New test.
	* testsuite/libgomp.c/cancel-for-2.c: New test.
	* testsuite/libgomp.c/cancel-parallel-1.c: New test.
	* testsuite/libgomp.c/cancel-parallel-2.c: New test.
	* testsuite/libgomp.c/cancel-parallel-3.c: New test.
	* testsuite/libgomp.c/cancel-sections-1.c: New test.
	* testsuite/libgomp.c/cancel-taskgroup-1.c: New test.
	* testsuite/libgomp.c/cancel-taskgroup-2.c: New test.
	* testsuite/libgomp.c/depend-1.c: New test.
	* testsuite/libgomp.c/depend-2.c: New test.
	* testsuite/libgomp.c/depend-3.c: New test.
	* testsuite/libgomp.c/depend-4.c: New test.
	* testsuite/libgomp.c/for-1.c: New test.
	* testsuite/libgomp.c/for-1.h: New file.
	* testsuite/libgomp.c/for-2.c: New test.
	* testsuite/libgomp.c/for-2.h: New file.
	* testsuite/libgomp.c/for-3.c: New test.
	* testsuite/libgomp.c/pr58392.c: New test.
	* testsuite/libgomp.c/simd-1.c: New test.
	* testsuite/libgomp.c/simd-2.c: New test.
	* testsuite/libgomp.c/simd-3.c: New test.
	* testsuite/libgomp.c/simd-4.c: New test.
	* testsuite/libgomp.c/simd-5.c: New test.
	* testsuite/libgomp.c/simd-6.c: New test.
	* testsuite/libgomp.c/target-1.c: New test.
	* testsuite/libgomp.c/target-2.c: New test.
	* testsuite/libgomp.c/target-3.c: New test.
	* testsuite/libgomp.c/target-4.c: New test.
	* testsuite/libgomp.c/target-5.c: New test.
	* testsuite/libgomp.c/target-6.c: New test.
	* testsuite/libgomp.c/target-7.c: New test.
	* testsuite/libgomp.c/taskgroup-1.c: New test.
	* testsuite/libgomp.c/thread-limit-1.c: New test.
	* testsuite/libgomp.c/thread-limit-2.c: New test.
	* testsuite/libgomp.c/thread-limit-3.c: New test.
	* testsuite/libgomp.c/udr-1.c: New test.
	* testsuite/libgomp.c/udr-2.c: New test.
	* testsuite/libgomp.c/udr-3.c: New test.
	* testsuite/libgomp.c++/affinity-1.C: New test.
	* testsuite/libgomp.c++/atomic-10.C: New test.
	* testsuite/libgomp.c++/atomic-11.C: New test.
	* testsuite/libgomp.c++/atomic-12.C: New test.
	* testsuite/libgomp.c++/atomic-13.C: New test.
	* testsuite/libgomp.c++/atomic-14.C: New test.
	* testsuite/libgomp.c++/atomic-15.C: New test.
	* testsuite/libgomp.c++/cancel-for-1.C: New test.
	* testsuite/libgomp.c++/cancel-for-2.C: New test.
	* testsuite/libgomp.c++/cancel-parallel-1.C: New test.
	* testsuite/libgomp.c++/cancel-parallel-2.C: New test.
	* testsuite/libgomp.c++/cancel-parallel-3.C: New test.
	* testsuite/libgomp.c++/cancel-sections-1.C: New test.
	* testsuite/libgomp.c++/cancel-taskgroup-1.C: New test.
	* testsuite/libgomp.c++/cancel-taskgroup-2.C: New test.
	* testsuite/libgomp.c++/cancel-taskgroup-3.C: New test.
	* testsuite/libgomp.c++/cancel-test.h: New file.
	* testsuite/libgomp.c++/for-9.C: New test.
	* testsuite/libgomp.c++/for-10.C: New test.
	* testsuite/libgomp.c++/for-11.C: New test.
	* testsuite/libgomp.c++/simd-1.C: New test.
	* testsuite/libgomp.c++/simd-2.C: New test.
	* testsuite/libgomp.c++/simd-3.C: New test.
	* testsuite/libgomp.c++/simd-4.C: New test.
	* testsuite/libgomp.c++/simd-5.C: New test.
	* testsuite/libgomp.c++/simd-6.C: New test.
	* testsuite/libgomp.c++/simd-7.C: New test.
	* testsuite/libgomp.c++/simd-8.C: New test.
	* testsuite/libgomp.c++/target-1.C: New test.
	* testsuite/libgomp.c++/target-2.C: New test.
	* testsuite/libgomp.c++/target-2-aux.cc: New file.
	* testsuite/libgomp.c++/target-3.C: New test.
	* testsuite/libgomp.c++/taskgroup-1.C: New test.
	* testsuite/libgomp.c++/udr-1.C: New test.
	* testsuite/libgomp.c++/udr-2.C: New test.
	* testsuite/libgomp.c++/udr-3.C: New test.
	* testsuite/libgomp.c++/udr-4.C: New test.
	* testsuite/libgomp.c++/udr-5.C: New test.
	* testsuite/libgomp.c++/udr-6.C: New test.
	* testsuite/libgomp.c++/udr-7.C: New test.
	* testsuite/libgomp.c++/udr-8.C: New test.
	* testsuite/libgomp.c++/udr-9.C: New test.
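
As a quick orientation for the new libgomp entry points listed above, the
sketch below shows roughly the calls a compiler would emit for a combined
parallel region and for a deferred task with a depend clause.  Only the
GOMP_* names and signatures follow libgomp_g.h and the GOMP_task definition
shown below in task.c; parallel_fn, task_fn, task_data and example are
made-up names used purely for illustration.

#include <stdbool.h>

extern void GOMP_parallel (void (*) (void *), void *, unsigned, unsigned);
extern void GOMP_task (void (*) (void *), void *, void (*) (void *, void *),
                       long, long, bool, unsigned, void **);

struct task_data { int *x; };

static void parallel_fn (void *data) { /* outlined parallel body */ }
static void task_fn (void *data) { /* outlined task body */ }

void
example (int *x)
{
  /* #pragma omp parallel num_threads (4): one combined call; the runtime
     runs parallel_fn in the whole team and joins it, so no separate
     GOMP_parallel_end call is emitted any more.  */
  GOMP_parallel (parallel_fn, NULL, 4, 0);

  /* #pragma omp task depend (out: x): the new last argument is the depend
     vector; depend[0] is the number of depend items, depend[1] the number
     of out/inout items (which are listed first), followed by the addresses.
     The 8 bit in flags (flags & 8) tells GOMP_task the vector is present.  */
  struct task_data td = { x };
  void *depend[3] = { (void *) 1, (void *) 1, x };
  GOMP_task (task_fn, &td, NULL, sizeof td, __alignof__ (struct task_data),
             true, /*flags=*/8, depend);
}
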
gcc/
	* tree-pretty-print.c (dump_omp_clause): Handle OMP_CLAUSE__LOOPTEMP_
	and new OpenMP 4.0 clauses, handle UDR OMP_CLAUSE_REDUCTION,
	formatting fixes, use pp_colon instead of pp_character (..., ':'),
	similarly pp_right_paren.
	(dump_generic_node): Handle OMP_DISTRIBUTE, OMP_TEAMS,
	OMP_TARGET_DATA, OMP_TARGET, OMP_TARGET_UPDATE, OMP_TASKGROUP,
	allow OMP_FOR_INIT to be NULL, handle OMP_ATOMIC_SEQ_CST.
	* tree.c (omp_clause_num_ops, omp_clause_code_name): Add OpenMP 4.0
	clauses.
	(omp_declare_simd_clauses_equal,
	omp_remove_redundant_declare_simd_attrs): New functions.
	(attribute_value_equal): Use omp_declare_simd_clauses_equal.
	(walk_tree_1): Handle new OpenMP 4.0 clauses.
	* tree.h (OMP_LOOP_CHECK): Define.
	(OMP_FOR_BODY, OMP_FOR_CLAUSES, OMP_FOR_INIT, OMP_FOR_COND,
	OMP_FOR_INCR, OMP_FOR_PRE_BODY): Use it.
	(OMP_TASKGROUP_BODY, OMP_TEAMS_BODY, OMP_TEAMS_CLAUSES,
	OMP_TARGET_DATA_BODY, OMP_TARGET_DATA_CLAUSES, OMP_TARGET_BODY,
	OMP_TARGET_CLAUSES, OMP_TARGET_UPDATE_CLAUSES, OMP_CLAUSE_SIZE,
	OMP_ATOMIC_SEQ_CST, OMP_CLAUSE_DEPEND_KIND, OMP_CLAUSE_MAP_KIND,
	OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION, OMP_CLAUSE_PROC_BIND_KIND,
	OMP_CLAUSE_REDUCTION_OMP_ORIG_REF, OMP_CLAUSE_ALIGNED_ALIGNMENT,
	OMP_CLAUSE_NUM_TEAMS_EXPR, OMP_CLAUSE_THREAD_LIMIT_EXPR,
	OMP_CLAUSE_DEVICE_ID, OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR,
	OMP_CLAUSE_SIMDLEN_EXPR): Define.
	(OMP_CLAUSE_DECL): Change range up to OMP_CLAUSE__LOOPTEMP_.
	(omp_remove_redundant_declare_simd_attrs): New prototype.
	* gimple.def (GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET,
	GIMPLE_OMP_TEAMS): New codes.
	(GIMPLE_OMP_RETURN): Use GSS_OMP_ATOMIC_STORE instead of GSS_BASE.
	* omp-low.c (struct omp_context): Add cancel_label and cancellable
	fields.
	(target_nesting_level): New variable.
	(extract_omp_for_data): Handle GF_OMP_FOR_KIND_DISTRIBUTE and
	OMP_CLAUSE_DIST_SCHEDULE.  Don't fall back to the library implementation
	for collapse > 1 static schedule unless ordered.
	(get_ws_args_for): Add par_stmt argument.  Handle combined loops.
	(determine_parallel_type): Adjust get_ws_args_for caller.
	(install_var_field): Handle mask & 4 for double indirection.
	(scan_sharing_clauses): Ignore shared clause on teams construct.
	Handle OMP_CLAUSE__LOOPTEMP_ and new OpenMP 4.0 clauses.
	(create_omp_child_function): If inside target or declare target
	constructs, set "omp declare target" attribute on the child
	function.
	(find_combined_for): New function.
	(scan_omp_parallel): Handle combined loops.
	(scan_omp_target, scan_omp_teams): New functions.
	(check_omp_nesting_restrictions): Check new OpenMP 4.0 nesting
	restrictions and set ctx->cancellable for cancellable constructs.
	(scan_omp_1_stmt): Call check_omp_nesting_restrictions also on
	selected builtin calls.  Handle GIMPLE_OMP_TASKGROUP,
	GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS.
	(build_omp_barrier): Add lhs argument, return gimple rather than
	tree.
	(omp_clause_aligned_alignment): New function.
	(lower_rec_simd_input_clauses): Only call SET_DECL_VALUE_EXPR
	on decls.
	(lower_rec_input_clauses): Add FD argument.  Ignore shared clauses
	on teams constructs.  Handle user defined reductions and new
	OpenMP 4.0 clauses.
	(lower_reduction_clauses): Don't set placeholder to address of ref
	if it already has the right type.
	(lower_send_clauses): Handle OMP_CLAUSE__LOOPTEMP_.
	(expand_parallel_call): Use the new non-_start suffixed builtins,
	handle OMP_CLAUSE_PROC_BIND, don't call the outlined function
	and GOMP_parallel_end after the call.
	(expand_task_call): Handle OMP_CLAUSE_DEPEND.
	(expand_omp_for_init_counts): Handle combined loops.
	(expand_omp_for_init_vars): Add inner_stmt argument, handle combined
	loops.
	(expand_omp_for_generic): Likewise.  Use GOMP_loop_end_cancel at the
	end of cancellable loops.
	(expand_omp_for_static_nochunk, expand_omp_for_static_chunk):
	Likewise.  Handle collapse > 1 loops.
	(expand_omp_simd): Handle combined loops.
	(expand_omp_for): Add inner_stmt argument, adjust callers of
	expand_omp_for* functions, use expand_omp_for_static*chunk even
	for collapse > 1 unless ordered.
	(expand_omp_sections): Use GOMP_sections_end_cancel at the end
	of cancellable sections.
	(expand_omp_single): Remove need_barrier variable, just rely on
	gimple_omp_return_nowait_p.  Adjust build_omp_barrier caller.
	(expand_omp_synch): Allow GIMPLE_OMP_TASKGROUP and GIMPLE_OMP_TEAMS.
	(expand_omp_atomic_load, expand_omp_atomic_store,
	expand_omp_atomic_fetch_op): Handle gimple_omp_atomic_seq_cst_p.
	(expand_omp_target): New function.
	(expand_omp): Handle combined loops.  Handle GIMPLE_OMP_TASKGROUP,
	GIMPLE_OMP_TEAMS, GIMPLE_OMP_TARGET.
	(build_omp_regions_1): Immediately close region for
	GF_OMP_TARGET_KIND_UPDATE.
	(maybe_add_implicit_barrier_cancel): New function.
	(lower_omp_sections): Adjust lower_rec_input_clauses caller.  Handle
	cancellation.
	(lower_omp_single): Likewise.  Add clobber after the barrier.
	(lower_omp_taskgroup): New function.
	(lower_omp_for): Handle combined loops.  Adjust
	lower_rec_input_clauses caller.  Handle cancellation.
	(lower_depend_clauses): New function.
	(lower_omp_taskreg): Lower depend clauses.  Adjust
	lower_rec_input_clauses caller.  Add clobber after the call.  Handle
	cancellation.
	(lower_omp_target, lower_omp_teams): New functions.
	(lower_omp_1): Handle cancellation.  Handle GIMPLE_OMP_TASKGROUP,
	GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GOMP_barrier, GOMP_cancel
	and GOMP_cancellation_point calls.
	(lower_omp): Fold stmts inside of target region.
	(diagnose_sb_1, diagnose_sb_2): Handle GIMPLE_OMP_TASKGROUP,
	GIMPLE_OMP_TARGET and GIMPLE_OMP_TEAMS.
	* builtin-types.def (DEF_FUNCTION_TYPE_8): Document.
	(BT_FN_VOID_OMPFN_PTR_UINT,
	BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG,
	BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG,
	BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): Remove.
	(BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT,
	BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT,
	BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
	BT_FN_BOOL_INT, BT_FN_BOOL_INT_BOOL, BT_FN_VOID_UINT_UINT,
	BT_FN_VOID_INT_PTR_SIZE_PTR_PTR_PTR,
	BT_FN_VOID_INT_OMPFN_PTR_SIZE_PTR_PTR_PTR,
	BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR): New.
	* tree-ssa-alias.c (ref_maybe_used_by_call_p_1,
	call_may_clobber_ref_p_1): Handle BUILT_IN_GOMP_BARRIER_CANCEL,
	BUILT_IN_GOMP_TASKGROUP_END, BUILT_IN_GOMP_LOOP_END_CANCEL,
	BUILT_IN_GOMP_SECTIONS_END_CANCEL.  Don't handle
	BUILT_IN_GOMP_PARALLEL_END.
	* gimple-low.c (lower_stmt): Handle GIMPLE_OMP_TASKGROUP,
	GIMPLE_OMP_TARGET and GIMPLE_OMP_TEAMS.
	* gimple-pretty-print.c (dump_gimple_omp_for): Handle
	GF_OMP_FOR_KIND_DISTRIBUTE.
	(dump_gimple_omp_target, dump_gimple_omp_teams): New functions.
	(dump_gimple_omp_block): Handle GIMPLE_OMP_TASKGROUP.
	(dump_gimple_omp_return): Print lhs if it has any.
	(dump_gimple_omp_atomic_load, dump_gimple_omp_atomic_store): Handle
	gimple_omp_atomic_seq_cst_p.
	(pp_gimple_stmt_1): Handle GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET
	and GIMPLE_OMP_TEAMS.
	* langhooks.c (lhd_omp_mappable_type): New function.
	* tree-vectorizer.c (struct simd_array_to_simduid): Fix up comment.
	* langhooks.h (struct lang_hooks_for_types): Add omp_mappable_type
	hook.
	* gimplify.c (enum gimplify_omp_var_data): Add GOVD_MAP,
	GOVD_ALIGNED and GOVD_MAP_TO_ONLY.
	(enum omp_region_type): Add ORT_TEAMS, ORT_TARGET_DATA and
	ORT_TARGET.
	(struct gimplify_omp_ctx): Add combined_loop field.
	(gimplify_call_expr, gimplify_modify_expr): Don't call fold_stmt
	on stmts inside of target region.
	(is_gimple_stmt): Return true for OMP_DISTRIBUTE and OMP_TASKGROUP.
	(omp_firstprivatize_variable): Handle GOVD_MAP, GOVD_ALIGNED,
	ORT_TARGET and ORT_TARGET_DATA.
	(omp_add_variable): Avoid checks on readding var for GOVD_ALIGNED.
	Handle GOVD_MAP.
	(omp_notice_threadprivate_variable): Complain about threadprivate
	variables in target region.
	(omp_notice_variable): Complain about vars with non-mappable type
	in target region.  Handle ORT_TEAMS, ORT_TARGET and ORT_TARGET_DATA.
	(omp_check_private): Ignore ORT_TARGET* regions.
	(gimplify_scan_omp_clauses, gimplify_adjust_omp_clauses_1,
	gimplify_adjust_omp_clauses): Handle new OpenMP 4.0 clauses.
	(find_combined_omp_for): New function.
	(gimplify_omp_for): Handle gimplification of combined loops.
	(gimplify_omp_workshare): Gimplify also OMP_TARGET, OMP_TARGET_DATA,
	OMP_TEAMS.
	(gimplify_omp_target_update): New function.
	(gimplify_omp_atomic): Handle OMP_ATOMIC_SEQ_CST.
	(gimplify_expr): Handle OMP_DISTRIBUTE, OMP_TARGET, OMP_TARGET_DATA,
	OMP_TARGET_UPDATE, OMP_TEAMS, OMP_TASKGROUP.
	(gimplify_body): If fndecl has "omp declare target" attribute, add
	implicit ORT_TARGET context around it.
	* tree.def (OMP_DISTRIBUTE, OMP_TEAMS, OMP_TARGET_DATA, OMP_TARGET,
	OMP_TASKGROUP, OMP_TARGET_UPDATE): New tree codes.
	* tree-nested.c (convert_nonlocal_reference_stmt,
	convert_local_reference_stmt, convert_gimple_call): Handle
	GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
	* omp-builtins.def (BUILT_IN_GOMP_TASK): Use
	BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR
	instead of BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT.
	(BUILT_IN_GOMP_TARGET, BUILT_IN_GOMP_TARGET_DATA,
	BUILT_IN_GOMP_TARGET_END_DATA, BUILT_IN_GOMP_TARGET_UPDATE,
	BUILT_IN_GOMP_TEAMS, BUILT_IN_GOMP_BARRIER_CANCEL,
	BUILT_IN_GOMP_LOOP_END_CANCEL,
	BUILT_IN_GOMP_SECTIONS_END_CANCEL, BUILT_IN_OMP_GET_TEAM_NUM,
	BUILT_IN_OMP_GET_NUM_TEAMS, BUILT_IN_GOMP_TASKGROUP_START,
	BUILT_IN_GOMP_TASKGROUP_END, BUILT_IN_GOMP_PARALLEL_LOOP_STATIC,
	BUILT_IN_GOMP_PARALLEL_LOOP_DYNAMIC,
	BUILT_IN_GOMP_PARALLEL_LOOP_GUIDED,
	BUILT_IN_GOMP_PARALLEL_LOOP_RUNTIME, BUILT_IN_GOMP_PARALLEL,
	BUILT_IN_GOMP_PARALLEL_SECTIONS, BUILT_IN_GOMP_CANCEL,
	BUILT_IN_GOMP_CANCELLATION_POINT): New built-ins.
	(BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START,
	BUILT_IN_GOMP_PARALLEL_LOOP_DYNAMIC_START,
	BUILT_IN_GOMP_PARALLEL_LOOP_GUIDED_START,
	BUILT_IN_GOMP_PARALLEL_LOOP_RUNTIME_START,
	BUILT_IN_GOMP_PARALLEL_START, BUILT_IN_GOMP_PARALLEL_END,
	BUILT_IN_GOMP_PARALLEL_SECTIONS_START): Remove.
	* tree-inline.c (remap_gimple_stmt, estimate_num_insns):
	Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
	* gimple.c (gimple_build_omp_taskgroup, gimple_build_omp_target,
	gimple_build_omp_teams): New functions.
	(walk_gimple_op): Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and
	GIMPLE_OMP_TASKGROUP.  Walk optional lhs on GIMPLE_OMP_RETURN.
	(walk_gimple_stmt, gimple_copy): Handle GIMPLE_OMP_TARGET,
	GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
	* gimple.h (enum gf_mask): GF_OMP_FOR_KIND_DISTRIBUTE,
	GF_OMP_FOR_COMBINED, GF_OMP_FOR_COMBINED_INTO,
	GF_OMP_TARGET_KIND_MASK, GF_OMP_TARGET_KIND_REGION,
	GF_OMP_TARGET_KIND_DATA, GF_OMP_TARGET_KIND_UPDATE,
	GF_OMP_ATOMIC_SEQ_CST): New.
	(gimple_build_omp_taskgroup, gimple_build_omp_target,
	gimple_build_omp_teams): New prototypes.
	(gimple_has_substatements): Handle GIMPLE_OMP_TARGET,
	GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
	(gimple_omp_subcode): Use GIMPLE_OMP_TEAMS instead of
	GIMPLE_OMP_SINGLE as end of range.
	(gimple_omp_return_set_lhs, gimple_omp_return_lhs,
	gimple_omp_return_lhs_ptr, gimple_omp_atomic_seq_cst_p,
	gimple_omp_atomic_set_seq_cst, gimple_omp_for_combined_p,
	gimple_omp_for_set_combined_p, gimple_omp_for_combined_into_p,
	gimple_omp_for_set_combined_into_p, gimple_omp_target_clauses,
	gimple_omp_target_clauses_ptr, gimple_omp_target_set_clauses,
	gimple_omp_target_kind, gimple_omp_target_set_kind,
	gimple_omp_target_child_fn, gimple_omp_target_child_fn_ptr,
	gimple_omp_target_set_child_fn, gimple_omp_target_data_arg,
	gimple_omp_target_data_arg_ptr, gimple_omp_target_set_data_arg,
	gimple_omp_teams_clauses, gimple_omp_teams_clauses_ptr,
	gimple_omp_teams_set_clauses): New inlines.
	(CASE_GIMPLE_OMP): Add GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS
	and GIMPLE_OMP_TASKGROUP.
	* tree-core.h (enum omp_clause_code): Add new OpenMP 4.0 clause
	codes.
	(enum omp_clause_depend_kind, enum omp_clause_map_kind,
	enum omp_clause_proc_bind_kind): New.
	(union omp_clause_subcode): Add depend_kind, map_kind and
	proc_bind_kind fields.
	* tree-cfg.c (make_edges): Handle GIMPLE_OMP_TARGET,
	GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
	* langhooks-def.h (lhd_omp_mappable_type): New prototype.
	(LANG_HOOKS_OMP_MAPPABLE_TYPE): Define.
	(LANG_HOOKS_FOR_TYPES_INITIALIZER): Add it.
gcc/c-family/
	* c-cppbuiltin.c (c_cpp_builtins): Predefine _OPENMP to
	201307 instead of 201107.
	* c-common.c (DEF_FUNCTION_TYPE_8): Define.
	(c_common_attribute_table): Add "omp declare target" and
	"omp declare simd" attributes.
	(handle_omp_declare_target_attribute,
	handle_omp_declare_simd_attribute): New functions.
	* c-omp.c: Include c-pragma.h.
	(c_finish_omp_taskgroup): New function.
	(c_finish_omp_atomic): Add swapped argument, if true,
	build the operation first with the rhs, lhs arguments and use
	build_modify_expr with NOP_EXPR.
	(c_finish_omp_for): Add code argument, pass it down to make_node.
	(c_omp_split_clauses): New function.
	(c_split_parallel_clauses): Removed.
	(c_omp_declare_simd_clause_cmp, c_omp_declare_simd_clauses_to_numbers,
	c_omp_declare_simd_clauses_to_decls): New functions.
	* c-common.h (omp_clause_mask): New type.
	(OMP_CLAUSE_MASK_1): Define.
	(omp_clause_mask::omp_clause_mask, omp_clause_mask::operator &=,
	omp_clause_mask::operator |=, omp_clause_mask::operator ~,
	omp_clause_mask::operator |, omp_clause_mask::operator &,
	omp_clause_mask::operator <<, omp_clause_mask::operator >>,
	omp_clause_mask::operator ==): New methods.
	(enum c_omp_clause_split): New.
	(c_finish_omp_taskgroup): New prototype.
	(c_finish_omp_atomic): Add swapped argument.
	(c_finish_omp_for): Add code argument.
	(c_omp_split_clauses): New prototype.
	(c_split_parallel_clauses): Removed.
	(c_omp_declare_simd_clauses_to_numbers,
	c_omp_declare_simd_clauses_to_decls): New prototypes.
	* c-pragma.c (omp_pragmas): Add new OpenMP 4.0 constructs.
	* c-pragma.h (enum pragma_kind): Add PRAGMA_OMP_CANCEL,
	PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_DECLARE_REDUCTION,
	PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_END_DECLARE_TARGET, PRAGMA_OMP_SIMD,
	PRAGMA_OMP_TARGET, PRAGMA_OMP_TASKGROUP and PRAGMA_OMP_TEAMS.
	Remove PRAGMA_OMP_PARALLEL_FOR and PRAGMA_OMP_PARALLEL_SECTIONS.
	(enum pragma_omp_clause): Add PRAGMA_OMP_CLAUSE_ALIGNED,
	PRAGMA_OMP_CLAUSE_DEPEND, PRAGMA_OMP_CLAUSE_DEVICE,
	PRAGMA_OMP_CLAUSE_DIST_SCHEDULE, PRAGMA_OMP_CLAUSE_FOR,
	PRAGMA_OMP_CLAUSE_FROM, PRAGMA_OMP_CLAUSE_INBRANCH,
	PRAGMA_OMP_CLAUSE_LINEAR, PRAGMA_OMP_CLAUSE_MAP,
	PRAGMA_OMP_CLAUSE_NOTINBRANCH, PRAGMA_OMP_CLAUSE_NUM_TEAMS,
	PRAGMA_OMP_CLAUSE_PARALLEL, PRAGMA_OMP_CLAUSE_PROC_BIND,
	PRAGMA_OMP_CLAUSE_SAFELEN, PRAGMA_OMP_CLAUSE_SECTIONS,
	PRAGMA_OMP_CLAUSE_SIMDLEN, PRAGMA_OMP_CLAUSE_TASKGROUP,
	PRAGMA_OMP_CLAUSE_THREAD_LIMIT, PRAGMA_OMP_CLAUSE_TO and
	PRAGMA_OMP_CLAUSE_UNIFORM.
gcc/ada/
	* gcc-interface/utils.c (DEF_FUNCTION_TYPE_8): Define.
gcc/fortran/
	* trans-openmp.c (gfc_omp_clause_default_ctor,
	gfc_omp_clause_dtor): Return NULL for OMP_CLAUSE_REDUCTION.
	* f95-lang.c (ATTR_NULL, DEF_FUNCTION_TYPE_8): Define.
	* types.def (DEF_FUNCTION_TYPE_8): Document.
	(BT_FN_VOID_OMPFN_PTR_UINT,
	BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG,
	BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG,
	BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): Remove.
	(BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT,
	BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT,
	BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
	BT_FN_BOOL_INT, BT_FN_BOOL_INT_BOOL, BT_FN_VOID_UINT_UINT,
	BT_FN_VOID_INT_PTR_SIZE_PTR_PTR_PTR,
	BT_FN_VOID_INT_OMPFN_PTR_SIZE_PTR_PTR_PTR,
	BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR): New.
gcc/lto/
	* lto-lang.c (DEF_FUNCTION_TYPE_8): Define.
gcc/c/
	* c-lang.h (current_omp_declare_target_attribute): New extern
	decl.
	* c-parser.c: Include c-lang.h.
	(struct c_parser): Change tokens to c_token *.
	Add tokens_buf field.  Change tokens_avail type to unsigned int.
	(c_parser_consume_token): If parser->tokens isn't
	&parser->tokens_buf[0], increment parser->tokens.
	(c_parser_consume_pragma): Likewise.
	(enum pragma_context): Add pragma_struct and pragma_param.
	(c_parser_external_declaration): Adjust
	c_parser_declaration_or_fndef caller.
	(c_parser_declaration_or_fndef): Add omp_declare_simd_clauses
	argument, if it is a non-vNULL vector, call c_finish_omp_declare_simd.
	Adjust recursive call.
	(c_parser_struct_or_union_specifier): Use pragma_struct instead
	of pragma_external.
	(c_parser_parameter_declaration): Use pragma_param instead of
	pragma_external.
	(c_parser_compound_statement_nostart, c_parser_label,
	c_parser_for_statement): Adjust
	c_parser_declaration_or_fndef callers.
	(c_parser_expr_no_commas): Add omp_atomic_lhs argument, pass
	it through to c_parser_conditional_expression.
	(c_parser_conditional_expression): Add omp_atomic_lhs argument,
	pass it through to c_parser_binary_expression.  Adjust recursive
	call.
	(c_parser_binary_expression): Remove prec argument, add
	omp_atomic_lhs argument instead.  Always start from PREC_NONE, if
	omp_atomic_lhs is non-NULL and one of the arguments of toplevel
	binop matches it, use build2 instead of parser_build_binary_op.
	(c_parser_pragma): Handle PRAGMA_OMP_CANCEL,
	PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_TARGET,
	PRAGMA_OMP_END_DECLARE_TARGET, PRAGMA_OMP_DECLARE_REDUCTION.
	Handle pragma_struct and pragma_param the same as pragma_external.
	(c_parser_omp_clause_name): Parse new OpenMP 4.0 clause names.
	(c_parser_omp_variable_list): Parse array sections for
	OMP_CLAUSE_{DEPEND,MAP,TO,FROM} clauses.
	(c_parser_omp_clause_collapse): Fully fold collapse expression.
	(c_parser_omp_clause_reduction): Handle user defined reductions.
	(c_parser_omp_clause_branch, c_parser_omp_clause_cancelkind,
	c_parser_omp_clause_num_teams, c_parser_omp_clause_thread_limit,
	c_parser_omp_clause_aligned, c_parser_omp_clause_linear,
	c_parser_omp_clause_safelen, c_parser_omp_clause_simdlen,
	c_parser_omp_clause_depend, c_parser_omp_clause_map,
	c_parser_omp_clause_device, c_parser_omp_clause_dist_schedule,
	c_parser_omp_clause_proc_bind, c_parser_omp_clause_to,
	c_parser_omp_clause_from, c_parser_omp_clause_uniform): New functions.
	(c_parser_omp_all_clauses): Add finish_p argument.  Don't call
	c_finish_omp_clauses if it is false.  Handle new OpenMP 4.0 clauses.
	(c_parser_omp_atomic): Parse seq_cst clause, pass true if it is
	present to c_finish_omp_atomic.  Handle OpenMP 4.0 atomic forms.
	(c_parser_omp_for_loop): Add CODE argument, pass it through
	to c_finish_omp_for.  Change last argument to cclauses,
	and adjust uses to grab parallel clauses from the array of all
	the split clauses.  Adjust c_parser_binary_expression,
	c_parser_declaration_or_fndef and c_finish_omp_for callers.
	(omp_split_clauses): New function.
	(c_parser_omp_simd): New function.
	(c_parser_omp_for): Add p_name, mask and cclauses arguments.
	Allow the function to be called also when parsing combined constructs,
	and call c_parser_omp_simd when parsing for simd.
	(c_parser_omp_sections_scope): If section-sequence doesn't start with
	#pragma omp section, require exactly one structured-block instead of
	sequence of statements.
	(c_parser_omp_sections): Add p_name, mask and cclauses arguments.
	Allow the function to be called also when parsing combined constructs.
	(c_parser_omp_parallel): Add p_name, mask and cclauses arguments.
	Allow the function to be called also when parsing combined
	constructs.
	(c_parser_omp_taskgroup, c_parser_omp_cancel,
	c_parser_omp_cancellation_point, c_parser_omp_distribute,
	c_parser_omp_teams, c_parser_omp_target_data,
	c_parser_omp_target_update, c_parser_omp_target,
	c_parser_omp_declare_simd, c_finish_omp_declare_simd,
	c_parser_omp_declare_target, c_parser_omp_end_declare_target,
	c_parser_omp_declare_reduction, c_parser_omp_declare): New functions.
	(c_parser_omp_construct): Add p_name and mask vars.  Handle
	PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP,
	PRAGMA_OMP_TEAMS.  Adjust c_parser_omp_for, c_parser_omp_parallel
	and c_parser_omp_sections callers.
	(c_parse_file): Initialize tparser.tokens and the_parser->tokens here.
	(OMP_FOR_CLAUSE_MASK, OMP_SECTIONS_CLAUSE_MASK,
	OMP_SINGLE_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1.
	(OMP_PARALLEL_CLAUSE_MASK): Likewise.  Add OMP_CLAUSE_PROC_BIND.
	(OMP_TASK_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1.  Add
	OMP_CLAUSE_DEPEND.
	(OMP_SIMD_CLAUSE_MASK, OMP_CANCEL_CLAUSE_MASK,
	OMP_CANCELLATION_POINT_CLAUSE_MASK, OMP_DISTRIBUTE_CLAUSE_MASK,
	OMP_TEAMS_CLAUSE_MASK, OMP_TARGET_DATA_CLAUSE_MASK,
	OMP_TARGET_UPDATE_CLAUSE_MASK, OMP_TARGET_CLAUSE_MASK,
	OMP_DECLARE_SIMD_CLAUSE_MASK): Define.
	* c-typeck.c: Include tree-inline.h.
	(c_finish_omp_cancel, c_finish_omp_cancellation_point,
	handle_omp_array_sections_1, handle_omp_array_sections,
	c_clone_omp_udr, c_find_omp_placeholder_r): New functions.
	(c_finish_omp_clauses): Handle new OpenMP 4.0 clauses and
	user defined reductions.
	(c_tree_equal): New function.
	* c-tree.h (temp_store_parm_decls, temp_pop_parm_decls,
	c_finish_omp_cancel, c_finish_omp_cancellation_point, c_tree_equal,
	c_omp_reduction_id, c_omp_reduction_decl, c_omp_reduction_lookup,
	c_check_omp_declare_reduction_r): New prototypes.
	* c-decl.c (current_omp_declare_target_attribute): New variable.
	(c_decl_attributes): New function.
	(start_decl, start_function): Use it instead of decl_attributes.
	(temp_store_parm_decls, temp_pop_parm_decls, c_omp_reduction_id,
	c_omp_reduction_decl, c_omp_reduction_lookup,
	c_check_omp_declare_reduction_r): New functions.
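
The OMP_*_CLAUSE_MASK changes above exist because the set of
PRAGMA_OMP_CLAUSE_* codes can now outgrow a plain int shift, so the masks
are built from OMP_CLAUSE_MASK_1, a constant of the wider omp_clause_mask
type added in c-common.h.  A rough sketch of the pattern (the clause set
shown is illustrative, not copied from c-parser.c):

#define OMP_SIMD_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SAFELEN)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALIGNED)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))
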
gcc/cp/
	* decl.c (duplicate_decls): Error out for redeclaration of UDRs.
	(declare_simd_adjust_this): New function.
	(grokfndecl): If "omp declare simd" attribute is present,
	call declare_simd_adjust_this if needed and
	c_omp_declare_simd_clauses_to_numbers.
	* cp-array-notation.c (expand_array_notation_exprs): Handle
	OMP_TASKGROUP.
	* cp-gimplify.c (cp_gimplify_expr): Handle OMP_SIMD and
	OMP_DISTRIBUTE.  Handle is_invisiref_parm decls in
	OMP_CLAUSE_REDUCTION.
	(cp_genericize_r): Handle OMP_SIMD and OMP_DISTRIBUTE like
	OMP_FOR.
	(cxx_omp_privatize_by_reference): Return true for
	is_invisiref_parm decls.
	(cxx_omp_finish_clause): Adjust cxx_omp_create_clause_info
	caller.
	* pt.c (apply_late_template_attributes): For "omp declare simd"
	attribute call tsubst_omp_clauses,
	c_omp_declare_simd_clauses_to_decls, finish_omp_clauses
	and c_omp_declare_simd_clauses_to_numbers.
	(instantiate_class_template_1): Call cp_check_omp_declare_reduction
	for UDRs.
	(tsubst_decl): Handle UDRs.
	(tsubst_omp_clauses): Add declare_simd argument, if true don't
	call finish_omp_clauses.  Handle new OpenMP 4.0 clauses.
	Handle non-NULL OMP_CLAUSE_REDUCTION_PLACEHOLDER on
	OMP_CLAUSE_REDUCTION.
	(tsubst_expr): For UDRs call pushdecl and
	cp_check_omp_declare_reduction.  Adjust tsubst_omp_clauses
	callers.  Handle OMP_SIMD, OMP_DISTRIBUTE, OMP_TEAMS,
	OMP_TARGET_DATA, OMP_TARGET_UPDATE, OMP_TARGET, OMP_TASKGROUP.
	Adjust finish_omp_atomic caller.
	(tsubst_omp_udr): New function.
	(instantiate_decl): For UDRs at block scope, don't call
	start_preparsed_function/finish_function.  Call tsubst_omp_udr.
	* semantics.c (cxx_omp_create_clause_info): Add need_dtor argument,
	use it instead of need_default_ctor || need_copy_ctor.
	(struct cp_check_omp_declare_reduction_data): New type.
	(handle_omp_array_sections_1, handle_omp_array_sections,
	omp_reduction_id, omp_reduction_lookup,
	cp_remove_omp_priv_cleanup_stmt, cp_check_omp_declare_reduction_r,
	cp_check_omp_declare_reduction, clone_omp_udr,
	find_omp_placeholder_r, finish_omp_reduction_clause): New functions.
	(finish_omp_clauses): Handle new OpenMP 4.0 clauses and user defined
	reductions.
	(finish_omp_for): Add CODE argument, use it instead of hardcoded
	OMP_FOR.  Adjust c_finish_omp_for caller.
	(finish_omp_atomic): Add seq_cst argument, adjust
	c_finish_omp_atomic callers, handle seq_cst and new OpenMP 4.0
	atomic variants.
	(finish_omp_cancel, finish_omp_cancellation_point): New functions.
	* decl2.c (mark_used): Force immediate instantiation of
	DECL_OMP_DECLARE_REDUCTION_P decls.
	(is_late_template_attribute): Return true for "omp declare simd"
	attribute.
	(cp_omp_mappable_type): New function.
	(cplus_decl_attributes): Add implicit "omp declare target" attribute
	if requested.
	* parser.c (cp_debug_parser): Print
	parser->colon_doesnt_start_class_def_p.
	(cp_ensure_no_omp_declare_simd, cp_finalize_omp_declare_simd): New
	functions.
	(enum pragma_context): Add pragma_member and pragma_objc_icode.
	(cp_parser_binary_expression): Handle no_toplevel_fold_p
	even for binary operations other than comparison.
	(cp_parser_linkage_specification): Call
	cp_ensure_no_omp_declare_simd if needed.
	(cp_parser_namespace_definition): Likewise.
	(cp_parser_init_declarator): Call cp_finalize_omp_declare_simd.
	(cp_parser_direct_declarator): Pass declarator to
	cp_parser_late_return_type_opt.
	(cp_parser_late_return_type_opt): Add declarator argument,
	call cp_parser_late_parsing_omp_declare_simd for declare simd.
	(cp_parser_class_specifier_1): Call cp_ensure_no_omp_declare_simd.
	Parse UDRs before all other methods.
	(cp_parser_member_specification_opt): Use pragma_member instead of
	pragma_external.
	(cp_parser_member_declaration): Call cp_finalize_omp_declare_simd.
	(cp_parser_function_definition_from_specifiers_and_declarator,
	cp_parser_save_member_function_body): Likewise.
	(cp_parser_late_parsing_for_member): Handle UDRs specially.
	(cp_parser_next_token_starts_class_definition_p): Don't allow
	CPP_COLON if colon_doesnt_start_class_def_p flag is true.
	(cp_parser_objc_interstitial_code): Use pragma_objc_icode
	instead of pragma_external.
	(cp_parser_omp_clause_name): Parse new OpenMP 4.0 clause names.
	(cp_parser_omp_var_list_no_open): Parse array sections for
	OMP_CLAUSE_{DEPEND,MAP,TO,FROM} clauses.  Add COLON argument,
	if non-NULL, allow parsing to end with a colon rather than close
	paren.
	(cp_parser_omp_var_list): Adjust cp_parser_omp_var_list_no_open
	caller.
	(cp_parser_omp_clause_reduction): Handle user defined reductions.
	(cp_parser_omp_clause_branch, cp_parser_omp_clause_cancelkind,
	cp_parser_omp_clause_num_teams, cp_parser_omp_clause_thread_limit,
	cp_parser_omp_clause_aligned, cp_parser_omp_clause_linear,
	cp_parser_omp_clause_safelen, cp_parser_omp_clause_simdlen,
	cp_parser_omp_clause_depend, cp_parser_omp_clause_map,
	cp_parser_omp_clause_device, cp_parser_omp_clause_dist_schedule,
	cp_parser_omp_clause_proc_bind, cp_parser_omp_clause_to,
	cp_parser_omp_clause_from, cp_parser_omp_clause_uniform): New
	functions.
	(cp_parser_omp_all_clauses): Add finish_p argument.  Don't call
	finish_omp_clauses if it is false.  Handle new OpenMP 4.0 clauses.
	(cp_parser_omp_atomic): Parse seq_cst clause, pass
	true if it is present to finish_omp_atomic.  Handle new OpenMP 4.0
	atomic forms.
	(cp_parser_omp_for_loop): Add CODE argument, pass it through
	to finish_omp_for.  Change last argument to cclauses,
	and adjust uses to grab parallel clauses from the array of all
	the split clauses.
	(cp_omp_split_clauses): New function.
	(cp_parser_omp_simd): New function.
	(cp_parser_omp_for): Add p_name, mask and cclauses arguments.
	Allow the function to be called also when parsing combined constructs,
	and call cp_parser_omp_simd when parsing for simd.
	(cp_parser_omp_sections_scope): If section-sequence doesn't start with
	#pragma omp section, require exactly one structured-block instead of
	sequence of statements.
	(cp_parser_omp_sections): Add p_name, mask and cclauses arguments.
	Allow the function to be called also when parsing combined constructs.
	(cp_parser_omp_parallel): Add p_name, mask and cclauses arguments.
	Allow the function to be called also when parsing combined
	constructs.
	(cp_parser_omp_taskgroup, cp_parser_omp_cancel,
	cp_parser_omp_cancellation_point, cp_parser_omp_distribute,
	cp_parser_omp_teams, cp_parser_omp_target_data,
	cp_parser_omp_target_update, cp_parser_omp_target,
	cp_parser_omp_declare_simd, cp_parser_late_parsing_omp_declare_simd,
	cp_parser_omp_declare_target, cp_parser_omp_end_declare_target,
	cp_parser_omp_declare_reduction_exprs, cp_parser_omp_declare_reduction,
	cp_parser_omp_declare): New functions.
	(cp_parser_omp_construct): Add p_name and mask vars.  Handle
	PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP,
	PRAGMA_OMP_TEAMS.  Adjust cp_parser_omp_for, cp_parser_omp_parallel
	and cp_parser_omp_sections callers.
	(cp_parser_pragma): Handle PRAGMA_OMP_CANCEL,
	PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_DECLARE_REDUCTION,
	PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP,
	PRAGMA_OMP_TEAMS, PRAGMA_OMP_TARGET, PRAGMA_OMP_END_DECLARE_TARGET.
	Handle pragma_member and pragma_objc_icode like pragma_external.
	(OMP_FOR_CLAUSE_MASK, OMP_SECTIONS_CLAUSE_MASK,
	OMP_SINGLE_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1.
	(OMP_PARALLEL_CLAUSE_MASK): Likewise.  Add OMP_CLAUSE_PROC_BIND.
	(OMP_TASK_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1.  Add
	OMP_CLAUSE_DEPEND.
	(OMP_SIMD_CLAUSE_MASK, OMP_CANCEL_CLAUSE_MASK,
	OMP_CANCELLATION_POINT_CLAUSE_MASK, OMP_DISTRIBUTE_CLAUSE_MASK,
	OMP_TEAMS_CLAUSE_MASK, OMP_TARGET_DATA_CLAUSE_MASK,
	OMP_TARGET_UPDATE_CLAUSE_MASK, OMP_TARGET_CLAUSE_MASK,
	OMP_DECLARE_SIMD_CLAUSE_MASK): Define.
	* parser.h (struct cp_omp_declare_simd_data): New type.
	(struct cp_parser): Add colon_doesnt_start_class_def_p and
	omp_declare_simd fields.
	* cp-objcp-common.h (LANG_HOOKS_OMP_MAPPABLE_TYPE): Define.
	* cp-tree.h (struct lang_decl_fn): Add omp_declare_reduction_p
	bit.
	(DECL_OMP_DECLARE_REDUCTION_P): Define.
	(OMP_FOR_GIMPLIFYING_P): Use OMP_LOOP_CHECK macro.
	(struct saved_scope): Add omp_declare_target_attribute field.
	(cp_omp_mappable_type, omp_reduction_id,
	cp_remove_omp_priv_cleanup_stmt, cp_check_omp_declare_reduction,
	finish_omp_cancel, finish_omp_cancellation_point): New prototypes.
	(finish_omp_for): Add CODE argument.
	(finish_omp_atomic): Add seq_cst argument.
	(cxx_omp_create_clause_info): Add need_dtor argument.
gcc/testsuite/
	* c-c++-common/gomp/atomic-15.c: Adjust for C diagnostics.
	Remove error test that is now valid in OpenMP 4.0.
	* c-c++-common/gomp/atomic-16.c: New test.
	* c-c++-common/gomp/cancel-1.c: New test.
	* c-c++-common/gomp/depend-1.c: New test.
	* c-c++-common/gomp/depend-2.c: New test.
	* c-c++-common/gomp/map-1.c: New test.
	* c-c++-common/gomp/pr58472.c: New test.
	* c-c++-common/gomp/sections1.c: New test.
	* c-c++-common/gomp/simd1.c: New test.
	* c-c++-common/gomp/simd2.c: New test.
	* c-c++-common/gomp/simd3.c: New test.
	* c-c++-common/gomp/simd4.c: New test.
	* c-c++-common/gomp/simd5.c: New test.
	* c-c++-common/gomp/single1.c: New test.
	* g++.dg/gomp/block-0.C: Adjust for stricter #pragma omp sections
	parser.
	* g++.dg/gomp/block-3.C: Likewise.
	* g++.dg/gomp/clause-3.C: Adjust error messages.
	* g++.dg/gomp/declare-simd-1.C: New test.
	* g++.dg/gomp/declare-simd-2.C: New test.
	* g++.dg/gomp/depend-1.C: New test.
	* g++.dg/gomp/depend-2.C: New test.
	* g++.dg/gomp/target-1.C: New test.
	* g++.dg/gomp/target-2.C: New test.
	* g++.dg/gomp/taskgroup-1.C: New test.
	* g++.dg/gomp/teams-1.C: New test.
	* g++.dg/gomp/udr-1.C: New test.
	* g++.dg/gomp/udr-2.C: New test.
	* g++.dg/gomp/udr-3.C: New test.
	* g++.dg/gomp/udr-4.C: New test.
	* g++.dg/gomp/udr-5.C: New test.
	* g++.dg/gomp/udr-6.C: New test.
	* gcc.dg/autopar/outer-1.c: Expect 4 instead of 5 loopfn matches.
	* gcc.dg/autopar/outer-2.c: Likewise.
	* gcc.dg/autopar/outer-3.c: Likewise.
	* gcc.dg/autopar/outer-4.c: Likewise.
	* gcc.dg/autopar/outer-5.c: Likewise.
	* gcc.dg/autopar/outer-6.c: Likewise.
	* gcc.dg/autopar/parallelization-1.c: Likewise.
	* gcc.dg/gomp/block-3.c: Adjust for stricter #pragma omp sections
	parser.
	* gcc.dg/gomp/clause-1.c: Adjust error messages.
	* gcc.dg/gomp/combined-1.c: Look for GOMP_parallel_loop_runtime
	instead of GOMP_parallel_loop_runtime_start.
	* gcc.dg/gomp/declare-simd-1.c: New test.
	* gcc.dg/gomp/declare-simd-2.c: New test.
	* gcc.dg/gomp/nesting-1.c: Adjust for stricter #pragma omp sections
	parser.  Add further #pragma omp sections nesting tests.
	* gcc.dg/gomp/target-1.c: New test.
	* gcc.dg/gomp/target-2.c: New test.
	* gcc.dg/gomp/taskgroup-1.c: New test.
	* gcc.dg/gomp/teams-1.c: New test.
	* gcc.dg/gomp/udr-1.c: New test.
	* gcc.dg/gomp/udr-2.c: New test.
	* gcc.dg/gomp/udr-3.c: New test.
	* gcc.dg/gomp/udr-4.c: New test.
	* gfortran.dg/gomp/appendix-a/a.35.5.f90: Add dg-error.

Co-Authored-By: Richard Henderson <rth@redhat.com>
Co-Authored-By: Tobias Burnus <burnus@net-b.de>

From-SVN: r203408
2013-10-11 11:26:50 +02:00
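
Before the source itself, a small self-contained OpenMP 4.0 example that
exercises the paths implemented in this file (taskgroup, deferred tasks and
depend clauses); it is purely illustrative and not taken from the testsuite.
The second task reaches GOMP_task with the 8 bit set in flags and a depend
vector of the form { count, number of out/inout items, addresses... }, which
is what the deferred path below decodes.

#include <omp.h>

int
main (void)
{
  int x = 0, y = 0;
  #pragma omp parallel
  #pragma omp single
  #pragma omp taskgroup
  {
    #pragma omp task depend (out: x)
    x = 1;                      /* must run before the next task */
    #pragma omp task depend (in: x) depend (out: y)
    y = x + 1;                  /* ordered after the task above */
  }                             /* taskgroup waits for both tasks */
  return !(x == 1 && y == 2);
}
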

/* Copyright (C) 2007-2013 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the maintenance of tasks in response to task
   creation and termination.  */

#include "libgomp.h"
#include <stdlib.h>
#include <string.h>
typedef struct gomp_task_depend_entry *hash_entry_type;
static inline void *
htab_alloc (size_t size)
{
return gomp_malloc (size);
}
static inline void
htab_free (void *ptr)
{
free (ptr);
}
#include "hashtab.h"
static inline hashval_t
htab_hash (hash_entry_type element)
{
return hash_pointer (element->addr);
}
static inline bool
htab_eq (hash_entry_type x, hash_entry_type y)
{
return x->addr == y->addr;
}
/* Create a new task data structure.  */

void
gomp_init_task (struct gomp_task *task, struct gomp_task *parent_task,
                struct gomp_task_icv *prev_icv)
{
  task->parent = parent_task;
  task->icv = *prev_icv;
  task->kind = GOMP_TASK_IMPLICIT;
  task->in_taskwait = false;
  task->in_tied_task = false;
  task->final_task = false;
  task->copy_ctors_done = false;
  task->children = NULL;
  task->taskgroup = NULL;
  task->dependers = NULL;
  task->depend_hash = NULL;
  task->depend_count = 0;
  gomp_sem_init (&task->taskwait_sem, 0);
}

/* Clean up a task, after completing it.  */

void
gomp_end_task (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;

  gomp_finish_task (task);
  thr->task = task->parent;
}

static inline void
gomp_clear_parent (struct gomp_task *children)
{
  struct gomp_task *task = children;

  if (task)
    do
      {
        task->parent = NULL;
        task = task->next_child;
      }
    while (task != children);
}

/* Called when encountering an explicit task directive. If IF_CLAUSE is
false, then we must not delay in executing the task. If UNTIED is true,
then the task may be executed by any member of the team. */
void
GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
long arg_size, long arg_align, bool if_clause, unsigned flags,
void **depend)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
#ifdef HAVE_BROKEN_POSIX_SEMAPHORES
/* If pthread_mutex_* is used for omp_*lock*, then each task must be
tied to one thread all the time. This means UNTIED tasks must be
tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN
might be running on different thread than FN. */
if (cpyfn)
if_clause = false;
if (flags & 1)
flags &= ~1;
#endif
/* If parallel or taskgroup has been cancelled, don't start new tasks. */
if (team
&& (gomp_team_barrier_cancelled (&team->barrier)
|| (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
return;
if (!if_clause || team == NULL
|| (thr->task && thr->task->final_task)
|| team->task_count > 64 * team->nthreads)
{
struct gomp_task task;
/* If there are depend clauses and earlier deferred sibling tasks
with depend clauses, check if there isn't a dependency. If there
is, fall through to the deferred task handling, as we can't
schedule such tasks right away. There is no need to handle
depend clauses for non-deferred tasks other than this, because
the parent task is suspended until the child task finishes and thus
it can't start further child tasks. */
if ((flags & 8) && thr->task && thr->task->depend_hash)
{
struct gomp_task *parent = thr->task;
struct gomp_task_depend_entry elem, *ent = NULL;
size_t ndepend = (uintptr_t) depend[0];
size_t nout = (uintptr_t) depend[1];
size_t i;
gomp_mutex_lock (&team->task_lock);
for (i = 0; i < ndepend; i++)
{
elem.addr = depend[i + 2];
ent = htab_find (parent->depend_hash, &elem);
for (; ent; ent = ent->next)
if (i >= nout && ent->is_in)
continue;
else
break;
if (ent)
break;
}
gomp_mutex_unlock (&team->task_lock);
if (ent)
goto defer;
}
gomp_init_task (&task, thr->task, gomp_icv (false));
task.kind = GOMP_TASK_IFFALSE;
task.final_task = (thr->task && thr->task->final_task) || (flags & 2);
if (thr->task)
{
task.in_tied_task = thr->task->in_tied_task;
task.taskgroup = thr->task->taskgroup;
}
thr->task = &task;
if (__builtin_expect (cpyfn != NULL, 0))
{
char buf[arg_size + arg_align - 1];
char *arg = (char *) (((uintptr_t) buf + arg_align - 1)
& ~(uintptr_t) (arg_align - 1));
cpyfn (arg, data);
fn (arg);
}
else
fn (data);
/* Access to "children" is normally done inside a task_lock
mutex region, but the only way this particular task.children
can be set is if this thread's task work function (fn)
creates children. So since the setter is *this* thread, we
need no barriers here when testing for non-NULL. We can have
task.children set by the current thread then changed by a
child thread, but seeing a stale non-NULL value is not a
problem. Once past the task_lock acquisition, this thread
will see the real value of task.children. */
if (task.children != NULL)
{
gomp_mutex_lock (&team->task_lock);
gomp_clear_parent (task.children);
gomp_mutex_unlock (&team->task_lock);
}
gomp_end_task ();
}
else
{
defer:;
struct gomp_task *task;
struct gomp_task *parent = thr->task;
struct gomp_taskgroup *taskgroup = parent->taskgroup;
char *arg;
bool do_wake;
size_t depend_size = 0;
if (flags & 8)
depend_size = ((uintptr_t) depend[0]
* sizeof (struct gomp_task_depend_entry));
task = gomp_malloc (sizeof (*task) + depend_size
+ arg_size + arg_align - 1);
arg = (char *) (((uintptr_t) (task + 1) + depend_size + arg_align - 1)
& ~(uintptr_t) (arg_align - 1));
gomp_init_task (task, parent, gomp_icv (false));
task->kind = GOMP_TASK_IFFALSE;
task->in_tied_task = parent->in_tied_task;
task->taskgroup = taskgroup;
thr->task = task;
if (cpyfn)
{
cpyfn (arg, data);
task->copy_ctors_done = true;
}
else
memcpy (arg, data, arg_size);
thr->task = parent;
task->kind = GOMP_TASK_WAITING;
task->fn = fn;
task->fn_data = arg;
task->final_task = (flags & 2) >> 1;
gomp_mutex_lock (&team->task_lock);
/* If parallel or taskgroup has been cancelled, don't start new
tasks. */
if (__builtin_expect ((gomp_team_barrier_cancelled (&team->barrier)
|| (taskgroup && taskgroup->cancelled))
&& !task->copy_ctors_done, 0))
{
gomp_mutex_unlock (&team->task_lock);
gomp_finish_task (task);
free (task);
return;
}
if (taskgroup)
taskgroup->num_children++;
if (depend_size)
{
size_t ndepend = (uintptr_t) depend[0];
size_t nout = (uintptr_t) depend[1];
size_t i;
hash_entry_type ent;
task->depend_count = ndepend;
task->num_dependees = 0;
if (parent->depend_hash == NULL)
parent->depend_hash
= htab_create (2 * ndepend > 12 ? 2 * ndepend : 12);
for (i = 0; i < ndepend; i++)
{
task->depend[i].addr = depend[2 + i];
task->depend[i].next = NULL;
task->depend[i].prev = NULL;
task->depend[i].task = task;
task->depend[i].is_in = i >= nout;
task->depend[i].redundant = false;
hash_entry_type *slot
= htab_find_slot (&parent->depend_hash, &task->depend[i],
INSERT);
hash_entry_type out = NULL;
if (*slot)
{
/* If this task names the same address in
multiple depend clauses, all but the first
one are redundant.  As inout/out come first,
if any of them is inout/out, it will win,
which is the right semantics.  */
if ((*slot)->task == task)
{
task->depend[i].redundant = true;
continue;
}
for (ent = *slot; ent; ent = ent->next)
{
/* depend(in:...) doesn't depend on earlier
depend(in:...). */
if (i >= nout && ent->is_in)
continue;
if (!ent->is_in)
out = ent;
struct gomp_task *tsk = ent->task;
if (tsk->dependers == NULL)
{
tsk->dependers
= gomp_malloc (sizeof (struct gomp_dependers_vec)
+ 6 * sizeof (struct gomp_task *));
tsk->dependers->n_elem = 1;
tsk->dependers->allocated = 6;
tsk->dependers->elem[0] = task;
task->num_dependees++;
continue;
}
/* We already have some other dependency on tsk
from an earlier depend clause.  */
else if (tsk->dependers->n_elem
&& (tsk->dependers->elem[tsk->dependers->n_elem
- 1]
== task))
continue;
else if (tsk->dependers->n_elem
== tsk->dependers->allocated)
{
tsk->dependers->allocated
= tsk->dependers->allocated * 2 + 2;
tsk->dependers
= gomp_realloc (tsk->dependers,
sizeof (struct gomp_dependers_vec)
+ (tsk->dependers->allocated
* sizeof (struct gomp_task *)));
}
tsk->dependers->elem[tsk->dependers->n_elem++] = task;
task->num_dependees++;
}
task->depend[i].next = *slot;
(*slot)->prev = &task->depend[i];
}
*slot = &task->depend[i];
/* There is no need to store more than one depend({,in}out:)
task per address in the hash table chain, because each out
depends on all earlier outs, thus it is enough to record
just the last depend({,in}out:).  For depend(in:), we need
to keep all of the previous ones that have not finished yet,
because a later depend({,in}out:) might need to depend on all of
them.  So, if the new task's clause is depend({,in}out:),
we know there is at most one other depend({,in}out:) entry
left in the chain (OUT) and to maintain the invariant we now
need to remove it from the chain.  */
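/* An illustrative example of the invariant: if tasks are created in
the order T1 depend(out: x), T2 depend(in: x), T3 depend(in: x),
T4 depend(out: x), then T4 records dependences on T1, T2 and T3,
T1's entry is removed from the chain for &x below, and the chain
keeps T4's out entry plus T2's and T3's in entries until those
tasks finish.  */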
if (!task->depend[i].is_in && out)
{
if (out->next)
out->next->prev = out->prev;
out->prev->next = out->next;
out->redundant = true;
}
}
if (task->num_dependees)
{
gomp_mutex_unlock (&team->task_lock);
return;
}
}
if (parent->children)
{
task->next_child = parent->children;
task->prev_child = parent->children->prev_child;
task->next_child->prev_child = task;
task->prev_child->next_child = task;
}
else
{
task->next_child = task;
task->prev_child = task;
}
parent->children = task;
if (taskgroup)
{
if (taskgroup->children)
{
task->next_taskgroup = taskgroup->children;
task->prev_taskgroup = taskgroup->children->prev_taskgroup;
task->next_taskgroup->prev_taskgroup = task;
task->prev_taskgroup->next_taskgroup = task;
}
else
{
task->next_taskgroup = task;
task->prev_taskgroup = task;
}
taskgroup->children = task;
}
if (team->task_queue)
{
task->next_queue = team->task_queue;
task->prev_queue = team->task_queue->prev_queue;
task->next_queue->prev_queue = task;
task->prev_queue->next_queue = task;
}
else
{
task->next_queue = task;
task->prev_queue = task;
team->task_queue = task;
}
++team->task_count;
++team->task_queued_count;
gomp_team_barrier_set_task_pending (&team->barrier);
do_wake = team->task_running_count + !parent->in_tied_task
< team->nthreads;
gomp_mutex_unlock (&team->task_lock);
if (do_wake)
gomp_team_barrier_wake (&team->barrier, 1);
}
}
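/* Dequeue CHILD_TASK so it can be run: if it sits at the head of
PARENT's or TASKGROUP's children list, advance that pointer past it,
unlink it from TEAM's task queue and mark it tied.  Returns true if
the surrounding parallel or TASKGROUP has been cancelled and
CHILD_TASK should simply be discarded (its copy constructors have
not run).  Caller must hold TEAM's task_lock.  */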
static inline bool
gomp_task_run_pre (struct gomp_task *child_task, struct gomp_task *parent,
struct gomp_taskgroup *taskgroup, struct gomp_team *team)
{
if (parent && parent->children == child_task)
parent->children = child_task->next_child;
if (taskgroup && taskgroup->children == child_task)
taskgroup->children = child_task->next_taskgroup;
child_task->prev_queue->next_queue = child_task->next_queue;
child_task->next_queue->prev_queue = child_task->prev_queue;
if (team->task_queue == child_task)
{
if (child_task->next_queue != child_task)
team->task_queue = child_task->next_queue;
else
team->task_queue = NULL;
}
child_task->kind = GOMP_TASK_TIED;
if (--team->task_queued_count == 0)
gomp_team_barrier_clear_task_pending (&team->barrier);
if ((gomp_team_barrier_cancelled (&team->barrier)
|| (taskgroup && taskgroup->cancelled))
&& !child_task->copy_ctors_done)
return true;
return false;
}
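/* Remove CHILD_TASK's non-redundant depend entries from its parent's
depend hash table, unlinking each entry from its per-address chain
and clearing the hash slot once the chain becomes empty.  Called with
the team's task_lock held.  */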
static void
gomp_task_run_post_handle_depend_hash (struct gomp_task *child_task)
{
struct gomp_task *parent = child_task->parent;
size_t i;
for (i = 0; i < child_task->depend_count; i++)
if (!child_task->depend[i].redundant)
{
if (child_task->depend[i].next)
child_task->depend[i].next->prev = child_task->depend[i].prev;
if (child_task->depend[i].prev)
child_task->depend[i].prev->next = child_task->depend[i].next;
else
{
hash_entry_type *slot
= htab_find_slot (&parent->depend_hash, &child_task->depend[i],
NO_INSERT);
if (*slot != &child_task->depend[i])
abort ();
if (child_task->depend[i].next)
*slot = child_task->depend[i].next;
else
htab_clear_slot (parent->depend_hash, slot);
}
}
}
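/* CHILD_TASK has finished; release the tasks that were waiting on it.
Each depender whose last outstanding dependence this was is added to
its parent's and taskgroup's children lists (where those still exist)
and to TEAM's task queue, waking any thread blocked in a taskwait or
taskgroup wait for it.  Returns the number of tasks that became
runnable.  Called with the team's task_lock held.  */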
static size_t
gomp_task_run_post_handle_dependers (struct gomp_task *child_task,
struct gomp_team *team)
{
struct gomp_task *parent = child_task->parent;
size_t i, count = child_task->dependers->n_elem, ret = 0;
for (i = 0; i < count; i++)
{
struct gomp_task *task = child_task->dependers->elem[i];
if (--task->num_dependees != 0)
continue;
struct gomp_taskgroup *taskgroup = task->taskgroup;
if (parent)
{
if (parent->children)
{
task->next_child = parent->children;
task->prev_child = parent->children->prev_child;
task->next_child->prev_child = task;
task->prev_child->next_child = task;
}
else
{
task->next_child = task;
task->prev_child = task;
}
parent->children = task;
if (parent->in_taskwait)
{
parent->in_taskwait = false;
gomp_sem_post (&parent->taskwait_sem);
}
}
if (taskgroup)
{
if (taskgroup->children)
{
task->next_taskgroup = taskgroup->children;
task->prev_taskgroup = taskgroup->children->prev_taskgroup;
task->next_taskgroup->prev_taskgroup = task;
task->prev_taskgroup->next_taskgroup = task;
}
else
{
task->next_taskgroup = task;
task->prev_taskgroup = task;
}
taskgroup->children = task;
if (taskgroup->in_taskgroup_wait)
{
taskgroup->in_taskgroup_wait = false;
gomp_sem_post (&taskgroup->taskgroup_sem);
}
}
if (team->task_queue)
{
task->next_queue = team->task_queue;
task->prev_queue = team->task_queue->prev_queue;
task->next_queue->prev_queue = task;
task->prev_queue->next_queue = task;
}
else
{
task->next_queue = task;
task->prev_queue = task;
team->task_queue = task;
}
++team->task_count;
++team->task_queued_count;
++ret;
}
free (child_task->dependers);
child_task->dependers = NULL;
if (ret > 1)
gomp_team_barrier_set_task_pending (&team->barrier);
return ret;
}
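/* Depend-clause bookkeeping after CHILD_TASK has run: remove its
entries from the parent's depend hash table (unless the parent is
already gone) and release any dependers, returning how many of them
became runnable.  */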
static inline size_t
gomp_task_run_post_handle_depend (struct gomp_task *child_task,
struct gomp_team *team)
{
if (child_task->depend_count == 0)
return 0;
/* If the parent is gone already, the hash table has been freed and
nothing will use it anymore, so there is no need to remove anything
from it.  */
if (child_task->parent != NULL)
gomp_task_run_post_handle_depend_hash (child_task);
if (child_task->dependers == NULL)
return 0;
return gomp_task_run_post_handle_dependers (child_task, team);
}
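/* Unlink CHILD_TASK from its parent's children list.  If it was the
last child, publish the NULL children pointer with a release store
(pairing with the acquire load in GOMP_taskwait) and wake the parent
if it is blocked in a taskwait.  */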
static inline void
gomp_task_run_post_remove_parent (struct gomp_task *child_task)
{
struct gomp_task *parent = child_task->parent;
if (parent == NULL)
return;
child_task->prev_child->next_child = child_task->next_child;
child_task->next_child->prev_child = child_task->prev_child;
if (parent->children != child_task)
return;
if (child_task->next_child != child_task)
parent->children = child_task->next_child;
else
{
/* We access task->children in GOMP_taskwait
outside of the task lock mutex region, so we
need a release barrier here to ensure memory
written by child_task->fn above is flushed
before the NULL is written. */
__atomic_store_n (&parent->children, NULL, MEMMODEL_RELEASE);
if (parent->in_taskwait)
{
parent->in_taskwait = false;
gomp_sem_post (&parent->taskwait_sem);
}
}
}
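/* Unlink CHILD_TASK from its taskgroup's children list and drop the
taskgroup's child count, publishing the final 0 with a release store
(pairing with the acquire load in GOMP_taskgroup_end).  If this was
the last task on the list, wake a thread blocked in
GOMP_taskgroup_end.  */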
static inline void
gomp_task_run_post_remove_taskgroup (struct gomp_task *child_task)
{
struct gomp_taskgroup *taskgroup = child_task->taskgroup;
if (taskgroup == NULL)
return;
child_task->prev_taskgroup->next_taskgroup = child_task->next_taskgroup;
child_task->next_taskgroup->prev_taskgroup = child_task->prev_taskgroup;
if (taskgroup->num_children > 1)
--taskgroup->num_children;
else
{
/* We access taskgroup->num_children in GOMP_taskgroup_end
outside of the task lock mutex region, so we
need a release barrier here to ensure memory
written by child_task->fn above is flushed
before the 0 is written.  */
__atomic_store_n (&taskgroup->num_children, 0, MEMMODEL_RELEASE);
}
if (taskgroup->children != child_task)
return;
if (child_task->next_taskgroup != child_task)
taskgroup->children = child_task->next_taskgroup;
else
{
taskgroup->children = NULL;
if (taskgroup->in_taskgroup_wait)
{
taskgroup->in_taskgroup_wait = false;
gomp_sem_post (&taskgroup->taskgroup_sem);
}
}
}
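/* Run deferred tasks while the team waits at the barrier described by
STATE.  The last thread to arrive either finishes the barrier right
away (no outstanding tasks) or flags it as waiting for tasks; every
thread then loops picking tasks off the team queue, running them and
doing the post-run bookkeeping, and the barrier is completed once the
last task has finished.  */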
void
gomp_barrier_handle_tasks (gomp_barrier_state_t state)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
struct gomp_task *task = thr->task;
struct gomp_task *child_task = NULL;
struct gomp_task *to_free = NULL;
int do_wake = 0;
gomp_mutex_lock (&team->task_lock);
if (gomp_barrier_last_thread (state))
{
if (team->task_count == 0)
{
gomp_team_barrier_done (&team->barrier, state);
gomp_mutex_unlock (&team->task_lock);
gomp_team_barrier_wake (&team->barrier, 0);
return;
}
gomp_team_barrier_set_waiting_for_tasks (&team->barrier);
}
while (1)
{
bool cancelled = false;
if (team->task_queue != NULL)
{
child_task = team->task_queue;
cancelled = gomp_task_run_pre (child_task, child_task->parent,
child_task->taskgroup, team);
if (__builtin_expect (cancelled, 0))
{
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
goto finish_cancelled;
}
team->task_running_count++;
child_task->in_tied_task = true;
}
gomp_mutex_unlock (&team->task_lock);
if (do_wake)
{
gomp_team_barrier_wake (&team->barrier, do_wake);
do_wake = 0;
}
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
if (child_task)
{
thr->task = child_task;
child_task->fn (child_task->fn_data);
thr->task = task;
}
else
return;
gomp_mutex_lock (&team->task_lock);
if (child_task)
{
finish_cancelled:;
size_t new_tasks
= gomp_task_run_post_handle_depend (child_task, team);
gomp_task_run_post_remove_parent (child_task);
gomp_clear_parent (child_task->children);
gomp_task_run_post_remove_taskgroup (child_task);
to_free = child_task;
child_task = NULL;
if (!cancelled)
team->task_running_count--;
if (new_tasks > 1)
{
do_wake = team->nthreads - team->task_running_count;
if (do_wake > new_tasks)
do_wake = new_tasks;
}
if (--team->task_count == 0
&& gomp_team_barrier_waiting_for_tasks (&team->barrier))
{
gomp_team_barrier_done (&team->barrier, state);
gomp_mutex_unlock (&team->task_lock);
gomp_team_barrier_wake (&team->barrier, 0);
gomp_mutex_lock (&team->task_lock);
}
}
}
}
/* Called when encountering a taskwait directive. */
void
GOMP_taskwait (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
struct gomp_task *task = thr->task;
struct gomp_task *child_task = NULL;
struct gomp_task *to_free = NULL;
int do_wake = 0;
/* The acquire barrier on load of task->children here synchronizes
with the write of a NULL in gomp_task_run_post_remove_parent. It is
not necessary that we synchronize with other non-NULL writes at
this point, but we must ensure that all writes to memory by a
child thread task work function are seen before we exit from
GOMP_taskwait. */
if (task == NULL
|| __atomic_load_n (&task->children, MEMMODEL_ACQUIRE) == NULL)
return;
gomp_mutex_lock (&team->task_lock);
while (1)
{
bool cancelled = false;
if (task->children == NULL)
{
gomp_mutex_unlock (&team->task_lock);
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
}
return;
}
if (task->children->kind == GOMP_TASK_WAITING)
{
child_task = task->children;
cancelled
= gomp_task_run_pre (child_task, task, child_task->taskgroup,
team);
if (__builtin_expect (cancelled, 0))
{
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
goto finish_cancelled;
}
}
else
/* All tasks we are waiting for are already running
in other threads. Wait for them. */
task->in_taskwait = true;
gomp_mutex_unlock (&team->task_lock);
if (do_wake)
{
gomp_team_barrier_wake (&team->barrier, do_wake);
do_wake = 0;
}
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
if (child_task)
{
thr->task = child_task;
child_task->fn (child_task->fn_data);
thr->task = task;
}
else
gomp_sem_wait (&task->taskwait_sem);
gomp_mutex_lock (&team->task_lock);
if (child_task)
{
finish_cancelled:;
size_t new_tasks
= gomp_task_run_post_handle_depend (child_task, team);
child_task->prev_child->next_child = child_task->next_child;
child_task->next_child->prev_child = child_task->prev_child;
if (task->children == child_task)
{
if (child_task->next_child != child_task)
task->children = child_task->next_child;
else
task->children = NULL;
}
gomp_clear_parent (child_task->children);
gomp_task_run_post_remove_taskgroup (child_task);
to_free = child_task;
child_task = NULL;
team->task_count--;
if (new_tasks > 1)
{
do_wake = team->nthreads - team->task_running_count
- !task->in_tied_task;
if (do_wake > new_tasks)
do_wake = new_tasks;
}
}
}
}
/* Called when encountering a taskyield directive. */
void
GOMP_taskyield (void)
{
/* Nothing at the moment. */
}
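/* Called when encountering a taskgroup construct: push a new taskgroup
onto the current task.  As a rough, illustrative sketch of the
lowering, a region such as
#pragma omp taskgroup
{ body }
becomes a call to GOMP_taskgroup_start, the body (typically creating
tasks via GOMP_task) and a closing call to GOMP_taskgroup_end.  */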
void
GOMP_taskgroup_start (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
struct gomp_task *task = thr->task;
struct gomp_taskgroup *taskgroup;
/* If team is NULL, all tasks are executed as
GOMP_TASK_IFFALSE tasks and thus all child tasks of
the taskgroup and their descendant tasks will be finished
by the time GOMP_taskgroup_end is called.  */
if (team == NULL)
return;
taskgroup = gomp_malloc (sizeof (struct gomp_taskgroup));
taskgroup->prev = task->taskgroup;
taskgroup->children = NULL;
taskgroup->in_taskgroup_wait = false;
taskgroup->cancelled = false;
taskgroup->num_children = 0;
gomp_sem_init (&taskgroup->taskgroup_sem, 0);
task->taskgroup = taskgroup;
}
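/* Called at the end of a taskgroup region: wait until all tasks
created in the taskgroup (and their descendants) have completed,
running queued ones on this thread when possible, then pop and free
the taskgroup.  */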
void
GOMP_taskgroup_end (void)
{
struct gomp_thread *thr = gomp_thread ();
struct gomp_team *team = thr->ts.team;
struct gomp_task *task = thr->task;
struct gomp_taskgroup *taskgroup;
struct gomp_task *child_task = NULL;
struct gomp_task *to_free = NULL;
int do_wake = 0;
if (team == NULL)
return;
taskgroup = task->taskgroup;
/* The acquire barrier on load of taskgroup->num_children here
synchronizes with the write of 0 in gomp_task_run_post_remove_taskgroup.
It is not necessary that we synchronize with other non-0 writes at
this point, but we must ensure that all writes to memory by a
child thread task work function are seen before we exit from
GOMP_taskgroup_end. */
if (__atomic_load_n (&taskgroup->num_children, MEMMODEL_ACQUIRE) == 0)
goto finish;
gomp_mutex_lock (&team->task_lock);
while (1)
{
bool cancelled = false;
if (taskgroup->children == NULL)
{
if (taskgroup->num_children)
goto do_wait;
gomp_mutex_unlock (&team->task_lock);
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
}
goto finish;
}
if (taskgroup->children->kind == GOMP_TASK_WAITING)
{
child_task = taskgroup->children;
cancelled
= gomp_task_run_pre (child_task, child_task->parent, taskgroup,
team);
if (__builtin_expect (cancelled, 0))
{
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
goto finish_cancelled;
}
}
else
{
do_wait:
/* All tasks we are waiting for are already running
in other threads. Wait for them. */
taskgroup->in_taskgroup_wait = true;
}
gomp_mutex_unlock (&team->task_lock);
if (do_wake)
{
gomp_team_barrier_wake (&team->barrier, do_wake);
do_wake = 0;
}
if (to_free)
{
gomp_finish_task (to_free);
free (to_free);
to_free = NULL;
}
if (child_task)
{
thr->task = child_task;
child_task->fn (child_task->fn_data);
thr->task = task;
}
else
gomp_sem_wait (&taskgroup->taskgroup_sem);
gomp_mutex_lock (&team->task_lock);
if (child_task)
{
finish_cancelled:;
size_t new_tasks
= gomp_task_run_post_handle_depend (child_task, team);
child_task->prev_taskgroup->next_taskgroup
= child_task->next_taskgroup;
child_task->next_taskgroup->prev_taskgroup
= child_task->prev_taskgroup;
--taskgroup->num_children;
if (taskgroup->children == child_task)
{
if (child_task->next_taskgroup != child_task)
taskgroup->children = child_task->next_taskgroup;
else
taskgroup->children = NULL;
}
gomp_task_run_post_remove_parent (child_task);
gomp_clear_parent (child_task->children);
to_free = child_task;
child_task = NULL;
team->task_count--;
if (new_tasks > 1)
{
do_wake = team->nthreads - team->task_running_count
- !task->in_tied_task;
if (do_wake > new_tasks)
do_wake = new_tasks;
}
}
}
finish:
task->taskgroup = taskgroup->prev;
gomp_sem_destroy (&taskgroup->taskgroup_sem);
free (taskgroup);
}
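/* The omp_in_final library routine: return nonzero when called from
within a final task region.  */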
int
omp_in_final (void)
{
struct gomp_thread *thr = gomp_thread ();
return thr->task && thr->task->final_task;
}
ialias (omp_in_final)