author     jakub <jakub@138bc75d-0d04-0410-961f-82ee72b054a4>   2013-10-11 09:26:50 +0000
committer  jakub <jakub@138bc75d-0d04-0410-961f-82ee72b054a4>   2013-10-11 09:26:50 +0000
commit     bc7bff742355562cd43792f0814bae55eb21d012 (patch)
tree       2a3d60fbf15f9346c02647762dcc441fe3841855 /libgomp/config
parent     cf3cae555d03f07e989fd18e4db778fba44d9abd (diff)
download   gcc-bc7bff742355562cd43792f0814bae55eb21d012.tar.gz
libgomp/
* target.c: New file.
* Makefile.am (libgomp_la_SOURCES): Add target.c.
* Makefile.in: Regenerated.
* libgomp_g.h (GOMP_task): Add depend argument.
(GOMP_barrier_cancel, GOMP_loop_end_cancel,
GOMP_sections_end_cancel, GOMP_target, GOMP_target_data,
GOMP_target_end_data, GOMP_target_update, GOMP_teams,
GOMP_parallel_loop_static, GOMP_parallel_loop_dynamic,
GOMP_parallel_loop_guided, GOMP_parallel_loop_runtime,
GOMP_parallel, GOMP_cancel, GOMP_cancellation_point,
GOMP_taskgroup_start, GOMP_taskgroup_end,
GOMP_parallel_sections): New prototypes.
* fortran.c (omp_is_initial_device): Add ialias_redirect.
(omp_is_initial_device_): New function.
(ULP, STR1, STR2, ialias_redirect): Removed.
(omp_get_cancellation_, omp_get_proc_bind_, omp_set_default_device_,
omp_set_default_device_8_, omp_get_default_device_,
omp_get_num_devices_, omp_get_num_teams_, omp_get_team_num_): New
functions.
* libgomp.map (GOMP_barrier_cancel, GOMP_loop_end_cancel,
GOMP_sections_end_cancel, GOMP_target, GOMP_target_data,
GOMP_target_end_data, GOMP_target_update, GOMP_teams): Export
@@GOMP_4.0.
(omp_is_initial_device, omp_is_initial_device_, omp_get_cancellation,
omp_get_cancellation_, omp_get_proc_bind, omp_get_proc_bind_,
omp_set_default_device, omp_set_default_device_,
omp_set_default_device_8_, omp_get_default_device,
omp_get_default_device_, omp_get_num_devices, omp_get_num_devices_,
omp_get_num_teams, omp_get_num_teams_, omp_get_team_num,
omp_get_team_num_): Export @@OMP_4.0.
* team.c (struct gomp_thread_start_data): Add place field.
(gomp_thread_start): Clear thr->thread_pool and
thr->task before returning. Use gomp_team_barrier_wait_final
instead of gomp_team_barrier_wait. Initialize thr->place.
(gomp_new_team): Initialize work_shares_to_free, work_share_cancelled,
team_cancelled and task_queued_count fields.
(gomp_free_pool_helper): Clear thr->thread_pool and thr->task
before calling pthread_exit.
(gomp_free_thread): No longer static. Use
gomp_managed_threads_lock instead of gomp_remaining_threads_lock.
(gomp_team_start): Add flags argument. Set
thr->thread_pool->threads_busy to nthreads immediately after creating
new pool. Use gomp_managed_threads_lock instead of
gomp_remaining_threads_lock. Handle OpenMP 4.0 affinity.
(gomp_team_end): Use gomp_managed_threads_lock instead of
gomp_remaining_threads_lock. Use gomp_team_barrier_wait_final instead
of gomp_team_barrier_wait. If team->team_cancelled, call
gomp_fini_work_share on ws chain starting at team->work_shares_to_free
rather than thr->ts.work_share.
(initialize_team): Don't call gomp_sem_init here.
* sections.c (GOMP_parallel_sections_start): Adjust gomp_team_start
caller.
(GOMP_parallel_sections, GOMP_sections_end_cancel): New functions.
* env.c (gomp_global_icv): Add default_device_var, target_data and
bind_var initializers.
(gomp_cpu_affinity, gomp_cpu_affinity_len): Remove.
(gomp_bind_var_list, gomp_bind_var_list_len, gomp_places_list,
gomp_places_list_len): New variables.
(parse_bind_var, parse_one_place, parse_places_var): New functions.
(parse_affinity): Rewritten to construct OMP_PLACES list with unit
sized places.
(gomp_cancel_var): New global variable.
(parse_int): New function.
(handle_omp_display_env): New function.
(initialize_env): Use it. Initialize default_device_var.
Parse OMP_CANCELLATION env var. Use parse_bind_var to parse
OMP_PROC_BIND instead of parse_boolean. Use parse_places_var for
OMP_PLACES parsing. Don't call parse_affinity if OMP_PLACES has
been successfully parsed (and call gomp_init_affinity in that case).
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
omp_get_team_num, omp_is_initial_device): New functions.
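
The env.c rewrite above replaces the old affinity-only parsing with OMP_PLACES /
OMP_PROC_BIND handling and adds the OpenMP 4.0 query entry points.  A minimal C
sketch of how those entry points are used from user code (standard OpenMP 4.0
API, built with -fopenmp); settings such as OMP_PLACES="{0:4},{4:4}",
OMP_PROC_BIND=spread and OMP_CANCELLATION=true are the kind of input that
parse_places_var, parse_bind_var and the new OMP_CANCELLATION parsing accept:

    #include <stdio.h>
    #include <omp.h>

    int
    main (void)
    {
      /* Each value reflects what initialize_env parsed from OMP_CANCELLATION,
         OMP_PROC_BIND / OMP_PLACES and OMP_DEFAULT_DEVICE.  */
      printf ("cancellation enabled: %d\n", omp_get_cancellation ());
      printf ("proc bind policy:     %d\n", (int) omp_get_proc_bind ());
      printf ("default device:       %d\n", omp_get_default_device ());
      printf ("number of devices:    %d\n", omp_get_num_devices ());
      printf ("on initial device:    %d\n", omp_is_initial_device ());
      return 0;
    }
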
* libgomp.h: Include stdlib.h.
(ialias_ulp, ialias_str1, ialias_str2, ialias_redirect, ialias_call):
Define.
(struct target_mem_desc): Forward declare.
(struct gomp_task_icv): Add default_device_var, target_data, bind_var
and thread_limit_var fields.
(gomp_get_num_devices): New prototype.
(gomp_cancel_var): New extern decl.
(struct gomp_team): Add work_shares_to_free, work_share_cancelled,
team_cancelled and task_queued_count fields. Add comments about
task_{,queued_,running_}count.
(gomp_cancel_kind): New enum.
(gomp_work_share_end_cancel): New prototype.
(struct gomp_task): Add next_taskgroup, prev_taskgroup, taskgroup,
copy_ctors_done, dependers, depend_hash, depend_count, num_dependees
and depend fields.
(struct gomp_taskgroup): New type.
(struct gomp_task_depend_entry,
struct gomp_dependers_vec): New types.
(gomp_finish_task): Free depend_hash if non-NULL.
(struct gomp_team_state): Add place_partition_off
and place_partition_len fields.
(gomp_bind_var_list, gomp_bind_var_list_len, gomp_places_list,
gomp_places_list_len): New extern decls.
(struct gomp_thread): Add place field.
(gomp_cpu_affinity, gomp_cpu_affinity_len): Remove.
(gomp_init_thread_affinity): Add place argument.
(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus,
gomp_affinity_remove_cpu, gomp_affinity_copy_place,
gomp_affinity_same_place, gomp_affinity_finalize_place_list,
gomp_affinity_init_level, gomp_affinity_print_place): New
prototypes.
(gomp_team_start): Add flags argument.
(gomp_thread_limit_var, gomp_remaining_threads_count,
gomp_remaining_threads_lock): Remove.
(gomp_managed_threads_lock): New variable.
(struct gomp_thread_pool): Add threads_busy field.
(gomp_free_thread): New prototype.
* task.c: Include hashtab.h.
(hash_entry_type): New typedef.
(htab_alloc, htab_free, htab_hash, htab_eq): New inlines.
(gomp_init_task): Clear dependers, depend_hash, depend_count,
copy_ctors_done and taskgroup fields.
(GOMP_task): Add depend argument, handle depend clauses. If
gomp_team_barrier_cancelled or if its taskgroup has been
cancelled, don't queue or start new tasks. Set copy_ctors_done
field if needed. Initialize taskgroup field. If copy_ctors_done
and already cancelled, don't discard the task. If taskgroup is
non-NULL, enqueue the task into taskgroup queue. Increment
num_children field in taskgroup. Increment task_queued_count.
(gomp_task_run_pre, gomp_task_run_post_remove_parent,
gomp_task_run_post_remove_taskgroup): New inline functions.
(gomp_task_run_post_handle_depend_hash,
gomp_task_run_post_handle_dependers,
gomp_task_run_post_handle_depend): New functions.
(GOMP_taskwait): Use them. If more than one new task
has been queued, wake other threads if needed.
(gomp_barrier_handle_tasks): Likewise. If
gomp_team_barrier_cancelled, don't start any new tasks, just free
all tasks.
(GOMP_taskgroup_start, GOMP_taskgroup_end): New functions.
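
GOMP_task's new depend argument and the taskgroup entry points back the
OpenMP 4.0 task dependences and taskgroup constructs.  An illustrative example
in standard OpenMP 4.0 C syntax (user-level code, not libgomp internals) that
exercises both paths:

    #include <stdio.h>

    int
    main (void)
    {
      int x = 0, y = 0;
      #pragma omp parallel
      #pragma omp single
      {
        #pragma omp taskgroup
        {
          /* out -> in dependence on x: the second task cannot start until
             the first one has completed.  */
          #pragma omp task depend(out: x) shared(x)
          x = 42;
          #pragma omp task depend(in: x) depend(out: y) shared(x, y)
          y = x + 1;
        }   /* GOMP_taskgroup_end waits for both tasks here.  */
      }
      printf ("%d %d\n", x, y);   /* prints: 42 43 */
      return 0;
    }
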
* omp_lib.f90.in
(omp_proc_bind_kind, omp_proc_bind_false,
omp_proc_bind_true, omp_proc_bind_master, omp_proc_bind_close,
omp_proc_bind_spread): New params.
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
omp_get_team_num, omp_is_initial_device): New interfaces.
(omp_get_dynamic, omp_get_nested, omp_in_parallel,
omp_get_max_threads, omp_get_num_procs, omp_get_num_threads,
omp_get_thread_num, omp_get_thread_limit, omp_set_max_active_levels,
omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num,
omp_get_team_size, omp_get_active_level, omp_in_final): Remove the
useless use omp_lib_kinds statement.
* omp.h.in (omp_proc_bind_t): New typedef.
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
omp_get_team_num, omp_is_initial_device): New prototypes.
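
For reference, the new omp.h.in declarations amount to roughly the following.
This is a sketch using the OpenMP 4.0 names and values; the installed header
additionally carries the usual __GOMP_NOTHROW decoration:

    typedef enum omp_proc_bind_t
    {
      omp_proc_bind_false = 0,
      omp_proc_bind_true = 1,
      omp_proc_bind_master = 2,
      omp_proc_bind_close = 3,
      omp_proc_bind_spread = 4
    } omp_proc_bind_t;

    extern int omp_get_cancellation (void);
    extern omp_proc_bind_t omp_get_proc_bind (void);
    extern void omp_set_default_device (int device_num);
    extern int omp_get_default_device (void);
    extern int omp_get_num_devices (void);
    extern int omp_get_num_teams (void);
    extern int omp_get_team_num (void);
    extern int omp_is_initial_device (void);
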
* loop.c (gomp_parallel_loop_start): Add flags argument, pass it
through to gomp_team_start.
(GOMP_parallel_loop_static_start, GOMP_parallel_loop_dynamic_start,
GOMP_parallel_loop_guided_start, GOMP_parallel_loop_runtime_start):
Adjust gomp_parallel_loop_start callers.
(GOMP_parallel_loop_static, GOMP_parallel_loop_dynamic,
GOMP_parallel_loop_guided, GOMP_parallel_loop_runtime,
GOMP_loop_end_cancel): New functions.
(GOMP_parallel_end): Add ialias_redirect.
* hashtab.h: New file.
* libgomp.texi (Environment Variables): Minor cleanup,
update section refs to OpenMP 4.0rc2.
(OMP_DISPLAY_ENV, GOMP_SPINCOUNT): Document these
environment variables.
* work.c (gomp_work_share_end, gomp_work_share_end_nowait): Set
team->work_shares_to_free to thr->ts.work_share before calling
free_work_share.
(gomp_work_share_end_cancel): New function.
* config/linux/proc.c: Include errno.h.
(gomp_get_cpuset_size, gomp_cpuset_size, gomp_cpusetp): New variables.
(gomp_cpuset_popcount): Add cpusetsize argument, use it instead of
sizeof (cpu_set_t) to determine number of iterations. Fix up check
extern decl. Use CPU_COUNT_S if available, or CPU_COUNT if
gomp_cpuset_size is sizeof (cpu_set_t).
(gomp_init_num_threads): Initialize gomp_cpuset_size,
gomp_get_cpuset_size and gomp_cpusetp here, use gomp_cpusetp instead
of &cpuset and pass gomp_cpuset_size instead of sizeof (cpu_set_t)
to pthread_getaffinity_np. Free and clear gomp_cpusetp if it didn't
contain any logical CPUs.
(get_num_procs): Don't call pthread_getaffinity_np if gomp_cpusetp
is NULL. Use gomp_cpusetp instead of &cpuset and pass
gomp_get_cpuset_size instead of sizeof (cpu_set_t) to
pthread_getaffinity_np. Check gomp_places_list instead of
gomp_cpu_affinity. Adjust gomp_cpuset_popcount caller.
* config/linux/bar.c (gomp_barrier_wait_end,
gomp_barrier_wait_last): Use BAR_* defines.
(gomp_team_barrier_wait_end): Likewise. Clear BAR_CANCELLED
from state where needed. Set work_share_cancelled to 0 on last
thread.
(gomp_team_barrier_wait_final, gomp_team_barrier_wait_cancel_end,
gomp_team_barrier_wait_cancel, gomp_team_barrier_cancel): New
functions.
* config/linux/proc.h (gomp_cpuset_popcount): Add attribute_hidden.
Add cpusetsize argument.
(gomp_cpuset_size, gomp_cpusetp): Declare.
* config/linux/affinity.c: Include errno.h, stdio.h and string.h.
(affinity_counter): Remove.
(CPU_ISSET_S, CPU_ZERO_S, CPU_SET_S, CPU_CLR_S): Define
if CPU_ALLOC_SIZE isn't defined.
(gomp_init_affinity): Rewritten; if gomp_places_list is NULL, try to
silently create OMP_PLACES=threads, and if it is non-NULL afterwards,
bind the current thread to the first place.
(gomp_init_thread_affinity): Rewritten. Add place argument, just call
pthread_attr_setaffinity_np with gomp_places_list[place].
(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus,
gomp_affinity_remove_cpu, gomp_affinity_copy_place,
gomp_affinity_same_place, gomp_affinity_finalize_place_list,
gomp_affinity_init_level, gomp_affinity_print_place): New functions.
* config/linux/bar.h (BAR_TASK_PENDING, BAR_WAS_LAST,
BAR_WAITING_FOR_TASK, BAR_INCR, BAR_CANCELLED): Define.
(gomp_barrier_t): Add awaited_final field.
(gomp_barrier_init): Initialize awaited_final field.
(gomp_team_barrier_wait_final, gomp_team_barrier_wait_cancel,
gomp_team_barrier_wait_cancel_end, gomp_team_barrier_cancel): New
prototypes.
(gomp_barrier_wait_start): Preserve BAR_CANCELLED bit. Use BAR_*
defines.
(gomp_barrier_wait_cancel_start, gomp_team_barrier_wait_final_start,
gomp_team_barrier_cancelled): New inline functions.
(gomp_barrier_last_thread,
gomp_team_barrier_set_task_pending,
gomp_team_barrier_clear_task_pending,
gomp_team_barrier_set_waiting_for_tasks,
gomp_team_barrier_waiting_for_tasks,
gomp_team_barrier_done): Use BAR_* defines.
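
The BAR_* macros name the flag bits that were previously hard-coded as 1, 2 and
the +4 generation increment in the barrier code.  The concrete values below are
inferred from the bar.c hunks further down (where "generation + 4" becomes
"generation + BAR_INCR" and a cancelled bit is inserted below the increment)
and should be read as an assumption rather than a quote of bar.h:

    /* Assumed flag layout; the generation word keeps the flags in its low
       bits and counts barrier generations in steps of BAR_INCR.  */
    #define BAR_TASK_PENDING      1  /* tasks queued on the team barrier */
    #define BAR_WAS_LAST          1  /* in a thread's state: arrived last */
    #define BAR_WAITING_FOR_TASK  2  /* some thread sleeps awaiting tasks */
    #define BAR_CANCELLED         4  /* barrier cancelled (OpenMP 4.0) */
    #define BAR_INCR              8  /* generation counter increment */
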
* config/posix/bar.c (gomp_barrier_init): Clear cancellable field.
(gomp_barrier_wait_end): Use BAR_* defines.
(gomp_team_barrier_wait_end): Clear BAR_CANCELLED from state.
Set work_share_cancelled to 0 on last thread, use __atomic_load_n.
Use BAR_* defines.
(gomp_team_barrier_wait_cancel_end, gomp_team_barrier_wait_cancel,
gomp_team_barrier_cancel): New functions.
* config/posix/affinity.c (gomp_init_thread_affinity): Add place
argument.
(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus,
gomp_affinity_remove_cpu, gomp_affinity_copy_place,
gomp_affinity_same_place, gomp_affinity_finalize_place_list,
gomp_affinity_init_level, gomp_affinity_print_place): New stubs.
* config/posix/bar.h (BAR_TASK_PENDING, BAR_WAS_LAST,
BAR_WAITING_FOR_TASK, BAR_INCR, BAR_CANCELLED): Define.
(gomp_barrier_t): Add cancellable field.
(gomp_team_barrier_wait_cancel, gomp_team_barrier_wait_cancel_end,
gomp_team_barrier_cancel): New prototypes.
(gomp_barrier_wait_start): Preserve BAR_CANCELLED bit.
(gomp_barrier_wait_cancel_start, gomp_team_barrier_wait_final,
gomp_team_barrier_cancelled): New inline functions.
(gomp_barrier_wait_start, gomp_barrier_last_thread,
gomp_team_barrier_set_task_pending,
gomp_team_barrier_clear_task_pending,
gomp_team_barrier_set_waiting_for_tasks,
gomp_team_barrier_waiting_for_tasks,
gomp_team_barrier_done): Use BAR_* defines.
* barrier.c (GOMP_barrier_cancel): New function.
* omp_lib.h.in (omp_proc_bind_kind, omp_proc_bind_false,
omp_proc_bind_true, omp_proc_bind_master, omp_proc_bind_close,
omp_proc_bind_spread): New params.
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device,
omp_get_default_device, omp_get_num_devices, omp_get_num_teams,
omp_get_team_num, omp_is_initial_device): New externals.
* parallel.c (GOMP_parallel, GOMP_cancel, GOMP_cancellation_point):
New functions.
(gomp_resolve_num_threads): Adjust for thread_limit now being in
icv->thread_limit_var. Use UINT_MAX instead of ULONG_MAX as
infinity. If not nested, just return minimum of max_num_threads
and icv->thread_limit_var and if thr->thread_pool, set threads_busy
to the returned value. Otherwise, atomically update
thr->thread_pool->threads_busy instead of gomp_remaining_threads_count.
(GOMP_parallel_end): Adjust for thread_limit now being in
icv->thread_limit_var. Use UINT_MAX instead of ULONG_MAX as
infinity. Adjust threads_busy in the pool rather than
gomp_remaining_threads_count. Remember team->nthreads and call
gomp_team_end before adjusting threads_busy, if not nested
afterwards, just set it to 1 non-atomically. Add ialias.
(GOMP_parallel_start): Adjust gomp_team_start caller.
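
GOMP_cancel and GOMP_cancellation_point, together with the *_end_cancel entry
points above, implement OpenMP 4.0 cancellation; it only takes effect when
OMP_CANCELLATION=true (the new gomp_cancel_var).  An illustrative search loop
in standard OpenMP 4.0 C syntax:

    void
    search (const int *a, int n, int key, int *pos)
    {
      int i;
      *pos = -1;
      #pragma omp parallel for
      for (i = 0; i < n; i++)
        {
          if (a[i] == key)
            {
              #pragma omp critical
              *pos = i;
              /* Request cancellation of the innermost enclosing for region;
                 a no-op unless OMP_CANCELLATION=true.  */
              #pragma omp cancel for
            }
          /* Other threads notice the cancellation here and leave the loop.  */
          #pragma omp cancellation point for
        }
    }
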
* testsuite/libgomp.c/atomic-14.c: Add parens to make it valid.
* testsuite/libgomp.c/affinity-1.c: New test.
* testsuite/libgomp.c/atomic-15.c: New test.
* testsuite/libgomp.c/atomic-16.c: New test.
* testsuite/libgomp.c/atomic-17.c: New test.
* testsuite/libgomp.c/cancel-for-1.c: New test.
* testsuite/libgomp.c/cancel-for-2.c: New test.
* testsuite/libgomp.c/cancel-parallel-1.c: New test.
* testsuite/libgomp.c/cancel-parallel-2.c: New test.
* testsuite/libgomp.c/cancel-parallel-3.c: New test.
* testsuite/libgomp.c/cancel-sections-1.c: New test.
* testsuite/libgomp.c/cancel-taskgroup-1.c: New test.
* testsuite/libgomp.c/cancel-taskgroup-2.c: New test.
* testsuite/libgomp.c/depend-1.c: New test.
* testsuite/libgomp.c/depend-2.c: New test.
* testsuite/libgomp.c/depend-3.c: New test.
* testsuite/libgomp.c/depend-4.c: New test.
* testsuite/libgomp.c/for-1.c: New test.
* testsuite/libgomp.c/for-1.h: New file.
* testsuite/libgomp.c/for-2.c: New test.
* testsuite/libgomp.c/for-2.h: New file.
* testsuite/libgomp.c/for-3.c: New test.
* testsuite/libgomp.c/pr58392.c: New test.
* testsuite/libgomp.c/simd-1.c: New test.
* testsuite/libgomp.c/simd-2.c: New test.
* testsuite/libgomp.c/simd-3.c: New test.
* testsuite/libgomp.c/simd-4.c: New test.
* testsuite/libgomp.c/simd-5.c: New test.
* testsuite/libgomp.c/simd-6.c: New test.
* testsuite/libgomp.c/target-1.c: New test.
* testsuite/libgomp.c/target-2.c: New test.
* testsuite/libgomp.c/target-3.c: New test.
* testsuite/libgomp.c/target-4.c: New test.
* testsuite/libgomp.c/target-5.c: New test.
* testsuite/libgomp.c/target-6.c: New test.
* testsuite/libgomp.c/target-7.c: New test.
* testsuite/libgomp.c/taskgroup-1.c: New test.
* testsuite/libgomp.c/thread-limit-1.c: New test.
* testsuite/libgomp.c/thread-limit-2.c: New test.
* testsuite/libgomp.c/thread-limit-3.c: New test.
* testsuite/libgomp.c/udr-1.c: New test.
* testsuite/libgomp.c/udr-2.c: New test.
* testsuite/libgomp.c/udr-3.c: New test.
* testsuite/libgomp.c++/affinity-1.C: New test.
* testsuite/libgomp.c++/atomic-10.C: New test.
* testsuite/libgomp.c++/atomic-11.C: New test.
* testsuite/libgomp.c++/atomic-12.C: New test.
* testsuite/libgomp.c++/atomic-13.C: New test.
* testsuite/libgomp.c++/atomic-14.C: New test.
* testsuite/libgomp.c++/atomic-15.C: New test.
* testsuite/libgomp.c++/cancel-for-1.C: New test.
* testsuite/libgomp.c++/cancel-for-2.C: New test.
* testsuite/libgomp.c++/cancel-parallel-1.C: New test.
* testsuite/libgomp.c++/cancel-parallel-2.C: New test.
* testsuite/libgomp.c++/cancel-parallel-3.C: New test.
* testsuite/libgomp.c++/cancel-sections-1.C: New test.
* testsuite/libgomp.c++/cancel-taskgroup-1.C: New test.
* testsuite/libgomp.c++/cancel-taskgroup-2.C: New test.
* testsuite/libgomp.c++/cancel-taskgroup-3.C: New test.
* testsuite/libgomp.c++/cancel-test.h: New file.
* testsuite/libgomp.c++/for-9.C: New test.
* testsuite/libgomp.c++/for-10.C: New test.
* testsuite/libgomp.c++/for-11.C: New test.
* testsuite/libgomp.c++/simd-1.C: New test.
* testsuite/libgomp.c++/simd-2.C: New test.
* testsuite/libgomp.c++/simd-3.C: New test.
* testsuite/libgomp.c++/simd-4.C: New test.
* testsuite/libgomp.c++/simd-5.C: New test.
* testsuite/libgomp.c++/simd-6.C: New test.
* testsuite/libgomp.c++/simd-7.C: New test.
* testsuite/libgomp.c++/simd-8.C: New test.
* testsuite/libgomp.c++/target-1.C: New test.
* testsuite/libgomp.c++/target-2.C: New test.
* testsuite/libgomp.c++/target-2-aux.cc: New file.
* testsuite/libgomp.c++/target-3.C: New test.
* testsuite/libgomp.c++/taskgroup-1.C: New test.
* testsuite/libgomp.c++/udr-1.C: New test.
* testsuite/libgomp.c++/udr-2.C: New test.
* testsuite/libgomp.c++/udr-3.C: New test.
* testsuite/libgomp.c++/udr-4.C: New test.
* testsuite/libgomp.c++/udr-5.C: New test.
* testsuite/libgomp.c++/udr-6.C: New test.
* testsuite/libgomp.c++/udr-7.C: New test.
* testsuite/libgomp.c++/udr-8.C: New test.
* testsuite/libgomp.c++/udr-9.C: New test.
gcc/
* tree-pretty-print.c (dump_omp_clause): Handle OMP_CLAUSE__LOOPTEMP_
and new OpenMP 4.0 clauses, handle UDR OMP_CLAUSE_REDUCTION,
formatting fixes, use pp_colon instead of pp_character (..., ':'),
similarly pp_right_paren.
(dump_generic_node): Handle OMP_DISTRIBUTE, OMP_TEAMS,
OMP_TARGET_DATA, OMP_TARGET, OMP_TARGET_UPDATE, OMP_TASKGROUP,
allow OMP_FOR_INIT to be NULL, handle OMP_ATOMIC_SEQ_CST.
* tree.c (omp_clause_num_ops, omp_clause_code_name): Add OpenMP 4.0
clauses.
(omp_declare_simd_clauses_equal,
omp_remove_redundant_declare_simd_attrs): New functions.
(attribute_value_equal): Use omp_declare_simd_clauses_equal.
(walk_tree_1): Handle new OpenMP 4.0 clauses.
* tree.h (OMP_LOOP_CHECK): Define.
(OMP_FOR_BODY, OMP_FOR_CLAUSES, OMP_FOR_INIT, OMP_FOR_COND,
OMP_FOR_INCR, OMP_FOR_PRE_BODY): Use it.
(OMP_TASKGROUP_BODY, OMP_TEAMS_BODY, OMP_TEAMS_CLAUSES,
OMP_TARGET_DATA_BODY, OMP_TARGET_DATA_CLAUSES, OMP_TARGET_BODY,
OMP_TARGET_CLAUSES, OMP_TARGET_UPDATE_CLAUSES, OMP_CLAUSE_SIZE,
OMP_ATOMIC_SEQ_CST, OMP_CLAUSE_DEPEND_KIND, OMP_CLAUSE_MAP_KIND,
OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION, OMP_CLAUSE_PROC_BIND_KIND,
OMP_CLAUSE_REDUCTION_OMP_ORIG_REF, OMP_CLAUSE_ALIGNED_ALIGNMENT,
OMP_CLAUSE_NUM_TEAMS_EXPR, OMP_CLAUSE_THREAD_LIMIT_EXPR,
OMP_CLAUSE_DEVICE_ID, OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR,
OMP_CLAUSE_SIMDLEN_EXPR): Define.
(OMP_CLAUSE_DECL): Change range up to OMP_CLAUSE__LOOPTEMP_.
(omp_remove_redundant_declare_simd_attrs): New prototype.
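
The new clause codes and accessors such as OMP_CLAUSE_ALIGNED_ALIGNMENT and
OMP_CLAUSE_SIMDLEN_EXPR represent source-level simd clauses like the ones below
(an illustrative loop in standard OpenMP 4.0 syntax; the safelen and alignment
values are arbitrary examples):

    void
    saxpy (int n, float a, const float *x, float *y)
    {
      int i;
      /* safelen and aligned become the new clause trees with their
         expression/alignment operands.  */
      #pragma omp simd safelen(8) aligned(x, y : 32)
      for (i = 0; i < n; i++)
        y[i] = a * x[i] + y[i];
    }
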
* gimple.def (GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET,
GIMPLE_OMP_TEAMS): New codes.
(GIMPLE_OMP_RETURN): Use GSS_OMP_ATOMIC_STORE instead of GSS_BASE.
* omp-low.c (struct omp_context): Add cancel_label and cancellable
fields.
(target_nesting_level): New variable.
(extract_omp_for_data): Handle GF_OMP_FOR_KIND_DISTRIBUTE and
OMP_CLAUSE_DIST_SCHEDULE. Don't fallback to library implementation
for collapse > 1 static schedule unless ordered.
(get_ws_args_for): Add par_stmt argument. Handle combined loops.
(determine_parallel_type): Adjust get_ws_args_for caller.
(install_var_field): Handle mask & 4 for double indirection.
(scan_sharing_clauses): Ignore shared clause on teams construct.
Handle OMP_CLAUSE__LOOPTEMP_ and new OpenMP 4.0 clauses.
(create_omp_child_function): If inside target or declare target
constructs, set "omp declare target" attribute on the child
function.
(find_combined_for): New function.
(scan_omp_parallel): Handle combined loops.
(scan_omp_target, scan_omp_teams): New functions.
(check_omp_nesting_restrictions): Check new OpenMP 4.0 nesting
restrictions and set ctx->cancellable for cancellable constructs.
(scan_omp_1_stmt): Call check_omp_nesting_restrictions also on
selected builtin calls. Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS.
(build_omp_barrier): Add lhs argument, return gimple rather than
tree.
(omp_clause_aligned_alignment): New function.
(lower_rec_simd_input_clauses): Only call SET_DECL_VALUE_EXPR
on decls.
(lower_rec_input_clauses): Add FD argument. Ignore shared clauses
on teams constructs. Handle user defined reductions and new
OpenMP 4.0 clauses.
(lower_reduction_clauses): Don't set placeholder to address of ref
if it has already the right type.
(lower_send_clauses): Handle OMP_CLAUSE__LOOPTEMP_.
(expand_parallel_call): Use the new non-_start suffixed builtins,
handle OMP_CLAUSE_PROC_BIND, don't call the outlined function
and GOMP_parallel_end after the call.
(expand_task_call): Handle OMP_CLAUSE_DEPEND.
(expand_omp_for_init_counts): Handle combined loops.
(expand_omp_for_init_vars): Add inner_stmt argument, handle combined
loops.
(expand_omp_for_generic): Likewise. Use GOMP_loop_end_cancel at the
end of cancellable loops.
(expand_omp_for_static_nochunk, expand_omp_for_static_chunk):
Likewise. Handle collapse > 1 loops.
(expand_omp_simd): Handle combined loops.
(expand_omp_for): Add inner_stmt argument, adjust callers of
expand_omp_for* functions, use expand_omp_for_static*chunk even
for collapse > 1 unless ordered.
(expand_omp_sections): Use GOMP_sections_end_cancel at the end
of cancellable sections.
(expand_omp_single): Remove need_barrier variable, just rely on
gimple_omp_return_nowait_p. Adjust build_omp_barrier caller.
(expand_omp_synch): Allow GIMPLE_OMP_TASKGROUP and GIMPLE_OMP_TEAMS.
(expand_omp_atomic_load, expand_omp_atomic_store,
expand_omp_atomic_fetch_op): Handle gimple_omp_atomic_seq_cst_p.
(expand_omp_target): New function.
(expand_omp): Handle combined loops. Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TEAMS, GIMPLE_OMP_TARGET.
(build_omp_regions_1): Immediately close region for
GF_OMP_TARGET_KIND_UPDATE.
(maybe_add_implicit_barrier_cancel): New function.
(lower_omp_sections): Adjust lower_rec_input_clauses caller. Handle
cancellation.
(lower_omp_single): Likewise. Add clobber after the barrier.
(lower_omp_taskgroup): New function.
(lower_omp_for): Handle combined loops. Adjust
lower_rec_input_clauses caller. Handle cancellation.
(lower_depend_clauses): New function.
(lower_omp_taskreg): Lower depend clauses. Adjust
lower_rec_input_clauses caller. Add clobber after the call. Handle
cancellation.
(lower_omp_target, lower_omp_teams): New functions.
(lower_omp_1): Handle cancellation. Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GOMP_barrier, GOMP_cancel
and GOMP_cancellation_point calls.
(lower_omp): Fold stmts inside of target region.
(diagnose_sb_1, diagnose_sb_2): Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TARGET and GIMPLE_OMP_TEAMS.
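
Because expand_parallel_call now uses the non-_start builtins, a parallel
region lowers to one GOMP_parallel call instead of the old start/call/end
triple.  A hand-written C sketch of the equivalent generated code; the outlined
function name is made up, and leaving the flags argument (which encodes the
proc_bind clause) at 0 is an assumption here:

    /* Prototype of the new libgomp entry point (exported as GOMP_4.0).  */
    extern void GOMP_parallel (void (*fn) (void *), void *data,
                               unsigned int num_threads, unsigned int flags);

    /* Stand-in for the compiler-outlined parallel region body; the real
       name is compiler-generated.  */
    static void
    region_body (void *data)
    {
      (void) data;
      /* ... body of '#pragma omp parallel' ... */
    }

    void
    run_region (void *shared_vars)
    {
      /* GCC previously emitted:
           GOMP_parallel_start (region_body, shared_vars, 0);
           region_body (shared_vars);
           GOMP_parallel_end ();
         It now emits a single call; libgomp runs region_body on every team
         thread, including the caller, and joins before returning.  */
      GOMP_parallel (region_body, shared_vars, 0, 0);
    }
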
* builtin-types.def (DEF_FUNCTION_TYPE_8): Document.
(BT_FN_VOID_OMPFN_PTR_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG,
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): Remove.
(BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
BT_FN_BOOL_INT, BT_FN_BOOL_INT_BOOL, BT_FN_VOID_UINT_UINT,
BT_FN_VOID_INT_PTR_SIZE_PTR_PTR_PTR,
BT_FN_VOID_INT_OMPFN_PTR_SIZE_PTR_PTR_PTR,
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR): New.
* tree-ssa-alias.c (ref_maybe_used_by_call_p_1,
call_may_clobber_ref_p_1): Handle BUILT_IN_GOMP_BARRIER_CANCEL,
BUILT_IN_GOMP_TASKGROUP_END, BUILT_IN_GOMP_LOOP_END_CANCEL,
BUILT_IN_GOMP_SECTIONS_END_CANCEL. Don't handle
BUILT_IN_GOMP_PARALLEL_END.
* gimple-low.c (lower_stmt): Handle GIMPLE_OMP_TASKGROUP,
GIMPLE_OMP_TARGET and GIMPLE_OMP_TEAMS.
* gimple-pretty-print.c (dump_gimple_omp_for): Handle
GF_OMP_FOR_KIND_DISTRIBUTE.
(dump_gimple_omp_target, dump_gimple_omp_teams): New functions.
(dump_gimple_omp_block): Handle GIMPLE_OMP_TASKGROUP.
(dump_gimple_omp_return): Print lhs if it has any.
(dump_gimple_omp_atomic_load, dump_gimple_omp_atomic_store): Handle
gimple_omp_atomic_seq_cst_p.
(pp_gimple_stmt_1): Handle GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET
and GIMPLE_OMP_TEAMS.
* langhooks.c (lhd_omp_mappable_type): New function.
* tree-vectorizer.c (struct simd_array_to_simduid): Fix up comment.
* langhooks.h (struct lang_hooks_for_types): Add omp_mappable_type
hook.
* gimplify.c (enum gimplify_omp_var_data): Add GOVD_MAP,
GOVD_ALIGNED and GOVD_MAP_TO_ONLY.
(enum omp_region_type): Add ORT_TEAMS, ORT_TARGET_DATA and
ORT_TARGET.
(struct gimplify_omp_ctx): Add combined_loop field.
(gimplify_call_expr, gimplify_modify_expr): Don't call fold_stmt
on stmts inside of target region.
(is_gimple_stmt): Return true for OMP_DISTRIBUTE and OMP_TASKGROUP.
(omp_firstprivatize_variable): Handle GOVD_MAP, GOVD_ALIGNED,
ORT_TARGET and ORT_TARGET_DATA.
(omp_add_variable): Avoid checks on readding var for GOVD_ALIGNED.
Handle GOVD_MAP.
(omp_notice_threadprivate_variable): Complain about threadprivate
variables in target region.
(omp_notice_variable): Complain about vars with non-mappable type
in target region. Handle ORT_TEAMS, ORT_TARGET and ORT_TARGET_DATA.
(omp_check_private): Ignore ORT_TARGET* regions.
(gimplify_scan_omp_clauses, gimplify_adjust_omp_clauses_1,
gimplify_adjust_omp_clauses): Handle new OpenMP 4.0 clauses.
(find_combined_omp_for): New function.
(gimplify_omp_for): Handle gimplification of combined loops.
(gimplify_omp_workshare): Gimplify also OMP_TARGET, OMP_TARGET_DATA,
OMP_TEAMS.
(gimplify_omp_target_update): New function.
(gimplify_omp_atomic): Handle OMP_ATOMIC_SEQ_CST.
(gimplify_expr): Handle OMP_DISTRIBUTE, OMP_TARGET, OMP_TARGET_DATA,
OMP_TARGET_UPDATE, OMP_TEAMS, OMP_TASKGROUP.
(gimplify_body): If fndecl has "omp declare target" attribute, add
implicit ORT_TARGET context around it.
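
The target, teams and distribute handling added throughout gimplify.c and
omp-low.c corresponds to source constructs such as the following (standard
OpenMP 4.0 syntax, using the array-section map form the front ends now parse;
without an offloading device the region simply runs on the host):

    void
    vec_add (const float *a, const float *b, float *c, int n)
    {
      int i;
      #pragma omp target map(to: a[0:n], b[0:n]) map(from: c[0:n])
      #pragma omp teams distribute parallel for
      for (i = 0; i < n; i++)
        c[i] = a[i] + b[i];
    }
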
* tree.def (OMP_DISTRIBUTE, OMP_TEAMS, OMP_TARGET_DATA, OMP_TARGET,
OMP_TASKGROUP, OMP_TARGET_UPDATE): New tree codes.
* tree-nested.c (convert_nonlocal_reference_stmt,
convert_local_reference_stmt, convert_gimple_call): Handle
GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* omp-builtins.def (BUILT_IN_GOMP_TASK): Use
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR
instead of BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT.
(BUILT_IN_GOMP_TARGET, BUILT_IN_GOMP_TARGET_DATA,
BUILT_IN_GOMP_TARGET_END_DATA, BUILT_IN_GOMP_TARGET_UPDATE,
BUILT_IN_GOMP_TEAMS, BUILT_IN_GOMP_BARRIER_CANCEL,
BUILT_IN_GOMP_LOOP_END_CANCEL,
BUILT_IN_GOMP_SECTIONS_END_CANCEL, BUILT_IN_OMP_GET_TEAM_NUM,
BUILT_IN_OMP_GET_NUM_TEAMS, BUILT_IN_GOMP_TASKGROUP_START,
BUILT_IN_GOMP_TASKGROUP_END, BUILT_IN_GOMP_PARALLEL_LOOP_STATIC,
BUILT_IN_GOMP_PARALLEL_LOOP_DYNAMIC,
BUILT_IN_GOMP_PARALLEL_LOOP_GUIDED,
BUILT_IN_GOMP_PARALLEL_LOOP_RUNTIME, BUILT_IN_GOMP_PARALLEL,
BUILT_IN_GOMP_PARALLEL_SECTIONS, BUILT_IN_GOMP_CANCEL,
BUILT_IN_GOMP_CANCELLATION_POINT): New built-ins.
(BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START,
BUILT_IN_GOMP_PARALLEL_LOOP_DYNAMIC_START,
BUILT_IN_GOMP_PARALLEL_LOOP_GUIDED_START,
BUILT_IN_GOMP_PARALLEL_LOOP_RUNTIME_START,
BUILT_IN_GOMP_PARALLEL_START, BUILT_IN_GOMP_PARALLEL_END,
BUILT_IN_GOMP_PARALLEL_SECTIONS_START): Remove.
* tree-inline.c (remap_gimple_stmt, estimate_num_insns):
Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* gimple.c (gimple_build_omp_taskgroup, gimple_build_omp_target,
gimple_build_omp_teams): New functions.
(walk_gimple_op): Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and
GIMPLE_OMP_TASKGROUP. Walk optional lhs on GIMPLE_OMP_RETURN.
(walk_gimple_stmt, gimple_copy): Handle GIMPLE_OMP_TARGET,
GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* gimple.h (enum gf_mask): Add GF_OMP_FOR_KIND_DISTRIBUTE,
GF_OMP_FOR_COMBINED, GF_OMP_FOR_COMBINED_INTO,
GF_OMP_TARGET_KIND_MASK, GF_OMP_TARGET_KIND_REGION,
GF_OMP_TARGET_KIND_DATA, GF_OMP_TARGET_KIND_UPDATE,
GF_OMP_ATOMIC_SEQ_CST.
(gimple_build_omp_taskgroup, gimple_build_omp_target,
gimple_build_omp_teams): New prototypes.
(gimple_has_substatements): Handle GIMPLE_OMP_TARGET,
GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
(gimple_omp_subcode): Use GIMPLE_OMP_TEAMS instead of
GIMPLE_OMP_SINGLE as end of range.
(gimple_omp_return_set_lhs, gimple_omp_return_lhs,
gimple_omp_return_lhs_ptr, gimple_omp_atomic_seq_cst_p,
gimple_omp_atomic_set_seq_cst, gimple_omp_for_combined_p,
gimple_omp_for_set_combined_p, gimple_omp_for_combined_into_p,
gimple_omp_for_set_combined_into_p, gimple_omp_target_clauses,
gimple_omp_target_clauses_ptr, gimple_omp_target_set_clauses,
gimple_omp_target_kind, gimple_omp_target_set_kind,
gimple_omp_target_child_fn, gimple_omp_target_child_fn_ptr,
gimple_omp_target_set_child_fn, gimple_omp_target_data_arg,
gimple_omp_target_data_arg_ptr, gimple_omp_target_set_data_arg,
gimple_omp_teams_clauses, gimple_omp_teams_clauses_ptr,
gimple_omp_teams_set_clauses): New inlines.
(CASE_GIMPLE_OMP): Add GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS
and GIMPLE_OMP_TASKGROUP.
* tree-core.h (enum omp_clause_code): Add new OpenMP 4.0 clause
codes.
(enum omp_clause_depend_kind, enum omp_clause_map_kind,
enum omp_clause_proc_bind_kind): New.
(union omp_clause_subcode): Add depend_kind, map_kind and
proc_bind_kind fields.
* tree-cfg.c (make_edges): Handle GIMPLE_OMP_TARGET,
GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* langhooks-def.h (lhd_omp_mappable_type): New prototype.
(LANG_HOOKS_OMP_MAPPABLE_TYPE): Define.
(LANG_HOOKS_FOR_TYPES_INITIALIZER): Add it.
gcc/c-family/
* c-cppbuiltin.c (c_cpp_builtins): Predefine _OPENMP to
201307 instead of 201107.
* c-common.c (DEF_FUNCTION_TYPE_8): Define.
(c_common_attribute_table): Add "omp declare target" and
"omp declare simd" attributes.
(handle_omp_declare_target_attribute,
handle_omp_declare_simd_attribute): New functions.
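
The two new attributes correspond to the declare target and declare simd
directives; an illustrative example in standard OpenMP 4.0 C syntax (the
function and variable names are made up):

    #pragma omp declare target
    extern int device_counter;   /* placed in the device data environment */
    #pragma omp end declare target

    #pragma omp declare simd uniform(base, scale) linear(i) notinbranch
    float
    scale_elem (const float *base, int i, float scale)
    {
      return base[i] * scale;
    }
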
* c-omp.c: Include c-pragma.h.
(c_finish_omp_taskgroup): New function.
(c_finish_omp_atomic): Add swapped argument; if true,
build the operation first with the rhs, lhs argument order and use
NOP_EXPR in build_modify_expr.
(c_finish_omp_for): Add code argument, pass it down to make_node.
(c_omp_split_clauses): New function.
(c_split_parallel_clauses): Removed.
(c_omp_declare_simd_clause_cmp, c_omp_declare_simd_clauses_to_numbers,
c_omp_declare_simd_clauses_to_decls): New functions.
* c-common.h (omp_clause_mask): New type.
(OMP_CLAUSE_MASK_1): Define.
(omp_clause_mask::omp_clause_mask, omp_clause_mask::operator &=,
omp_clause_mask::operator |=, omp_clause_mask::operator ~,
omp_clause_mask::operator |, omp_clause_mask::operator &,
omp_clause_mask::operator <<, omp_clause_mask::operator >>,
omp_clause_mask::operator ==): New methods.
(enum c_omp_clause_split): New.
(c_finish_omp_taskgroup): New prototype.
(c_finish_omp_atomic): Add swapped argument.
(c_finish_omp_for): Add code argument.
(c_omp_split_clauses): New prototype.
(c_split_parallel_clauses): Removed.
(c_omp_declare_simd_clauses_to_numbers,
c_omp_declare_simd_clauses_to_decls): New prototypes.
* c-pragma.c (omp_pragmas): Add new OpenMP 4.0 constructs.
* c-pragma.h (enum pragma_kind): Add PRAGMA_OMP_CANCEL,
PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_DECLARE_REDUCTION,
PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_END_DECLARE_TARGET, PRAGMA_OMP_SIMD,
PRAGMA_OMP_TARGET, PRAGMA_OMP_TASKGROUP and PRAGMA_OMP_TEAMS.
Remove PRAGMA_OMP_PARALLEL_FOR and PRAGMA_OMP_PARALLEL_SECTIONS.
(enum pragma_omp_clause): Add PRAGMA_OMP_CLAUSE_ALIGNED,
PRAGMA_OMP_CLAUSE_DEPEND, PRAGMA_OMP_CLAUSE_DEVICE,
PRAGMA_OMP_CLAUSE_DIST_SCHEDULE, PRAGMA_OMP_CLAUSE_FOR,
PRAGMA_OMP_CLAUSE_FROM, PRAGMA_OMP_CLAUSE_INBRANCH,
PRAGMA_OMP_CLAUSE_LINEAR, PRAGMA_OMP_CLAUSE_MAP,
PRAGMA_OMP_CLAUSE_NOTINBRANCH, PRAGMA_OMP_CLAUSE_NUM_TEAMS,
PRAGMA_OMP_CLAUSE_PARALLEL, PRAGMA_OMP_CLAUSE_PROC_BIND,
PRAGMA_OMP_CLAUSE_SAFELEN, PRAGMA_OMP_CLAUSE_SECTIONS,
PRAGMA_OMP_CLAUSE_SIMDLEN, PRAGMA_OMP_CLAUSE_TASKGROUP,
PRAGMA_OMP_CLAUSE_THREAD_LIMIT, PRAGMA_OMP_CLAUSE_TO and
PRAGMA_OMP_CLAUSE_UNIFORM.
gcc/ada/
* gcc-interface/utils.c (DEF_FUNCTION_TYPE_8): Define.
gcc/fortran/
* trans-openmp.c (gfc_omp_clause_default_ctor,
gfc_omp_clause_dtor): Return NULL for OMP_CLAUSE_REDUCTION.
* f95-lang.c (ATTR_NULL, DEF_FUNCTION_TYPE_8): Define.
* types.def (DEF_FUNCTION_TYPE_8): Document.
(BT_FN_VOID_OMPFN_PTR_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG,
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): Remove.
(BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT,
BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT,
BT_FN_BOOL_INT, BT_FN_BOOL_INT_BOOL, BT_FN_VOID_UINT_UINT,
BT_FN_VOID_INT_PTR_SIZE_PTR_PTR_PTR,
BT_FN_VOID_INT_OMPFN_PTR_SIZE_PTR_PTR_PTR,
BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR): New.
gcc/lto/
* lto-lang.c (DEF_FUNCTION_TYPE_8): Define.
gcc/c/
* c-lang.h (current_omp_declare_target_attribute): New extern
decl.
* c-parser.c: Include c-lang.h.
(struct c_parser): Change tokens to c_token *.
Add tokens_buf field. Change tokens_avail type to unsigned int.
(c_parser_consume_token): If parser->tokens isn't
&parser->tokens_buf[0], increment parser->tokens.
(c_parser_consume_pragma): Likewise.
(enum pragma_context): Add pragma_struct and pragma_param.
(c_parser_external_declaration): Adjust
c_parser_declaration_or_fndef caller.
(c_parser_declaration_or_fndef): Add omp_declare_simd_clauses
argument; if it is a non-vNULL vector, call c_finish_omp_declare_simd.
Adjust recursive call.
(c_parser_struct_or_union_specifier): Use pragma_struct instead
of pragma_external.
(c_parser_parameter_declaration): Use pragma_param instead of
pragma_external.
(c_parser_compound_statement_nostart, c_parser_label,
c_parser_for_statement): Adjust
c_parser_declaration_or_fndef callers.
(c_parser_expr_no_commas): Add omp_atomic_lhs argument, pass
it through to c_parser_conditional_expression.
(c_parser_conditional_expression): Add omp_atomic_lhs argument,
pass it through to c_parser_binary_expression. Adjust recursive
call.
(c_parser_binary_expression): Remove prec argument, add
omp_atomic_lhs argument instead. Always start from PREC_NONE; if
omp_atomic_lhs is non-NULL and one of the arguments of the toplevel
binop matches it, use build2 instead of parser_build_binary_op.
(c_parser_pragma): Handle PRAGMA_OMP_CANCEL,
PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_TARGET,
PRAGMA_OMP_END_DECLARE_TARGET, PRAGMA_OMP_DECLARE_REDUCTION.
Handle pragma_struct and pragma_param the same as pragma_external.
(c_parser_omp_clause_name): Parse new OpenMP 4.0 clause names.
(c_parser_omp_variable_list): Parse array sections for
OMP_CLAUSE_{DEPEND,MAP,TO,FROM} clauses.
(c_parser_omp_clause_collapse): Fully fold collapse expression.
(c_parser_omp_clause_reduction): Handle user defined reductions.
(c_parser_omp_clause_branch, c_parser_omp_clause_cancelkind,
c_parser_omp_clause_num_teams, c_parser_omp_clause_thread_limit,
c_parser_omp_clause_aligned, c_parser_omp_clause_linear,
c_parser_omp_clause_safelen, c_parser_omp_clause_simdlen,
c_parser_omp_clause_depend, c_parser_omp_clause_map,
c_parser_omp_clause_device, c_parser_omp_clause_dist_schedule,
c_parser_omp_clause_proc_bind, c_parser_omp_clause_to,
c_parser_omp_clause_from, c_parser_omp_clause_uniform): New functions.
(c_parser_omp_all_clauses): Add finish_p argument. Don't call
c_finish_omp_clauses if it is false. Handle new OpenMP 4.0 clauses.
(c_parser_omp_atomic): Parse seq_cst clause, pass true if it is
present to c_finish_omp_atomic. Handle OpenMP 4.0 atomic forms.
(c_parser_omp_for_loop): Add CODE argument, pass it through
to c_finish_omp_for. Change last argument to cclauses,
and adjust uses to grab parallel clauses from the array of all
the split clauses. Adjust c_parser_binary_expression,
c_parser_declaration_or_fndef and c_finish_omp_for callers.
(omp_split_clauses): New function.
(c_parser_omp_simd): New function.
(c_parser_omp_for): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined constructs,
and call c_parser_omp_simd when parsing for simd.
(c_parser_omp_sections_scope): If section-sequence doesn't start with
#pragma omp section, require exactly one structured-block instead of
sequence of statements.
(c_parser_omp_sections): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined constructs.
(c_parser_omp_parallel): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined
constructs.
(c_parser_omp_taskgroup, c_parser_omp_cancel,
c_parser_omp_cancellation_point, c_parser_omp_distribute,
c_parser_omp_teams, c_parser_omp_target_data,
c_parser_omp_target_update, c_parser_omp_target,
c_parser_omp_declare_simd, c_finish_omp_declare_simd,
c_parser_omp_declare_target, c_parser_omp_end_declare_target,
c_parser_omp_declare_reduction, c_parser_omp_declare): New functions.
(c_parser_omp_construct): Add p_name and mask vars. Handle
PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP,
PRAGMA_OMP_TEAMS. Adjust c_parser_omp_for, c_parser_omp_parallel
and c_parser_omp_sections callers.
(c_parse_file): Initialize tparser.tokens and the_parser->tokens here.
(OMP_FOR_CLAUSE_MASK, OMP_SECTIONS_CLAUSE_MASK,
OMP_SINGLE_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1.
(OMP_PARALLEL_CLAUSE_MASK): Likewise. Add OMP_CLAUSE_PROC_BIND.
(OMP_TASK_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1. Add
OMP_CLAUSE_DEPEND.
(OMP_SIMD_CLAUSE_MASK, OMP_CANCEL_CLAUSE_MASK,
OMP_CANCELLATION_POINT_CLAUSE_MASK, OMP_DISTRIBUTE_CLAUSE_MASK,
OMP_TEAMS_CLAUSE_MASK, OMP_TARGET_DATA_CLAUSE_MASK,
OMP_TARGET_UPDATE_CLAUSE_MASK, OMP_TARGET_CLAUSE_MASK,
OMP_DECLARE_SIMD_CLAUSE_MASK): Define.
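
Among the constructs the updated parser accepts is the OpenMP 4.0 seq_cst
atomic together with the capture block form, for example:

    int
    fetch_and_scale (int *p, int factor)
    {
      int old;
      /* OpenMP 4.0 capture block form with the new seq_cst clause.  */
      #pragma omp atomic capture seq_cst
      {
        old = *p;
        *p = *p * factor;
      }
      return old;
    }
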
* c-typeck.c: Include tree-inline.h.
(c_finish_omp_cancel, c_finish_omp_cancellation_point,
handle_omp_array_sections_1, handle_omp_array_sections,
c_clone_omp_udr, c_find_omp_placeholder_r): New functions.
(c_finish_omp_clauses): Handle new OpenMP 4.0 clauses and
user defined reductions.
(c_tree_equal): New function.
* c-tree.h (temp_store_parm_decls, temp_pop_parm_decls,
c_finish_omp_cancel, c_finish_omp_cancellation_point, c_tree_equal,
c_omp_reduction_id, c_omp_reduction_decl, c_omp_reduction_lookup,
c_check_omp_declare_reduction_r): New prototypes.
* c-decl.c (current_omp_declare_target_attribute): New variable.
(c_decl_attributes): New function.
(start_decl, start_function): Use it instead of decl_attributes.
(temp_store_parm_decls, temp_pop_parm_decls, c_omp_reduction_id,
c_omp_reduction_decl, c_omp_reduction_lookup,
c_check_omp_declare_reduction_r): New functions.
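
The c_omp_reduction_* helpers implement user defined reductions declared with
#pragma omp declare reduction; a self-contained example in standard OpenMP 4.0
C syntax:

    #include <limits.h>

    /* User defined reduction: running minimum of ints.  */
    #pragma omp declare reduction (mymin : int : \
        omp_out = omp_in < omp_out ? omp_in : omp_out) \
        initializer (omp_priv = INT_MAX)

    int
    array_min (const int *a, int n)
    {
      int i, m = INT_MAX;
      #pragma omp parallel for reduction (mymin : m)
      for (i = 0; i < n; i++)
        if (a[i] < m)
          m = a[i];
      return m;
    }
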
gcc/cp/
* decl.c (duplicate_decls): Error out for redeclaration of UDRs.
(declare_simd_adjust_this): New function.
(grokfndecl): If "omp declare simd" attribute is present,
call declare_simd_adjust_this if needed and
c_omp_declare_simd_clauses_to_numbers.
* cp-array-notation.c (expand_array_notation_exprs): Handle
OMP_TASKGROUP.
* cp-gimplify.c (cp_gimplify_expr): Handle OMP_SIMD and
OMP_DISTRIBUTE. Handle is_invisiref_parm decls in
OMP_CLAUSE_REDUCTION.
(cp_genericize_r): Handle OMP_SIMD and OMP_DISTRIBUTE like
OMP_FOR.
(cxx_omp_privatize_by_reference): Return true for
is_invisiref_parm decls.
(cxx_omp_finish_clause): Adjust cxx_omp_create_clause_info
caller.
* pt.c (apply_late_template_attributes): For "omp declare simd"
attribute call tsubst_omp_clauses,
c_omp_declare_simd_clauses_to_decls, finish_omp_clauses
and c_omp_declare_simd_clauses_to_numbers.
(instantiate_class_template_1): Call cp_check_omp_declare_reduction
for UDRs.
(tsubst_decl): Handle UDRs.
(tsubst_omp_clauses): Add declare_simd argument, if true don't
call finish_omp_clauses. Handle new OpenMP 4.0 clauses.
Handle non-NULL OMP_CLAUSE_REDUCTION_PLACEHOLDER on
OMP_CLAUSE_REDUCTION.
(tsubst_expr): For UDRs call pushdecl and
cp_check_omp_declare_reduction. Adjust tsubst_omp_clauses
callers. Handle OMP_SIMD, OMP_DISTRIBUTE, OMP_TEAMS,
OMP_TARGET_DATA, OMP_TARGET_UPDATE, OMP_TARGET, OMP_TASKGROUP.
Adjust finish_omp_atomic caller.
(tsubst_omp_udr): New function.
(instantiate_decl): For UDRs at block scope, don't call
start_preparsed_function/finish_function. Call tsubst_omp_udr.
* semantics.c (cxx_omp_create_clause_info): Add need_dtor argument,
use it instead of need_default_ctor || need_copy_ctor.
(struct cp_check_omp_declare_reduction_data): New type.
(handle_omp_array_sections_1, handle_omp_array_sections,
omp_reduction_id, omp_reduction_lookup,
cp_remove_omp_priv_cleanup_stmt, cp_check_omp_declare_reduction_r,
cp_check_omp_declare_reduction, clone_omp_udr,
find_omp_placeholder_r, finish_omp_reduction_clause): New functions.
(finish_omp_clauses): Handle new OpenMP 4.0 clauses and user defined
reductions.
(finish_omp_for): Add CODE argument, use it instead of hardcoded
OMP_FOR. Adjust c_finish_omp_for caller.
(finish_omp_atomic): Add seq_cst argument, adjust
c_finish_omp_atomic callers, handle seq_cst and new OpenMP 4.0
atomic variants.
(finish_omp_cancel, finish_omp_cancellation_point): New functions.
* decl2.c (mark_used): Force immediate instantiation of
DECL_OMP_DECLARE_REDUCTION_P decls.
(is_late_template_attribute): Return true for "omp declare simd"
attribute.
(cp_omp_mappable_type): New function.
(cplus_decl_attributes): Add implicit "omp declare target" attribute
if requested.
* parser.c (cp_debug_parser): Print
parser->colon_doesnt_start_class_def_p.
(cp_ensure_no_omp_declare_simd, cp_finalize_omp_declare_simd): New
functions.
(enum pragma_context): Add pragma_member and pragma_objc_icode.
(cp_parser_binary_expression): Handle no_toplevel_fold_p
even for binary operations other than comparison.
(cp_parser_linkage_specification): Call
cp_ensure_no_omp_declare_simd if needed.
(cp_parser_namespace_definition): Likewise.
(cp_parser_init_declarator): Call cp_finalize_omp_declare_simd.
(cp_parser_direct_declarator): Pass declarator to
cp_parser_late_return_type_opt.
(cp_parser_late_return_type_opt): Add declarator argument,
call cp_parser_late_parsing_omp_declare_simd for declare simd.
(cp_parser_class_specifier_1): Call cp_ensure_no_omp_declare_simd.
Parse UDRs before all other methods.
(cp_parser_member_specification_opt): Use pragma_member instead of
pragma_external.
(cp_parser_member_declaration): Call cp_finalize_omp_declare_simd.
(cp_parser_function_definition_from_specifiers_and_declarator,
cp_parser_save_member_function_body): Likewise.
(cp_parser_late_parsing_for_member): Handle UDRs specially.
(cp_parser_next_token_starts_class_definition_p): Don't allow
CPP_COLON if colon_doesnt_start_class_def_p flag is true.
(cp_parser_objc_interstitial_code): Use pragma_objc_icode
instead of pragma_external.
(cp_parser_omp_clause_name): Parse new OpenMP 4.0 clause names.
(cp_parser_omp_var_list_no_open): Parse array sections for
OMP_CLAUSE_{DEPEND,MAP,TO,FROM} clauses. Add COLON argument,
if non-NULL, allow parsing to end with a colon rather than close
paren.
(cp_parser_omp_var_list): Adjust cp_parser_omp_var_list_no_open
caller.
(cp_parser_omp_clause_reduction): Handle user defined reductions.
(cp_parser_omp_clause_branch, cp_parser_omp_clause_cancelkind,
cp_parser_omp_clause_num_teams, cp_parser_omp_clause_thread_limit,
cp_parser_omp_clause_aligned, cp_parser_omp_clause_linear,
cp_parser_omp_clause_safelen, cp_parser_omp_clause_simdlen,
cp_parser_omp_clause_depend, cp_parser_omp_clause_map,
cp_parser_omp_clause_device, cp_parser_omp_clause_dist_schedule,
cp_parser_omp_clause_proc_bind, cp_parser_omp_clause_to,
cp_parser_omp_clause_from, cp_parser_omp_clause_uniform): New
functions.
(cp_parser_omp_all_clauses): Add finish_p argument. Don't call
finish_omp_clauses if it is false. Handle new OpenMP 4.0 clauses.
(cp_parser_omp_atomic): Parse seq_cst clause, pass
true if it is present to finish_omp_atomic. Handle new OpenMP 4.0
atomic forms.
(cp_parser_omp_for_loop): Add CODE argument, pass it through
to finish_omp_for. Change last argument to cclauses,
and adjust uses to grab parallel clauses from the array of all
the split clauses.
(cp_omp_split_clauses): New function.
(cp_parser_omp_simd): New function.
(cp_parser_omp_for): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined constructs,
and call c_parser_omp_simd when parsing for simd.
(cp_parser_omp_sections_scope): If section-sequence doesn't start with
#pragma omp section, require exactly one structured-block instead of
sequence of statements.
(cp_parser_omp_sections): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined constructs.
(cp_parser_omp_parallel): Add p_name, mask and cclauses arguments.
Allow the function to be called also when parsing combined
constructs.
(cp_parser_omp_taskgroup, cp_parser_omp_cancel,
cp_parser_omp_cancellation_point, cp_parser_omp_distribute,
cp_parser_omp_teams, cp_parser_omp_target_data,
cp_parser_omp_target_update, cp_parser_omp_target,
cp_parser_omp_declare_simd, cp_parser_late_parsing_omp_declare_simd,
cp_parser_omp_declare_target, cp_parser_omp_end_declare_target,
cp_parser_omp_declare_reduction_exprs, cp_parser_omp_declare_reduction,
cp_parser_omp_declare): New functions.
(cp_parser_omp_construct): Add p_name and mask vars. Handle
PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP,
PRAGMA_OMP_TEAMS. Adjust cp_parser_omp_for, cp_parser_omp_parallel
and cp_parser_omp_sections callers.
(cp_parser_pragma): Handle PRAGMA_OMP_CANCEL,
PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_DECLARE_REDUCTION,
PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP,
PRAGMA_OMP_TEAMS, PRAGMA_OMP_TARGET, PRAGMA_OMP_END_DECLARE_TARGET.
Handle pragma_member and pragma_objc_icode like pragma_external.
(OMP_FOR_CLAUSE_MASK, OMP_SECTIONS_CLAUSE_MASK,
OMP_SINGLE_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1.
(OMP_PARALLEL_CLAUSE_MASK): Likewise. Add OMP_CLAUSE_PROC_BIND.
(OMP_TASK_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1. Add
OMP_CLAUSE_DEPEND.
(OMP_SIMD_CLAUSE_MASK, OMP_CANCEL_CLAUSE_MASK,
OMP_CANCELLATION_POINT_CLAUSE_MASK, OMP_DISTRIBUTE_CLAUSE_MASK,
OMP_TEAMS_CLAUSE_MASK, OMP_TARGET_DATA_CLAUSE_MASK,
OMP_TARGET_UPDATE_CLAUSE_MASK, OMP_TARGET_CLAUSE_MASK,
OMP_DECLARE_SIMD_CLAUSE_MASK): Define.
* parser.h (struct cp_omp_declare_simd_data): New type.
(struct cp_parser): Add colon_doesnt_start_class_def_p and
omp_declare_simd fields.
* cp-objcp-common.h (LANG_HOOKS_OMP_MAPPABLE_TYPE): Define.
* cp-tree.h (struct lang_decl_fn): Add omp_declare_reduction_p
bit.
(DECL_OMP_DECLARE_REDUCTION_P): Define.
(OMP_FOR_GIMPLIFYING_P): Use OMP_LOOP_CHECK macro.
(struct saved_scope): Add omp_declare_target_attribute field.
(cp_omp_mappable_type, omp_reduction_id,
cp_remove_omp_priv_cleanup_stmt, cp_check_omp_declare_reduction,
finish_omp_cancel, finish_omp_cancellation_point): New prototypes.
(finish_omp_for): Add CODE argument.
(finish_omp_atomic): Add seq_cst argument.
(cxx_omp_create_clause_info): Add need_dtor argument.
gcc/testsuite/
* c-c++-common/gomp/atomic-15.c: Adjust for C diagnostics.
Remove error test that is now valid in OpenMP 4.0.
* c-c++-common/gomp/atomic-16.c: New test.
* c-c++-common/gomp/cancel-1.c: New test.
* c-c++-common/gomp/depend-1.c: New test.
* c-c++-common/gomp/depend-2.c: New test.
* c-c++-common/gomp/map-1.c: New test.
* c-c++-common/gomp/pr58472.c: New test.
* c-c++-common/gomp/sections1.c: New test.
* c-c++-common/gomp/simd1.c: New test.
* c-c++-common/gomp/simd2.c: New test.
* c-c++-common/gomp/simd3.c: New test.
* c-c++-common/gomp/simd4.c: New test.
* c-c++-common/gomp/simd5.c: New test.
* c-c++-common/gomp/single1.c: New test.
* g++.dg/gomp/block-0.C: Adjust for stricter #pragma omp sections
parser.
* g++.dg/gomp/block-3.C: Likewise.
* g++.dg/gomp/clause-3.C: Adjust error messages.
* g++.dg/gomp/declare-simd-1.C: New test.
* g++.dg/gomp/declare-simd-2.C: New test.
* g++.dg/gomp/depend-1.C: New test.
* g++.dg/gomp/depend-2.C: New test.
* g++.dg/gomp/target-1.C: New test.
* g++.dg/gomp/target-2.C: New test.
* g++.dg/gomp/taskgroup-1.C: New test.
* g++.dg/gomp/teams-1.C: New test.
* g++.dg/gomp/udr-1.C: New test.
* g++.dg/gomp/udr-2.C: New test.
* g++.dg/gomp/udr-3.C: New test.
* g++.dg/gomp/udr-4.C: New test.
* g++.dg/gomp/udr-5.C: New test.
* g++.dg/gomp/udr-6.C: New test.
* gcc.dg/autopar/outer-1.c: Expect 4 instead of 5 loopfn matches.
* gcc.dg/autopar/outer-2.c: Likewise.
* gcc.dg/autopar/outer-3.c: Likewise.
* gcc.dg/autopar/outer-4.c: Likewise.
* gcc.dg/autopar/outer-5.c: Likewise.
* gcc.dg/autopar/outer-6.c: Likewise.
* gcc.dg/autopar/parallelization-1.c: Likewise.
* gcc.dg/gomp/block-3.c: Adjust for stricter #pragma omp sections
parser.
* gcc.dg/gomp/clause-1.c: Adjust error messages.
* gcc.dg/gomp/combined-1.c: Look for GOMP_parallel_loop_runtime
instead of GOMP_parallel_loop_runtime_start.
* gcc.dg/gomp/declare-simd-1.c: New test.
* gcc.dg/gomp/declare-simd-2.c: New test.
* gcc.dg/gomp/nesting-1.c: Adjust for stricter #pragma omp sections
parser. Add further #pragma omp sections nesting tests.
* gcc.dg/gomp/target-1.c: New test.
* gcc.dg/gomp/target-2.c: New test.
* gcc.dg/gomp/taskgroup-1.c: New test.
* gcc.dg/gomp/teams-1.c: New test.
* gcc.dg/gomp/udr-1.c: New test.
* gcc.dg/gomp/udr-2.c: New test.
* gcc.dg/gomp/udr-3.c: New test.
* gcc.dg/gomp/udr-4.c: New test.
* gfortran.dg/gomp/appendix-a/a.35.5.f90: Add dg-error.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@203408 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libgomp/config')

  -rw-r--r--  libgomp/config/linux/affinity.c  351
  -rw-r--r--  libgomp/config/linux/bar.c       106
  -rw-r--r--  libgomp/config/linux/bar.h        62
  -rw-r--r--  libgomp/config/linux/proc.c       96
  -rw-r--r--  libgomp/config/linux/proc.h        5
  -rw-r--r--  libgomp/config/posix/affinity.c   79
  -rw-r--r--  libgomp/config/posix/bar.c       132
  -rw-r--r--  libgomp/config/posix/bar.h        58

  8 files changed, 778 insertions, 111 deletions
diff --git a/libgomp/config/linux/affinity.c b/libgomp/config/linux/affinity.c
index dc6c7e5ed3b..789cdce077d 100644
--- a/libgomp/config/linux/affinity.c
+++ b/libgomp/config/linux/affinity.c
@@ -29,90 +29,327 @@
 #endif
 #include "libgomp.h"
 #include "proc.h"
+#include <errno.h>
 #include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
 #include <unistd.h>

 #ifdef HAVE_PTHREAD_AFFINITY_NP

-static unsigned int affinity_counter;
+#ifndef CPU_ALLOC_SIZE
+#define CPU_ISSET_S(idx, size, set) CPU_ISSET(idx, set)
+#define CPU_ZERO_S(size, set) CPU_ZERO(set)
+#define CPU_SET_S(idx, size, set) CPU_SET(idx, set)
+#define CPU_CLR_S(idx, size, set) CPU_CLR(idx, set)
+#endif

 void
 gomp_init_affinity (void)
 {
[the old cpu_set_t/gomp_cpu_affinity based body is deleted]
+  if (gomp_places_list == NULL)
+    {
+      if (!gomp_affinity_init_level (1, ULONG_MAX, true))
+        return;
+    }
+
+  struct gomp_thread *thr = gomp_thread ();
+  pthread_setaffinity_np (pthread_self (), gomp_cpuset_size,
+                          (cpu_set_t *) gomp_places_list[0]);
+  thr->place = 1;
+  thr->ts.place_partition_off = 0;
+  thr->ts.place_partition_len = gomp_places_list_len;
+}
+
+void
+gomp_init_thread_affinity (pthread_attr_t *attr, unsigned int place)
+{
+  pthread_attr_setaffinity_np (attr, gomp_cpuset_size,
+                               (cpu_set_t *) gomp_places_list[place]);
+}
[the hunk goes on to add gomp_affinity_alloc, gomp_affinity_init_place,
 gomp_affinity_add_cpus, gomp_affinity_remove_cpu, gomp_affinity_copy_place,
 gomp_affinity_same_place, gomp_affinity_finalize_place_list,
 gomp_affinity_init_level and gomp_affinity_print_place, and deletes the
 old affinity_counter based gomp_init_thread_affinity]

diff --git a/libgomp/config/linux/bar.c b/libgomp/config/linux/bar.c
index 35baa886ab4..6b591e5a6c5 100644
--- a/libgomp/config/linux/bar.c
+++ b/libgomp/config/linux/bar.c
@@ -33,11 +33,11 @@
 void
 gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
 {
-  if (__builtin_expect ((state & 1) != 0, 0))
+  if (__builtin_expect (state & BAR_WAS_LAST, 0))
     {
       /* Next time we'll be awaiting TOTAL threads again.  */
       bar->awaited = bar->total;
-      __atomic_store_n (&bar->generation, bar->generation + 4,
+      __atomic_store_n (&bar->generation, bar->generation + BAR_INCR,
                         MEMMODEL_RELEASE);
       futex_wake ((int *) &bar->generation, INT_MAX);
     }
@@ -66,7 +66,7 @@
 void
 gomp_barrier_wait_last (gomp_barrier_t *bar)
 {
   gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
-  if (state & 1)
+  if (state & BAR_WAS_LAST)
     gomp_barrier_wait_end (bar, state);
 }
[gomp_team_barrier_wait_end is rewritten in terms of the BAR_* defines,
 clears BAR_CANCELLED from the state and resets work_share_cancelled on
 the last thread; new functions gomp_team_barrier_wait_final and
 gomp_team_barrier_wait_cancel_end follow; the excerpt ends inside the
 opening comment of gomp_team_barrier_wait_cancel_end]
*/ + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + + bar->awaited = bar->total; + team->work_share_cancelled = 0; + if (__builtin_expect (team->task_count, 0)) + { + gomp_barrier_handle_tasks (state); + state &= ~BAR_WAS_LAST; + } + else + { + state += BAR_INCR - BAR_WAS_LAST; + __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE); + futex_wake ((int *) &bar->generation, INT_MAX); + return false; + } + } + + if (__builtin_expect (state & BAR_CANCELLED, 0)) + return true; + + generation = state; + do + { + do_wait ((int *) &bar->generation, generation); + gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE); + if (__builtin_expect (gen & BAR_CANCELLED, 0)) + return true; + if (__builtin_expect (gen & BAR_TASK_PENDING, 0)) + { + gomp_barrier_handle_tasks (state); + gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE); + } + generation |= gen & BAR_WAITING_FOR_TASK; + } + while (gen != state + BAR_INCR); + + return false; +} + +bool +gomp_team_barrier_wait_cancel (gomp_barrier_t *bar) +{ + return gomp_team_barrier_wait_cancel_end (bar, gomp_barrier_wait_start (bar)); +} + +void +gomp_team_barrier_cancel (struct gomp_team *team) +{ + gomp_mutex_lock (&team->task_lock); + if (team->barrier.generation & BAR_CANCELLED) + { + gomp_mutex_unlock (&team->task_lock); + return; + } + team->barrier.generation |= BAR_CANCELLED; + gomp_mutex_unlock (&team->task_lock); + futex_wake ((int *) &team->barrier.generation, INT_MAX); +} diff --git a/libgomp/config/linux/bar.h b/libgomp/config/linux/bar.h index 69b97069647..914c86778e5 100644 --- a/libgomp/config/linux/bar.h +++ b/libgomp/config/linux/bar.h @@ -38,13 +38,25 @@ typedef struct unsigned total __attribute__((aligned (64))); unsigned generation; unsigned awaited __attribute__((aligned (64))); + unsigned awaited_final; } gomp_barrier_t; + typedef unsigned int gomp_barrier_state_t; +/* The generation field contains a counter in the high bits, with a few + low bits dedicated to flags. Note that TASK_PENDING and WAS_LAST can + share space because WAS_LAST is never stored back to generation. 
*/ +#define BAR_TASK_PENDING 1 +#define BAR_WAS_LAST 1 +#define BAR_WAITING_FOR_TASK 2 +#define BAR_CANCELLED 4 +#define BAR_INCR 8 + static inline void gomp_barrier_init (gomp_barrier_t *bar, unsigned count) { bar->total = count; bar->awaited = count; + bar->awaited_final = count; bar->generation = 0; } @@ -62,27 +74,55 @@ extern void gomp_barrier_wait (gomp_barrier_t *); extern void gomp_barrier_wait_last (gomp_barrier_t *); extern void gomp_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t); extern void gomp_team_barrier_wait (gomp_barrier_t *); +extern void gomp_team_barrier_wait_final (gomp_barrier_t *); extern void gomp_team_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t); +extern bool gomp_team_barrier_wait_cancel (gomp_barrier_t *); +extern bool gomp_team_barrier_wait_cancel_end (gomp_barrier_t *, + gomp_barrier_state_t); extern void gomp_team_barrier_wake (gomp_barrier_t *, int); +struct gomp_team; +extern void gomp_team_barrier_cancel (struct gomp_team *); static inline gomp_barrier_state_t gomp_barrier_wait_start (gomp_barrier_t *bar) { - unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE) & ~3; + unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE); + ret &= -BAR_INCR | BAR_CANCELLED; /* A memory barrier is needed before exiting from the various forms of gomp_barrier_wait, to satisfy OpenMP API version 3.1 section 2.8.6 flush Construct, which says there is an implicit flush during a barrier region. This is a convenient place to add the barrier, so we use MEMMODEL_ACQ_REL here rather than MEMMODEL_ACQUIRE. */ - ret += __atomic_add_fetch (&bar->awaited, -1, MEMMODEL_ACQ_REL) == 0; + if (__atomic_add_fetch (&bar->awaited, -1, MEMMODEL_ACQ_REL) == 0) + ret |= BAR_WAS_LAST; + return ret; +} + +static inline gomp_barrier_state_t +gomp_barrier_wait_cancel_start (gomp_barrier_t *bar) +{ + return gomp_barrier_wait_start (bar); +} + +/* This is like gomp_barrier_wait_start, except it decrements + bar->awaited_final rather than bar->awaited and should be used + for the gomp_team_end barrier only. */ +static inline gomp_barrier_state_t +gomp_barrier_wait_final_start (gomp_barrier_t *bar) +{ + unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE); + ret &= -BAR_INCR | BAR_CANCELLED; + /* See above gomp_barrier_wait_start comment. 
*/ + if (__atomic_add_fetch (&bar->awaited_final, -1, MEMMODEL_ACQ_REL) == 0) + ret |= BAR_WAS_LAST; return ret; } static inline bool gomp_barrier_last_thread (gomp_barrier_state_t state) { - return state & 1; + return state & BAR_WAS_LAST; } /* All the inlines below must be called with team->task_lock @@ -91,31 +131,37 @@ gomp_barrier_last_thread (gomp_barrier_state_t state) static inline void gomp_team_barrier_set_task_pending (gomp_barrier_t *bar) { - bar->generation |= 1; + bar->generation |= BAR_TASK_PENDING; } static inline void gomp_team_barrier_clear_task_pending (gomp_barrier_t *bar) { - bar->generation &= ~1; + bar->generation &= ~BAR_TASK_PENDING; } static inline void gomp_team_barrier_set_waiting_for_tasks (gomp_barrier_t *bar) { - bar->generation |= 2; + bar->generation |= BAR_WAITING_FOR_TASK; } static inline bool gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar) { - return (bar->generation & 2) != 0; + return (bar->generation & BAR_WAITING_FOR_TASK) != 0; +} + +static inline bool +gomp_team_barrier_cancelled (gomp_barrier_t *bar) +{ + return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0); } static inline void gomp_team_barrier_done (gomp_barrier_t *bar, gomp_barrier_state_t state) { - bar->generation = (state & ~3) + 4; + bar->generation = (state & -BAR_INCR) + BAR_INCR; } #endif /* GOMP_BARRIER_H */ diff --git a/libgomp/config/linux/proc.c b/libgomp/config/linux/proc.c index cbb773e6e90..d4ae116e239 100644 --- a/libgomp/config/linux/proc.c +++ b/libgomp/config/linux/proc.c @@ -30,6 +30,7 @@ #endif #include "libgomp.h" #include "proc.h" +#include <errno.h> #include <stdlib.h> #include <unistd.h> #ifdef HAVE_GETLOADAVG @@ -39,19 +40,28 @@ #endif #ifdef HAVE_PTHREAD_AFFINITY_NP +unsigned long gomp_cpuset_size; +static unsigned long gomp_get_cpuset_size; +cpu_set_t *gomp_cpusetp; + unsigned long -gomp_cpuset_popcount (cpu_set_t *cpusetp) +gomp_cpuset_popcount (unsigned long cpusetsize, cpu_set_t *cpusetp) { -#ifdef CPU_COUNT - /* glibc 2.6 and above provide a macro for this. */ - return CPU_COUNT (cpusetp); +#ifdef CPU_COUNT_S + /* glibc 2.7 and above provide a macro for this. */ + return CPU_COUNT_S (cpusetsize, cpusetp); #else +#ifdef CPU_COUNT + if (cpusetsize == sizeof (cpu_set_t)) + /* glibc 2.6 and above provide a macro for this. */ + return CPU_COUNT (cpusetp); +#endif size_t i; unsigned long ret = 0; - extern int check[sizeof (cpusetp->__bits[0]) == sizeof (unsigned long int)]; + extern int check[sizeof (cpusetp->__bits[0]) == sizeof (unsigned long int) + ? 1 : -1]; - (void) check; - for (i = 0; i < sizeof (*cpusetp) / sizeof (cpusetp->__bits[0]); i++) + for (i = 0; i < cpusetsize / sizeof (cpusetp->__bits[0]); i++) { unsigned long int mask = cpusetp->__bits[i]; if (mask == 0) @@ -70,16 +80,63 @@ void gomp_init_num_threads (void) { #ifdef HAVE_PTHREAD_AFFINITY_NP - cpu_set_t cpuset; +#if defined (_SC_NPROCESSORS_CONF) && defined (CPU_ALLOC_SIZE) + gomp_cpuset_size = sysconf (_SC_NPROCESSORS_CONF); + gomp_cpuset_size = CPU_ALLOC_SIZE (gomp_cpuset_size); +#else + gomp_cpuset_size = sizeof (cpu_set_t); +#endif - if (pthread_getaffinity_np (pthread_self (), sizeof (cpuset), &cpuset) == 0) + gomp_cpusetp = (cpu_set_t *) gomp_malloc (gomp_cpuset_size); + do { - /* Count only the CPUs this process can use. 
*/ - gomp_global_icv.nthreads_var = gomp_cpuset_popcount (&cpuset); - if (gomp_global_icv.nthreads_var == 0) - gomp_global_icv.nthreads_var = 1; - return; + int ret = pthread_getaffinity_np (pthread_self (), gomp_cpuset_size, + gomp_cpusetp); + if (ret == 0) + { + unsigned long i; + /* Count only the CPUs this process can use. */ + gomp_global_icv.nthreads_var + = gomp_cpuset_popcount (gomp_cpuset_size, gomp_cpusetp); + if (gomp_global_icv.nthreads_var == 0) + break; + gomp_get_cpuset_size = gomp_cpuset_size; +#ifdef CPU_ALLOC_SIZE + for (i = gomp_cpuset_size * 8; i; i--) + if (CPU_ISSET_S (i - 1, gomp_cpuset_size, gomp_cpusetp)) + break; + gomp_cpuset_size = CPU_ALLOC_SIZE (i); +#endif + return; + } + if (ret != EINVAL) + break; +#ifdef CPU_ALLOC_SIZE + if (gomp_cpuset_size < sizeof (cpu_set_t)) + gomp_cpuset_size = sizeof (cpu_set_t); + else + gomp_cpuset_size = gomp_cpuset_size * 2; + if (gomp_cpuset_size < 8 * sizeof (cpu_set_t)) + gomp_cpusetp + = (cpu_set_t *) gomp_realloc (gomp_cpusetp, gomp_cpuset_size); + else + { + /* Avoid gomp_fatal if too large memory allocation would be + requested, e.g. kernel returning EINVAL all the time. */ + void *p = realloc (gomp_cpusetp, gomp_cpuset_size); + if (p == NULL) + break; + gomp_cpusetp = (cpu_set_t *) p; + } +#else + break; +#endif } + while (1); + gomp_cpuset_size = 0; + gomp_global_icv.nthreads_var = 1; + free (gomp_cpusetp); + gomp_cpusetp = NULL; #endif #ifdef _SC_NPROCESSORS_ONLN gomp_global_icv.nthreads_var = sysconf (_SC_NPROCESSORS_ONLN); @@ -90,15 +147,14 @@ static int get_num_procs (void) { #ifdef HAVE_PTHREAD_AFFINITY_NP - cpu_set_t cpuset; - - if (gomp_cpu_affinity == NULL) + if (gomp_places_list == NULL) { /* Count only the CPUs this process can use. */ - if (pthread_getaffinity_np (pthread_self (), sizeof (cpuset), - &cpuset) == 0) + if (gomp_cpusetp + && pthread_getaffinity_np (pthread_self (), gomp_get_cpuset_size, + gomp_cpusetp) == 0) { - int ret = gomp_cpuset_popcount (&cpuset); + int ret = gomp_cpuset_popcount (gomp_get_cpuset_size, gomp_cpusetp); return ret != 0 ? 
ret : 1; } } diff --git a/libgomp/config/linux/proc.h b/libgomp/config/linux/proc.h index cba7f4a09e6..bdc85dba99e 100644 --- a/libgomp/config/linux/proc.h +++ b/libgomp/config/linux/proc.h @@ -28,7 +28,10 @@ #include <sched.h> #ifdef HAVE_PTHREAD_AFFINITY_NP -extern unsigned long gomp_cpuset_popcount (cpu_set_t *); +extern unsigned long gomp_cpuset_size attribute_hidden; +extern cpu_set_t *gomp_cpusetp attribute_hidden; +extern unsigned long gomp_cpuset_popcount (unsigned long, cpu_set_t *) + attribute_hidden; #endif #endif /* GOMP_PROC_H */ diff --git a/libgomp/config/posix/affinity.c b/libgomp/config/posix/affinity.c index ac3d14e830c..e7f97ab08d6 100644 --- a/libgomp/config/posix/affinity.c +++ b/libgomp/config/posix/affinity.c @@ -32,7 +32,84 @@ gomp_init_affinity (void) } void -gomp_init_thread_affinity (pthread_attr_t *attr) +gomp_init_thread_affinity (pthread_attr_t *attr, unsigned int place) { (void) attr; + (void) place; +} + +void ** +gomp_affinity_alloc (unsigned long count, bool quiet) +{ + (void) count; + if (!quiet) + gomp_error ("Affinity not supported on this configuration"); + return NULL; +} + +void +gomp_affinity_init_place (void *p) +{ + (void) p; +} + +bool +gomp_affinity_add_cpus (void *p, unsigned long num, + unsigned long len, long stride, bool quiet) +{ + (void) p; + (void) num; + (void) len; + (void) stride; + (void) quiet; + return false; +} + +bool +gomp_affinity_remove_cpu (void *p, unsigned long num) +{ + (void) p; + (void) num; + return false; +} + +bool +gomp_affinity_copy_place (void *p, void *q, long stride) +{ + (void) p; + (void) q; + (void) stride; + return false; +} + +bool +gomp_affinity_same_place (void *p, void *q) +{ + (void) p; + (void) q; + return false; +} + +bool +gomp_affinity_finalize_place_list (bool quiet) +{ + (void) quiet; + return false; +} + +bool +gomp_affinity_init_level (int level, unsigned long count, bool quiet) +{ + (void) level; + (void) count; + (void) quiet; + if (!quiet) + gomp_error ("Affinity not supported on this configuration"); + return NULL; +} + +void +gomp_affinity_print_place (void *p) +{ + (void) p; } diff --git a/libgomp/config/posix/bar.c b/libgomp/config/posix/bar.c index 06a3185c286..bdf3978caee 100644 --- a/libgomp/config/posix/bar.c +++ b/libgomp/config/posix/bar.c @@ -42,6 +42,7 @@ gomp_barrier_init (gomp_barrier_t *bar, unsigned count) bar->total = count; bar->arrived = 0; bar->generation = 0; + bar->cancellable = false; } void @@ -72,7 +73,7 @@ gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state) { unsigned int n; - if (state & 1) + if (state & BAR_WAS_LAST) { n = --bar->arrived; if (n > 0) @@ -113,12 +114,14 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state) { unsigned int n; - if (state & 1) + state &= ~BAR_CANCELLED; + if (state & BAR_WAS_LAST) { n = --bar->arrived; struct gomp_thread *thr = gomp_thread (); struct gomp_team *team = thr->ts.team; + team->work_share_cancelled = 0; if (team->task_count) { gomp_barrier_handle_tasks (state); @@ -128,7 +131,7 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state) return; } - bar->generation = state + 3; + bar->generation = state + BAR_INCR - BAR_WAS_LAST; if (n > 0) { do @@ -141,13 +144,18 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state) else { gomp_mutex_unlock (&bar->mutex1); + int gen; do { gomp_sem_wait (&bar->sem1); - if (bar->generation & 1) - gomp_barrier_handle_tasks (state); + gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE); + if (gen & 
BAR_TASK_PENDING) + { + gomp_barrier_handle_tasks (state); + gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE); + } } - while (bar->generation != state + 4); + while (gen != state + BAR_INCR); #ifdef HAVE_SYNC_BUILTINS n = __sync_add_and_fetch (&bar->arrived, -1); @@ -162,6 +170,81 @@ gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state) } } +bool +gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar, + gomp_barrier_state_t state) +{ + unsigned int n; + + if (state & BAR_WAS_LAST) + { + bar->cancellable = false; + n = --bar->arrived; + struct gomp_thread *thr = gomp_thread (); + struct gomp_team *team = thr->ts.team; + + team->work_share_cancelled = 0; + if (team->task_count) + { + gomp_barrier_handle_tasks (state); + if (n > 0) + gomp_sem_wait (&bar->sem2); + gomp_mutex_unlock (&bar->mutex1); + return false; + } + + bar->generation = state + BAR_INCR - BAR_WAS_LAST; + if (n > 0) + { + do + gomp_sem_post (&bar->sem1); + while (--n != 0); + gomp_sem_wait (&bar->sem2); + } + gomp_mutex_unlock (&bar->mutex1); + } + else + { + if (state & BAR_CANCELLED) + { + gomp_mutex_unlock (&bar->mutex1); + return true; + } + bar->cancellable = true; + gomp_mutex_unlock (&bar->mutex1); + int gen; + do + { + gomp_sem_wait (&bar->sem1); + gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE); + if (gen & BAR_CANCELLED) + break; + if (gen & BAR_TASK_PENDING) + { + gomp_barrier_handle_tasks (state); + gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE); + if (gen & BAR_CANCELLED) + break; + } + } + while (gen != state + BAR_INCR); + +#ifdef HAVE_SYNC_BUILTINS + n = __sync_add_and_fetch (&bar->arrived, -1); +#else + gomp_mutex_lock (&bar->mutex2); + n = --bar->arrived; + gomp_mutex_unlock (&bar->mutex2); +#endif + + if (n == 0) + gomp_sem_post (&bar->sem2); + if (gen & BAR_CANCELLED) + return true; + } + return false; +} + void gomp_team_barrier_wait (gomp_barrier_t *barrier) { @@ -176,3 +259,40 @@ gomp_team_barrier_wake (gomp_barrier_t *bar, int count) while (count-- > 0) gomp_sem_post (&bar->sem1); } + +bool +gomp_team_barrier_wait_cancel (gomp_barrier_t *bar) +{ + gomp_barrier_state_t state = gomp_barrier_wait_cancel_start (bar); + return gomp_team_barrier_wait_cancel_end (bar, state); +} + +void +gomp_team_barrier_cancel (struct gomp_team *team) +{ + if (team->barrier.generation & BAR_CANCELLED) + return; + gomp_mutex_lock (&team->barrier.mutex1); + gomp_mutex_lock (&team->task_lock); + if (team->barrier.generation & BAR_CANCELLED) + { + gomp_mutex_unlock (&team->task_lock); + gomp_mutex_unlock (&team->barrier.mutex1); + return; + } + team->barrier.generation |= BAR_CANCELLED; + gomp_mutex_unlock (&team->task_lock); + if (team->barrier.cancellable) + { + int n = team->barrier.arrived; + if (n > 0) + { + do + gomp_sem_post (&team->barrier.sem1); + while (--n != 0); + gomp_sem_wait (&team->barrier.sem2); + } + team->barrier.cancellable = false; + } + gomp_mutex_unlock (&team->barrier.mutex1); +} diff --git a/libgomp/config/posix/bar.h b/libgomp/config/posix/bar.h index 1a16ca86bba..9fcd4da6723 100644 --- a/libgomp/config/posix/bar.h +++ b/libgomp/config/posix/bar.h @@ -43,9 +43,20 @@ typedef struct unsigned total; unsigned arrived; unsigned generation; + bool cancellable; } gomp_barrier_t; + typedef unsigned int gomp_barrier_state_t; +/* The generation field contains a counter in the high bits, with a few + low bits dedicated to flags. Note that TASK_PENDING and WAS_LAST can + share space because WAS_LAST is never stored back to generation. 
*/ +#define BAR_TASK_PENDING 1 +#define BAR_WAS_LAST 1 +#define BAR_WAITING_FOR_TASK 2 +#define BAR_CANCELLED 4 +#define BAR_INCR 8 + extern void gomp_barrier_init (gomp_barrier_t *, unsigned); extern void gomp_barrier_reinit (gomp_barrier_t *, unsigned); extern void gomp_barrier_destroy (gomp_barrier_t *); @@ -55,22 +66,47 @@ extern void gomp_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t); extern void gomp_team_barrier_wait (gomp_barrier_t *); extern void gomp_team_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t); +extern bool gomp_team_barrier_wait_cancel (gomp_barrier_t *); +extern bool gomp_team_barrier_wait_cancel_end (gomp_barrier_t *, + gomp_barrier_state_t); extern void gomp_team_barrier_wake (gomp_barrier_t *, int); +struct gomp_team; +extern void gomp_team_barrier_cancel (struct gomp_team *); static inline gomp_barrier_state_t gomp_barrier_wait_start (gomp_barrier_t *bar) { unsigned int ret; gomp_mutex_lock (&bar->mutex1); - ret = bar->generation & ~3; - ret += ++bar->arrived == bar->total; + ret = bar->generation & (-BAR_INCR | BAR_CANCELLED); + if (++bar->arrived == bar->total) + ret |= BAR_WAS_LAST; + return ret; +} + +static inline gomp_barrier_state_t +gomp_barrier_wait_cancel_start (gomp_barrier_t *bar) +{ + unsigned int ret; + gomp_mutex_lock (&bar->mutex1); + ret = bar->generation & (-BAR_INCR | BAR_CANCELLED); + if (ret & BAR_CANCELLED) + return ret; + if (++bar->arrived == bar->total) + ret |= BAR_WAS_LAST; return ret; } +static inline void +gomp_team_barrier_wait_final (gomp_barrier_t *bar) +{ + gomp_team_barrier_wait (bar); +} + static inline bool gomp_barrier_last_thread (gomp_barrier_state_t state) { - return state & 1; + return state & BAR_WAS_LAST; } static inline void @@ -85,31 +121,37 @@ gomp_barrier_wait_last (gomp_barrier_t *bar) static inline void gomp_team_barrier_set_task_pending (gomp_barrier_t *bar) { - bar->generation |= 1; + bar->generation |= BAR_TASK_PENDING; } static inline void gomp_team_barrier_clear_task_pending (gomp_barrier_t *bar) { - bar->generation &= ~1; + bar->generation &= ~BAR_TASK_PENDING; } static inline void gomp_team_barrier_set_waiting_for_tasks (gomp_barrier_t *bar) { - bar->generation |= 2; + bar->generation |= BAR_WAITING_FOR_TASK; } static inline bool gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar) { - return (bar->generation & 2) != 0; + return (bar->generation & BAR_WAITING_FOR_TASK) != 0; +} + +static inline bool +gomp_team_barrier_cancelled (gomp_barrier_t *bar) +{ + return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0); } static inline void gomp_team_barrier_done (gomp_barrier_t *bar, gomp_barrier_state_t state) { - bar->generation = (state & ~3) + 4; + bar->generation = (state & -BAR_INCR) + BAR_INCR; } #endif /* GOMP_BARRIER_H */ |
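
The rewritten Linux affinity code above stores each place as a dynamically sized cpu_set_t (sized with CPU_ALLOC_SIZE where available) and applies it through pthread_setaffinity_np. As a rough, self-contained illustration of those same glibc primitives, independent of libgomp's internal gomp_places_list, the sketch below pins the calling thread to logical CPUs 0-3; the CPU range is an arbitrary example, not anything taken from the patch.

/* Hedged sketch: build a dynamically sized cpuset covering CPUs 0-3 and pin
   the calling thread to it, using the glibc primitives the patch relies on
   (CPU_ALLOC_SIZE, CPU_ZERO_S, CPU_SET_S, pthread_setaffinity_np).  */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  long nconf = sysconf (_SC_NPROCESSORS_CONF);
  size_t setsize = CPU_ALLOC_SIZE (nconf);   /* bytes needed for nconf CPUs */
  cpu_set_t *set = CPU_ALLOC (nconf);
  if (set == NULL)
    return 1;

  CPU_ZERO_S (setsize, set);
  for (long cpu = 0; cpu < 4 && cpu < nconf; cpu++)
    CPU_SET_S (cpu, setsize, set);           /* one "place" spanning CPUs 0-3 */

  int err = pthread_setaffinity_np (pthread_self (), setsize, set);
  if (err != 0)
    fprintf (stderr, "pthread_setaffinity_np failed: %d\n", err);
  else
    printf ("pinned to %d logical CPUs\n", CPU_COUNT_S (setsize, set));

  CPU_FREE (set);
  return err != 0;
}

In the patch itself, gomp_init_affinity does essentially this with gomp_places_list[0], while gomp_init_thread_affinity applies the per-thread place through pthread_attr_setaffinity_np on the new thread's attributes.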
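
For the core and socket levels, gomp_affinity_init_level groups CPUs by reading each CPU's thread_siblings_list or core_siblings_list under /sys/devices/system/cpu, a file holding comma-separated ranges such as "0-3,8-11". The following sketch shows only that parsing step; the file path and the print-only callback are illustrative, not libgomp code.

/* Hedged sketch of parsing a sysfs *_siblings_list file, which contains
   comma-separated CPU ranges such as "0-3,8-11".  This version just prints
   each CPU; the patch instead adds them to the place being built.  */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int
parse_siblings_line (const char *line)
{
  const char *p = line;
  while (*p && *p != '\n')
    {
      char *end;
      errno = 0;
      unsigned long first = strtoul (p, &end, 10);
      if (errno || end == p)
        return -1;
      unsigned long last = first;
      if (*end == '-')
        {
          errno = 0;
          last = strtoul (end + 1, &end, 10);
          if (errno || last < first)
            return -1;
        }
      for (unsigned long cpu = first; cpu <= last; cpu++)
        printf ("sibling CPU %lu\n", cpu);
      p = end;
      if (*p == ',')
        ++p;
    }
  return 0;
}

int
main (void)
{
  /* Path shown for illustration; the patch substitutes the CPU number and
     "thread" or "core" depending on the requested level.  */
  FILE *f = fopen ("/sys/devices/system/cpu/cpu0/topology/thread_siblings_list", "r");
  char *line = NULL;
  size_t len = 0;

  if (f && getline (&line, &len, f) > 0)
    parse_siblings_line (line);
  free (line);
  if (f)
    fclose (f);
  return 0;
}

In the patch the parsed CPUs are added to the current place with gomp_affinity_add_cpus and cleared from a working copy of the process cpuset, so each sibling group becomes exactly one place.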
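
Both bar.h variants now document that the generation word keeps a counter in the high bits (stepped by BAR_INCR) above three low flag bits, with BAR_WAS_LAST able to share bit 0 with BAR_TASK_PENDING because it only ever lives in a thread's private copy of the state, never in bar->generation. A tiny standalone sketch of that encoding, reusing the patch's constants but with illustrative helper code of its own:

/* Standalone sketch of the generation-word encoding used by the new barrier
   code: a counter in the high bits, flags in the low three bits.  The
   constants mirror bar.h; everything else here is illustrative only.  */
#include <stdio.h>

#define BAR_TASK_PENDING      1   /* tasks are queued on the barrier */
#define BAR_WAS_LAST          1   /* set only in a thread-local state copy */
#define BAR_WAITING_FOR_TASK  2
#define BAR_CANCELLED         4
#define BAR_INCR              8   /* counter lives in bits 3 and up */

int
main (void)
{
  unsigned int gen = 0;

  /* Advancing to the next generation masks the flags away with -BAR_INCR
     and bumps the counter, as gomp_team_barrier_done does.  */
  gen = (gen & -BAR_INCR) + BAR_INCR;
  printf ("generation %u, flags %u\n", gen / BAR_INCR, gen % BAR_INCR);

  /* Flags are ORed in and tested independently of the counter.  */
  gen |= BAR_CANCELLED;
  printf ("cancelled: %d\n", (gen & BAR_CANCELLED) != 0);

  /* BAR_WAS_LAST reuses bit 0 safely because it is only set in the value a
     thread keeps on its own stack and is never stored back to the shared
     generation word, so it cannot be mistaken for BAR_TASK_PENDING there.  */
  unsigned int state = gen | BAR_WAS_LAST;
  printf ("last thread: %d\n", (state & BAR_WAS_LAST) != 0);
  return 0;
}

This is also why gomp_barrier_wait_start masks the loaded generation with -BAR_INCR | BAR_CANCELLED: the counter and the sticky cancellation flag are carried into the thread's state, while the task-related flags are re-read from bar->generation as needed.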
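
The new cancellable barrier entry points (gomp_team_barrier_wait_cancel and gomp_team_barrier_cancel, backing GOMP_barrier_cancel and friends) exist to support the OpenMP 4.0 cancel construct. As a rough user-level illustration of what they enable, and not code from the patch, a parallel region can be abandoned once one thread reports a problem, provided cancellation is enabled with OMP_CANCELLATION=true in the environment:

/* Illustrative OpenMP 4.0 cancellation example (not part of the patch):
   one thread requests cancellation of the parallel region, the others
   observe it at a cancellation point and leave early.  Run with
   OMP_CANCELLATION=true, otherwise the cancel directives are ignored.  */
#include <omp.h>
#include <stdio.h>

int
main (void)
{
  int bad_input = 0;

  #pragma omp parallel shared(bad_input)
  {
    int tid = omp_get_thread_num ();

    if (tid == 0)
      {
        bad_input = 1;                  /* pretend thread 0 found a problem */
        #pragma omp cancel parallel     /* request cancellation of the team */
      }

    /* Other threads notice the request here and skip the rest of the region;
       barriers in a cancelled region are cancellation points as well, which
       is what the cancellable barrier paths above implement.  */
    #pragma omp cancellation point parallel

    printf ("thread %d finished normally\n", tid);
  }

  return bad_input ? 1 : 0;
}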