author    jakub <jakub@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-10-11 09:26:50 +0000
committer jakub <jakub@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-10-11 09:26:50 +0000
commit    bc7bff742355562cd43792f0814bae55eb21d012 (patch)
tree      2a3d60fbf15f9346c02647762dcc441fe3841855 /gcc/omp-low.c
parent    cf3cae555d03f07e989fd18e4db778fba44d9abd (diff)
libgomp/
* target.c: New file.
* Makefile.am (libgomp_la_SOURCES): Add target.c.
* Makefile.in: Regenerated.
* libgomp_g.h (GOMP_task): Add depend argument.
(GOMP_barrier_cancel, GOMP_loop_end_cancel, GOMP_sections_end_cancel, GOMP_target, GOMP_target_data, GOMP_target_end_data, GOMP_target_update, GOMP_teams, GOMP_parallel_loop_static, GOMP_parallel_loop_dynamic, GOMP_parallel_loop_guided, GOMP_parallel_loop_runtime, GOMP_parallel, GOMP_cancel, GOMP_cancellation_point, GOMP_taskgroup_start, GOMP_taskgroup_end, GOMP_parallel_sections): New prototypes.
* fortran.c (omp_is_initial_device): Add ialias_redirect.
(omp_is_initial_device_): New function.
(ULP, STR1, STR2, ialias_redirect): Removed.
(omp_get_cancellation_, omp_get_proc_bind_, omp_set_default_device_, omp_set_default_device_8_, omp_get_default_device_, omp_get_num_devices_, omp_get_num_teams_, omp_get_team_num_): New functions.
* libgomp.map (GOMP_barrier_cancel, GOMP_loop_end_cancel, GOMP_sections_end_cancel, GOMP_target, GOMP_target_data, GOMP_target_end_data, GOMP_target_update, GOMP_teams): Export @@GOMP_4.0.
(omp_is_initial_device, omp_is_initial_device_, omp_get_cancellation, omp_get_cancellation_, omp_get_proc_bind, omp_get_proc_bind_, omp_set_default_device, omp_set_default_device_, omp_set_default_device_8_, omp_get_default_device, omp_get_default_device_, omp_get_num_devices, omp_get_num_devices_, omp_get_num_teams, omp_get_num_teams_, omp_get_team_num, omp_get_team_num_): Export @@OMP_4.0.
* team.c (struct gomp_thread_start_data): Add place field.
(gomp_thread_start): Clear thr->thread_pool and thr->task before returning. Use gomp_team_barrier_wait_final instead of gomp_team_barrier_wait. Initialize thr->place.
(gomp_new_team): Initialize work_shares_to_free, work_share_cancelled, team_cancelled and task_queued_count fields.
(gomp_free_pool_helper): Clear thr->thread_pool and thr->task before calling pthread_exit.
(gomp_free_thread): No longer static. Use gomp_managed_threads_lock instead of gomp_remaining_threads_lock.
(gomp_team_start): Add flags argument. Set thr->thread_pool->threads_busy to nthreads immediately after creating new pool. Use gomp_managed_threads_lock instead of gomp_remaining_threads_lock. Handle OpenMP 4.0 affinity.
(gomp_team_end): Use gomp_managed_threads_lock instead of gomp_remaining_threads_lock. Use gomp_team_barrier_wait_final instead of gomp_team_barrier_wait. If team->team_cancelled, call gomp_fini_work_share on ws chain starting at team->work_shares_to_free rather than thr->ts.work_share.
(initialize_team): Don't call gomp_sem_init here.
* sections.c (GOMP_parallel_sections_start): Adjust gomp_team_start caller.
(GOMP_parallel_sections, GOMP_sections_end_cancel): New functions.
* env.c (gomp_global_icv): Add default_device_var, target_data and bind_var initializers.
(gomp_cpu_affinity, gomp_cpu_affinity_len): Remove.
(gomp_bind_var_list, gomp_bind_var_list_len, gomp_places_list, gomp_places_list_len): New variables.
(parse_bind_var, parse_one_place, parse_places_var): New functions.
(parse_affinity): Rewritten to construct OMP_PLACES list with unit sized places.
(gomp_cancel_var): New global variable.
(parse_int): New function.
(handle_omp_display_env): New function.
(initialize_env): Use it. Initialize default_device_var. Parse OMP_CANCELLATION env var. Use parse_bind_var to parse OMP_PROC_BIND instead of parse_boolean. Use parse_places_var for OMP_PLACES parsing. Don't call parse_affinity if OMP_PLACES has been successfully parsed (and call gomp_init_affinity in that case).
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device, omp_get_default_device, omp_get_num_devices, omp_get_num_teams, omp_get_team_num, omp_is_initial_device): New functions.
* libgomp.h: Include stdlib.h.
(ialias_ulp, ialias_str1, ialias_str2, ialias_redirect, ialias_call): Define.
(struct target_mem_desc): Forward declare.
(struct gomp_task_icv): Add default_device_var, target_data, bind_var and thread_limit_var fields.
(gomp_get_num_devices): New prototype.
(gomp_cancel_var): New extern decl.
(struct gomp_team): Add work_shares_to_free, work_share_cancelled, team_cancelled and task_queued_count fields. Add comments about task_{,queued_,running_}count.
(gomp_cancel_kind): New enum.
(gomp_work_share_end_cancel): New prototype.
(struct gomp_task): Add next_taskgroup, prev_taskgroup, taskgroup, copy_ctors_done, dependers, depend_hash, depend_count, num_dependees and depend fields.
(struct gomp_taskgroup): New type.
(struct gomp_task_depend_entry, struct gomp_dependers_vec): New types.
(gomp_finish_task): Free depend_hash if non-NULL.
(struct gomp_team_state): Add place_partition_off and place_partition_len fields.
(gomp_bind_var_list, gomp_bind_var_list_len, gomp_places_list, gomp_places_list_len): New extern decls.
(struct gomp_thread): Add place field.
(gomp_cpu_affinity, gomp_cpu_affinity_len): Remove.
(gomp_init_thread_affinity): Add place argument.
(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus, gomp_affinity_remove_cpu, gomp_affinity_copy_place, gomp_affinity_same_place, gomp_affinity_finalize_place_list, gomp_affinity_init_level, gomp_affinity_print_place): New prototypes.
(gomp_team_start): Add flags argument.
(gomp_thread_limit_var, gomp_remaining_threads_count, gomp_remaining_threads_lock): Remove.
(gomp_managed_threads_lock): New variable.
(struct gomp_thread_pool): Add threads_busy field.
(gomp_free_thread): New prototype.
* task.c: Include hashtab.h.
(hash_entry_type): New typedef.
(htab_alloc, htab_free, htab_hash, htab_eq): New inlines.
(gomp_init_task): Clear dependers, depend_hash, depend_count, copy_ctors_done and taskgroup fields.
(GOMP_task): Add depend argument, handle depend clauses. If gomp_team_barrier_cancelled or if its taskgroup has been cancelled, don't queue or start new tasks. Set copy_ctors_done field if needed. Initialize taskgroup field. If copy_ctors_done and already cancelled, don't discard the task. If taskgroup is non-NULL, enqueue the task into taskgroup queue. Increment num_children field in taskgroup. Increment task_queued_count.
(gomp_task_run_pre, gomp_task_run_post_remove_parent, gomp_task_run_post_remove_taskgroup): New inline functions.
(gomp_task_run_post_handle_depend_hash, gomp_task_run_post_handle_dependers, gomp_task_run_post_handle_depend): New functions.
(GOMP_taskwait): Use them. If more than one new task has been queued, wake other threads if needed.
(gomp_barrier_handle_tasks): Likewise. If gomp_team_barrier_cancelled, don't start any new tasks, just free all tasks.
(GOMP_taskgroup_start, GOMP_taskgroup_end): New functions.
* omp_lib.f90.in (omp_proc_bind_kind, omp_proc_bind_false, omp_proc_bind_true, omp_proc_bind_master, omp_proc_bind_close, omp_proc_bind_spread): New params.
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device, omp_get_default_device, omp_get_num_devices, omp_get_num_teams, omp_get_team_num, omp_is_initial_device): New interfaces.
(omp_get_dynamic, omp_get_nested, omp_in_parallel, omp_get_max_threads, omp_get_num_procs, omp_get_num_threads, omp_get_thread_num, omp_get_thread_limit, omp_set_max_active_levels, omp_get_max_active_levels, omp_get_level, omp_get_ancestor_thread_num, omp_get_team_size, omp_get_active_level, omp_in_final): Remove useless use omp_lib_kinds.
* omp.h.in (omp_proc_bind_t): New typedef.
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device, omp_get_default_device, omp_get_num_devices, omp_get_num_teams, omp_get_team_num, omp_is_initial_device): New prototypes.
* loop.c (gomp_parallel_loop_start): Add flags argument, pass it through to gomp_team_start.
(GOMP_parallel_loop_static_start, GOMP_parallel_loop_dynamic_start, GOMP_parallel_loop_guided_start, GOMP_parallel_loop_runtime_start): Adjust gomp_parallel_loop_start callers.
(GOMP_parallel_loop_static, GOMP_parallel_loop_dynamic, GOMP_parallel_loop_guided, GOMP_parallel_loop_runtime, GOMP_loop_end_cancel): New functions.
(GOMP_parallel_end): Add ialias_redirect.
* hashtab.h: New file.
* libgomp.texi (Environment Variables): Minor cleanup, update section refs to OpenMP 4.0rc2.
(OMP_DISPLAY_ENV, GOMP_SPINCOUNT): Document these environment variables.
* work.c (gomp_work_share_end, gomp_work_share_end_nowait): Set team->work_shares_to_free to thr->ts.work_share before calling free_work_share.
(gomp_work_share_end_cancel): New function.
* config/linux/proc.c: Include errno.h.
(gomp_get_cpuset_size, gomp_cpuset_size, gomp_cpusetp): New variables.
(gomp_cpuset_popcount): Add cpusetsize argument, use it instead of sizeof (cpu_set_t) to determine number of iterations. Fix up check extern decl. Use CPU_COUNT_S if available, or CPU_COUNT if gomp_cpuset_size is sizeof (cpu_set_t).
(gomp_init_num_threads): Initialize gomp_cpuset_size, gomp_get_cpuset_size and gomp_cpusetp here, use gomp_cpusetp instead of &cpuset and pass gomp_cpuset_size instead of sizeof (cpu_set_t) to pthread_getaffinity_np. Free and clear gomp_cpusetp if it didn't contain any logical CPUs.
(get_num_procs): Don't call pthread_getaffinity_np if gomp_cpusetp is NULL. Use gomp_cpusetp instead of &cpuset and pass gomp_get_cpuset_size instead of sizeof (cpu_set_t) to pthread_getaffinity_np. Check gomp_places_list instead of gomp_cpu_affinity. Adjust gomp_cpuset_popcount caller.
* config/linux/bar.c (gomp_barrier_wait_end, gomp_barrier_wait_last): Use BAR_* defines.
(gomp_team_barrier_wait_end): Likewise. Clear BAR_CANCELLED from state where needed. Set work_share_cancelled to 0 on last thread.
(gomp_team_barrier_wait_final, gomp_team_barrier_wait_cancel_end, gomp_team_barrier_wait_cancel, gomp_team_barrier_cancel): New functions.
* config/linux/proc.h (gomp_cpuset_popcount): Add attribute_hidden. Add cpusetsize argument.
(gomp_cpuset_size, gomp_cpusetp): Declare.
* config/linux/affinity.c: Include errno.h, stdio.h and string.h.
(affinity_counter): Remove.
(CPU_ISSET_S, CPU_ZERO_S, CPU_SET_S, CPU_CLR_S): Define if CPU_ALLOC_SIZE isn't defined.
(gomp_init_affinity): Rewritten, if gomp_places_list is NULL, try silently create OMP_PLACES=threads, if it is non-NULL afterwards, bind current thread to the first place.
(gomp_init_thread_affinity): Rewritten. Add place argument, just pthread_setaffinity_np to gomp_places_list[place].
(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus, gomp_affinity_remove_cpu, gomp_affinity_copy_place, gomp_affinity_same_place, gomp_affinity_finalize_place_list, gomp_affinity_init_level, gomp_affinity_print_place): New functions.
* config/linux/bar.h (BAR_TASK_PENDING, BAR_WAS_LAST, BAR_WAITING_FOR_TASK, BAR_INCR, BAR_CANCELLED): Define.
(gomp_barrier_t): Add awaited_final field.
(gomp_barrier_init): Initialize awaited_final field.
(gomp_team_barrier_wait_final, gomp_team_barrier_wait_cancel, gomp_team_barrier_wait_cancel_end, gomp_team_barrier_cancel): New prototypes.
(gomp_barrier_wait_start): Preserve BAR_CANCELLED bit. Use BAR_* defines.
(gomp_barrier_wait_cancel_start, gomp_team_barrier_wait_final_start, gomp_team_barrier_cancelled): New inline functions.
(gomp_barrier_last_thread, gomp_team_barrier_set_task_pending, gomp_team_barrier_clear_task_pending, gomp_team_barrier_set_waiting_for_tasks, gomp_team_barrier_waiting_for_tasks, gomp_team_barrier_done): Use BAR_* defines.
* config/posix/bar.c (gomp_barrier_init): Clear cancellable field.
(gomp_barrier_wait_end): Use BAR_* defines.
(gomp_team_barrier_wait_end): Clear BAR_CANCELLED from state. Set work_share_cancelled to 0 on last thread, use __atomic_load_n. Use BAR_* defines.
(gomp_team_barrier_wait_cancel_end, gomp_team_barrier_wait_cancel, gomp_team_barrier_cancel): New functions.
* config/posix/affinity.c (gomp_init_thread_affinity): Add place argument.
(gomp_affinity_alloc, gomp_affinity_init_place, gomp_affinity_add_cpus, gomp_affinity_remove_cpu, gomp_affinity_copy_place, gomp_affinity_same_place, gomp_affinity_finalize_place_list, gomp_affinity_init_level, gomp_affinity_print_place): New stubs.
* config/posix/bar.h (BAR_TASK_PENDING, BAR_WAS_LAST, BAR_WAITING_FOR_TASK, BAR_INCR, BAR_CANCELLED): Define.
(gomp_barrier_t): Add cancellable field.
(gomp_team_barrier_wait_cancel, gomp_team_barrier_wait_cancel_end, gomp_team_barrier_cancel): New prototypes.
(gomp_barrier_wait_start): Preserve BAR_CANCELLED bit.
(gomp_barrier_wait_cancel_start, gomp_team_barrier_wait_final, gomp_team_barrier_cancelled): New inline functions.
(gomp_barrier_wait_start, gomp_barrier_last_thread, gomp_team_barrier_set_task_pending, gomp_team_barrier_clear_task_pending, gomp_team_barrier_set_waiting_for_tasks, gomp_team_barrier_waiting_for_tasks, gomp_team_barrier_done): Use BAR_* defines.
* barrier.c (GOMP_barrier_cancel): New function.
* omp_lib.h.in (omp_proc_bind_kind, omp_proc_bind_false, omp_proc_bind_true, omp_proc_bind_master, omp_proc_bind_close, omp_proc_bind_spread): New params.
(omp_get_cancellation, omp_get_proc_bind, omp_set_default_device, omp_get_default_device, omp_get_num_devices, omp_get_num_teams, omp_get_team_num, omp_is_initial_device): New externals.
* parallel.c (GOMP_parallel, GOMP_cancel, GOMP_cancellation_point): New functions.
(gomp_resolve_num_threads): Adjust for thread_limit now being in icv->thread_limit_var. Use UINT_MAX instead of ULONG_MAX as infinity. If not nested, just return minimum of max_num_threads and icv->thread_limit_var and if thr->thread_pool, set threads_busy to the returned value. Otherwise, don't update atomically gomp_remaining_threads_count, but instead thr->thread_pool->threads_busy.
(GOMP_parallel_end): Adjust for thread_limit now being in icv->thread_limit_var. Use UINT_MAX instead of ULONG_MAX as infinity. Adjust threads_busy in the pool rather than gomp_remaining_threads_count. Remember team->nthreads and call gomp_team_end before adjusting threads_busy, if not nested afterwards, just set it to 1 non-atomically. Add ialias.
(GOMP_parallel_start): Adjust gomp_team_start caller.
* testsuite/libgomp.c/atomic-14.c: Add parens to make it valid.
* testsuite/libgomp.c/affinity-1.c: New test.
* testsuite/libgomp.c/atomic-15.c: New test.
* testsuite/libgomp.c/atomic-16.c: New test.
* testsuite/libgomp.c/atomic-17.c: New test.
* testsuite/libgomp.c/cancel-for-1.c: New test.
* testsuite/libgomp.c/cancel-for-2.c: New test.
* testsuite/libgomp.c/cancel-parallel-1.c: New test.
* testsuite/libgomp.c/cancel-parallel-2.c: New test.
* testsuite/libgomp.c/cancel-parallel-3.c: New test.
* testsuite/libgomp.c/cancel-sections-1.c: New test.
* testsuite/libgomp.c/cancel-taskgroup-1.c: New test.
* testsuite/libgomp.c/cancel-taskgroup-2.c: New test.
* testsuite/libgomp.c/depend-1.c: New test.
* testsuite/libgomp.c/depend-2.c: New test.
* testsuite/libgomp.c/depend-3.c: New test.
* testsuite/libgomp.c/depend-4.c: New test.
* testsuite/libgomp.c/for-1.c: New test.
* testsuite/libgomp.c/for-1.h: New file.
* testsuite/libgomp.c/for-2.c: New test.
* testsuite/libgomp.c/for-2.h: New file.
* testsuite/libgomp.c/for-3.c: New test.
* testsuite/libgomp.c/pr58392.c: New test.
* testsuite/libgomp.c/simd-1.c: New test.
* testsuite/libgomp.c/simd-2.c: New test.
* testsuite/libgomp.c/simd-3.c: New test.
* testsuite/libgomp.c/simd-4.c: New test.
* testsuite/libgomp.c/simd-5.c: New test.
* testsuite/libgomp.c/simd-6.c: New test.
* testsuite/libgomp.c/target-1.c: New test.
* testsuite/libgomp.c/target-2.c: New test.
* testsuite/libgomp.c/target-3.c: New test.
* testsuite/libgomp.c/target-4.c: New test.
* testsuite/libgomp.c/target-5.c: New test.
* testsuite/libgomp.c/target-6.c: New test.
* testsuite/libgomp.c/target-7.c: New test.
* testsuite/libgomp.c/taskgroup-1.c: New test.
* testsuite/libgomp.c/thread-limit-1.c: New test.
* testsuite/libgomp.c/thread-limit-2.c: New test.
* testsuite/libgomp.c/thread-limit-3.c: New test.
* testsuite/libgomp.c/udr-1.c: New test.
* testsuite/libgomp.c/udr-2.c: New test.
* testsuite/libgomp.c/udr-3.c: New test.
* testsuite/libgomp.c++/affinity-1.C: New test.
* testsuite/libgomp.c++/atomic-10.C: New test.
* testsuite/libgomp.c++/atomic-11.C: New test.
* testsuite/libgomp.c++/atomic-12.C: New test.
* testsuite/libgomp.c++/atomic-13.C: New test.
* testsuite/libgomp.c++/atomic-14.C: New test.
* testsuite/libgomp.c++/atomic-15.C: New test.
* testsuite/libgomp.c++/cancel-for-1.C: New test.
* testsuite/libgomp.c++/cancel-for-2.C: New test.
* testsuite/libgomp.c++/cancel-parallel-1.C: New test.
* testsuite/libgomp.c++/cancel-parallel-2.C: New test.
* testsuite/libgomp.c++/cancel-parallel-3.C: New test.
* testsuite/libgomp.c++/cancel-sections-1.C: New test.
* testsuite/libgomp.c++/cancel-taskgroup-1.C: New test.
* testsuite/libgomp.c++/cancel-taskgroup-2.C: New test.
* testsuite/libgomp.c++/cancel-taskgroup-3.C: New test.
* testsuite/libgomp.c++/cancel-test.h: New file.
* testsuite/libgomp.c++/for-9.C: New test.
* testsuite/libgomp.c++/for-10.C: New test.
* testsuite/libgomp.c++/for-11.C: New test.
* testsuite/libgomp.c++/simd-1.C: New test.
* testsuite/libgomp.c++/simd-2.C: New test.
* testsuite/libgomp.c++/simd-3.C: New test.
* testsuite/libgomp.c++/simd-4.C: New test.
* testsuite/libgomp.c++/simd-5.C: New test.
* testsuite/libgomp.c++/simd-6.C: New test.
* testsuite/libgomp.c++/simd-7.C: New test.
* testsuite/libgomp.c++/simd-8.C: New test.
* testsuite/libgomp.c++/target-1.C: New test.
* testsuite/libgomp.c++/target-2.C: New test.
* testsuite/libgomp.c++/target-2-aux.cc: New file.
* testsuite/libgomp.c++/target-3.C: New test.
* testsuite/libgomp.c++/taskgroup-1.C: New test.
* testsuite/libgomp.c++/udr-1.C: New test.
* testsuite/libgomp.c++/udr-2.C: New test.
* testsuite/libgomp.c++/udr-3.C: New test.
* testsuite/libgomp.c++/udr-4.C: New test.
* testsuite/libgomp.c++/udr-5.C: New test.
* testsuite/libgomp.c++/udr-6.C: New test.
* testsuite/libgomp.c++/udr-7.C: New test.
* testsuite/libgomp.c++/udr-8.C: New test.
* testsuite/libgomp.c++/udr-9.C: New test.

gcc/
* tree-pretty-print.c (dump_omp_clause): Handle OMP_CLAUSE__LOOPTEMP_ and new OpenMP 4.0 clauses, handle UDR OMP_CLAUSE_REDUCTION, formatting fixes, use pp_colon instead of pp_character (..., ':'), similarly pp_right_paren.
(dump_generic_node): Handle OMP_DISTRIBUTE, OMP_TEAMS, OMP_TARGET_DATA, OMP_TARGET, OMP_TARGET_UPDATE, OMP_TASKGROUP, allow OMP_FOR_INIT to be NULL, handle OMP_ATOMIC_SEQ_CST.
* tree.c (omp_clause_num_ops, omp_clause_code_name): Add OpenMP 4.0 clauses.
(omp_declare_simd_clauses_equal, omp_remove_redundant_declare_simd_attrs): New functions.
(attribute_value_equal): Use omp_declare_simd_clauses_equal.
(walk_tree_1): Handle new OpenMP 4.0 clauses.
* tree.h (OMP_LOOP_CHECK): Define.
(OMP_FOR_BODY, OMP_FOR_CLAUSES, OMP_FOR_INIT, OMP_FOR_COND, OMP_FOR_INCR, OMP_FOR_PRE_BODY): Use it.
(OMP_TASKGROUP_BODY, OMP_TEAMS_BODY, OMP_TEAMS_CLAUSES, OMP_TARGET_DATA_BODY, OMP_TARGET_DATA_CLAUSES, OMP_TARGET_BODY, OMP_TARGET_CLAUSES, OMP_TARGET_UPDATE_CLAUSES, OMP_CLAUSE_SIZE, OMP_ATOMIC_SEQ_CST, OMP_CLAUSE_DEPEND_KIND, OMP_CLAUSE_MAP_KIND, OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION, OMP_CLAUSE_PROC_BIND_KIND, OMP_CLAUSE_REDUCTION_OMP_ORIG_REF, OMP_CLAUSE_ALIGNED_ALIGNMENT, OMP_CLAUSE_NUM_TEAMS_EXPR, OMP_CLAUSE_THREAD_LIMIT_EXPR, OMP_CLAUSE_DEVICE_ID, OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR, OMP_CLAUSE_SIMDLEN_EXPR): Define.
(OMP_CLAUSE_DECL): Change range up to OMP_CLAUSE__LOOPTEMP_.
(omp_remove_redundant_declare_simd_attrs): New prototype.
* gimple.def (GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS): New codes.
(GIMPLE_OMP_RETURN): Use GSS_OMP_ATOMIC_STORE instead of GSS_BASE.
* omp-low.c (struct omp_context): Add cancel_label and cancellable fields.
(target_nesting_level): New variable.
(extract_omp_for_data): Handle GF_OMP_FOR_KIND_DISTRIBUTE and OMP_CLAUSE_DIST_SCHEDULE. Don't fallback to library implementation for collapse > 1 static schedule unless ordered.
(get_ws_args_for): Add par_stmt argument. Handle combined loops.
(determine_parallel_type): Adjust get_ws_args_for caller.
(install_var_field): Handle mask & 4 for double indirection.
(scan_sharing_clauses): Ignore shared clause on teams construct. Handle OMP_CLAUSE__LOOPTEMP_ and new OpenMP 4.0 clauses.
(create_omp_child_function): If inside target or declare target constructs, set "omp declare target" attribute on the child function.
(find_combined_for): New function.
(scan_omp_parallel): Handle combined loops.
(scan_omp_target, scan_omp_teams): New functions.
(check_omp_nesting_restrictions): Check new OpenMP 4.0 nesting restrictions and set ctx->cancellable for cancellable constructs.
(scan_omp_1_stmt): Call check_omp_nesting_restrictions also on selected builtin calls. Handle GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS.
(build_omp_barrier): Add lhs argument, return gimple rather than tree.
(omp_clause_aligned_alignment): New function.
(lower_rec_simd_input_clauses): Only call SET_DECL_VALUE_EXPR on decls.
(lower_rec_input_clauses): Add FD argument. Ignore shared clauses on teams constructs. Handle user defined reductions and new OpenMP 4.0 clauses.
(lower_reduction_clauses): Don't set placeholder to address of ref if it has already the right type.
(lower_send_clauses): Handle OMP_CLAUSE__LOOPTEMP_.
(expand_parallel_call): Use the new non-_start suffixed builtins, handle OMP_CLAUSE_PROC_BIND, don't call the outlined function and GOMP_parallel_end after the call.
(expand_task_call): Handle OMP_CLAUSE_DEPEND.
(expand_omp_for_init_counts): Handle combined loops.
(expand_omp_for_init_vars): Add inner_stmt argument, handle combined loops.
(expand_omp_for_generic): Likewise. Use GOMP_loop_end_cancel at the end of cancellable loops.
(expand_omp_for_static_nochunk, expand_omp_for_static_chunk): Likewise. Handle collapse > 1 loops.
(expand_omp_simd): Handle combined loops.
(expand_omp_for): Add inner_stmt argument, adjust callers of expand_omp_for* functions, use expand_omp_for_static*chunk even for collapse > 1 unless ordered.
(expand_omp_sections): Use GOMP_sections_end_cancel at the end of cancellable sections.
(expand_omp_single): Remove need_barrier variable, just rely on gimple_omp_return_nowait_p. Adjust build_omp_barrier caller.
(expand_omp_synch): Allow GIMPLE_OMP_TASKGROUP and GIMPLE_OMP_TEAMS.
(expand_omp_atomic_load, expand_omp_atomic_store, expand_omp_atomic_fetch_op): Handle gimple_omp_atomic_seq_cst_p.
(expand_omp_target): New function.
(expand_omp): Handle combined loops. Handle GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TEAMS, GIMPLE_OMP_TARGET.
(build_omp_regions_1): Immediately close region for GF_OMP_TARGET_KIND_UPDATE.
(maybe_add_implicit_barrier_cancel): New function.
(lower_omp_sections): Adjust lower_rec_input_clauses caller. Handle cancellation.
(lower_omp_single): Likewise. Add clobber after the barrier.
(lower_omp_taskgroup): New function.
(lower_omp_for): Handle combined loops. Adjust lower_rec_input_clauses caller. Handle cancellation.
(lower_depend_clauses): New function.
(lower_omp_taskreg): Lower depend clauses. Adjust lower_rec_input_clauses caller. Add clobber after the call. Handle cancellation.
(lower_omp_target, lower_omp_teams): New functions.
(lower_omp_1): Handle cancellation. Handle GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GOMP_barrier, GOMP_cancel and GOMP_cancellation_point calls.
(lower_omp): Fold stmts inside of target region.
(diagnose_sb_1, diagnose_sb_2): Handle GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET and GIMPLE_OMP_TEAMS.
* builtin-types.def (DEF_FUNCTION_TYPE_8): Document.
(BT_FN_VOID_OMPFN_PTR_UINT, BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG, BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): Remove.
(BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT, BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT, BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT, BT_FN_BOOL_INT, BT_FN_BOOL_INT_BOOL, BT_FN_VOID_UINT_UINT, BT_FN_VOID_INT_PTR_SIZE_PTR_PTR_PTR, BT_FN_VOID_INT_OMPFN_PTR_SIZE_PTR_PTR_PTR, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR): New.
* tree-ssa-alias.c (ref_maybe_used_by_call_p_1, call_may_clobber_ref_p_1): Handle BUILT_IN_GOMP_BARRIER_CANCEL, BUILT_IN_GOMP_TASKGROUP_END, BUILT_IN_GOMP_LOOP_END_CANCEL, BUILT_IN_GOMP_SECTIONS_END_CANCEL. Don't handle BUILT_IN_GOMP_PARALLEL_END.
* gimple-low.c (lower_stmt): Handle GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET and GIMPLE_OMP_TEAMS.
* gimple-pretty-print.c (dump_gimple_omp_for): Handle GF_OMP_FOR_KIND_DISTRIBUTE.
(dump_gimple_omp_target, dump_gimple_omp_teams): New functions.
(dump_gimple_omp_block): Handle GIMPLE_OMP_TASKGROUP.
(dump_gimple_omp_return): Print lhs if it has any.
(dump_gimple_omp_atomic_load, dump_gimple_omp_atomic_store): Handle gimple_omp_atomic_seq_cst_p.
(pp_gimple_stmt_1): Handle GIMPLE_OMP_TASKGROUP, GIMPLE_OMP_TARGET and GIMPLE_OMP_TEAMS.
* langhooks.c (lhd_omp_mappable_type): New function.
* tree-vectorizer.c (struct simd_array_to_simduid): Fix up comment.
* langhooks.h (struct lang_hooks_for_types): Add omp_mappable_type hook.
* gimplify.c (enum gimplify_omp_var_data): Add GOVD_MAP, GOVD_ALIGNED and GOVD_MAP_TO_ONLY.
(enum omp_region_type): Add ORT_TEAMS, ORT_TARGET_DATA and ORT_TARGET.
(struct gimplify_omp_ctx): Add combined_loop field.
(gimplify_call_expr, gimplify_modify_expr): Don't call fold_stmt on stmts inside of target region.
(is_gimple_stmt): Return true for OMP_DISTRIBUTE and OMP_TASKGROUP.
(omp_firstprivatize_variable): Handle GOVD_MAP, GOVD_ALIGNED, ORT_TARGET and ORT_TARGET_DATA.
(omp_add_variable): Avoid checks on readding var for GOVD_ALIGNED. Handle GOVD_MAP.
(omp_notice_threadprivate_variable): Complain about threadprivate variables in target region.
(omp_notice_variable): Complain about vars with non-mappable type in target region. Handle ORT_TEAMS, ORT_TARGET and ORT_TARGET_DATA.
(omp_check_private): Ignore ORT_TARGET* regions.
(gimplify_scan_omp_clauses, gimplify_adjust_omp_clauses_1, gimplify_adjust_omp_clauses): Handle new OpenMP 4.0 clauses.
(find_combined_omp_for): New function.
(gimplify_omp_for): Handle gimplification of combined loops.
(gimplify_omp_workshare): Gimplify also OMP_TARGET, OMP_TARGET_DATA, OMP_TEAMS.
(gimplify_omp_target_update): New function.
(gimplify_omp_atomic): Handle OMP_ATOMIC_SEQ_CST.
(gimplify_expr): Handle OMP_DISTRIBUTE, OMP_TARGET, OMP_TARGET_DATA, OMP_TARGET_UPDATE, OMP_TEAMS, OMP_TASKGROUP.
(gimplify_body): If fndecl has "omp declare target" attribute, add implicit ORT_TARGET context around it.
* tree.def (OMP_DISTRIBUTE, OMP_TEAMS, OMP_TARGET_DATA, OMP_TARGET, OMP_TASKGROUP, OMP_TARGET_UPDATE): New tree codes.
* tree-nested.c (convert_nonlocal_reference_stmt, convert_local_reference_stmt, convert_gimple_call): Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* omp-builtins.def (BUILT_IN_GOMP_TASK): Use BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR instead of BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT.
(BUILT_IN_GOMP_TARGET, BUILT_IN_GOMP_TARGET_DATA, BUILT_IN_GOMP_TARGET_END_DATA, BUILT_IN_GOMP_TARGET_UPDATE, BUILT_IN_GOMP_TEAMS, BUILT_IN_GOMP_BARRIER_CANCEL, BUILT_IN_GOMP_LOOP_END_CANCEL, BUILT_IN_GOMP_SECTIONS_END_CANCEL, BUILT_IN_OMP_GET_TEAM_NUM, BUILT_IN_OMP_GET_NUM_TEAMS, BUILT_IN_GOMP_TASKGROUP_START, BUILT_IN_GOMP_TASKGROUP_END, BUILT_IN_GOMP_PARALLEL_LOOP_STATIC, BUILT_IN_GOMP_PARALLEL_LOOP_DYNAMIC, BUILT_IN_GOMP_PARALLEL_LOOP_GUIDED, BUILT_IN_GOMP_PARALLEL_LOOP_RUNTIME, BUILT_IN_GOMP_PARALLEL, BUILT_IN_GOMP_PARALLEL_SECTIONS, BUILT_IN_GOMP_CANCEL, BUILT_IN_GOMP_CANCELLATION_POINT): New built-ins.
(BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START, BUILT_IN_GOMP_PARALLEL_LOOP_DYNAMIC_START, BUILT_IN_GOMP_PARALLEL_LOOP_GUIDED_START, BUILT_IN_GOMP_PARALLEL_LOOP_RUNTIME_START, BUILT_IN_GOMP_PARALLEL_START, BUILT_IN_GOMP_PARALLEL_END, BUILT_IN_GOMP_PARALLEL_SECTIONS_START): Remove.
* tree-inline.c (remap_gimple_stmt, estimate_num_insns): Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* gimple.c (gimple_build_omp_taskgroup, gimple_build_omp_target, gimple_build_omp_teams): New functions.
(walk_gimple_op): Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP. Walk optional lhs on GIMPLE_OMP_RETURN.
(walk_gimple_stmt, gimple_copy): Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* gimple.h (enum gf_mask): GF_OMP_FOR_KIND_DISTRIBUTE, GF_OMP_FOR_COMBINED, GF_OMP_FOR_COMBINED_INTO, GF_OMP_TARGET_KIND_MASK, GF_OMP_TARGET_KIND_REGION, GF_OMP_TARGET_KIND_DATA, GF_OMP_TARGET_KIND_UPDATE, GF_OMP_ATOMIC_SEQ_CST): New.
(gimple_build_omp_taskgroup, gimple_build_omp_target, gimple_build_omp_teams): New prototypes.
(gimple_has_substatements): Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
(gimple_omp_subcode): Use GIMPLE_OMP_TEAMS instead of GIMPLE_OMP_SINGLE as end of range.
(gimple_omp_return_set_lhs, gimple_omp_return_lhs, gimple_omp_return_lhs_ptr, gimple_omp_atomic_seq_cst_p, gimple_omp_atomic_set_seq_cst, gimple_omp_for_combined_p, gimple_omp_for_set_combined_p, gimple_omp_for_combined_into_p, gimple_omp_for_set_combined_into_p, gimple_omp_target_clauses, gimple_omp_target_clauses_ptr, gimple_omp_target_set_clauses, gimple_omp_target_kind, gimple_omp_target_set_kind, gimple_omp_target_child_fn, gimple_omp_target_child_fn_ptr, gimple_omp_target_set_child_fn, gimple_omp_target_data_arg, gimple_omp_target_data_arg_ptr, gimple_omp_target_set_data_arg, gimple_omp_teams_clauses, gimple_omp_teams_clauses_ptr, gimple_omp_teams_set_clauses): New inlines.
(CASE_GIMPLE_OMP): Add GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* tree-core.h (enum omp_clause_code): Add new OpenMP 4.0 clause codes.
(enum omp_clause_depend_kind, enum omp_clause_map_kind, enum omp_clause_proc_bind_kind): New.
(union omp_clause_subcode): Add depend_kind, map_kind and proc_bind_kind fields.
* tree-cfg.c (make_edges): Handle GIMPLE_OMP_TARGET, GIMPLE_OMP_TEAMS and GIMPLE_OMP_TASKGROUP.
* langhooks-def.h (lhd_omp_mappable_type): New prototype.
(LANG_HOOKS_OMP_MAPPABLE_TYPE): Define.
(LANG_HOOKS_FOR_TYPES_INITIALIZER): Add it.

gcc/c-family/
* c-cppbuiltin.c (c_cpp_builtins): Predefine _OPENMP to 201307 instead of 201107.
* c-common.c (DEF_FUNCTION_TYPE_8): Define.
(c_common_attribute_table): Add "omp declare target" and "omp declare simd" attributes.
(handle_omp_declare_target_attribute, handle_omp_declare_simd_attribute): New functions.
* c-omp.c: Include c-pragma.h.
(c_finish_omp_taskgroup): New function.
(c_finish_omp_atomic): Add swapped argument, if true, build the operation first with rhs, lhs arguments and use NOP_EXPR build_modify_expr.
(c_finish_omp_for): Add code argument, pass it down to make_code.
(c_omp_split_clauses): New function.
(c_split_parallel_clauses): Removed.
(c_omp_declare_simd_clause_cmp, c_omp_declare_simd_clauses_to_numbers, c_omp_declare_simd_clauses_to_decls): New functions.
* c-common.h (omp_clause_mask): New type.
(OMP_CLAUSE_MASK_1): Define.
(omp_clause_mask::omp_clause_mask, omp_clause_mask::operator &=, omp_clause_mask::operator |=, omp_clause_mask::operator ~, omp_clause_mask::operator |, omp_clause_mask::operator &, omp_clause_mask::operator <<, omp_clause_mask::operator >>, omp_clause_mask::operator ==): New methods.
(enum c_omp_clause_split): New.
(c_finish_omp_taskgroup): New prototype.
(c_finish_omp_atomic): Add swapped argument.
(c_finish_omp_for): Add code argument.
(c_omp_split_clauses): New prototype.
(c_split_parallel_clauses): Removed.
(c_omp_declare_simd_clauses_to_numbers, c_omp_declare_simd_clauses_to_decls): New prototypes.
* c-pragma.c (omp_pragmas): Add new OpenMP 4.0 constructs.
* c-pragma.h (enum pragma_kind): Add PRAGMA_OMP_CANCEL, PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_DECLARE_REDUCTION, PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_END_DECLARE_TARGET, PRAGMA_OMP_SIMD, PRAGMA_OMP_TARGET, PRAGMA_OMP_TASKGROUP and PRAGMA_OMP_TEAMS. Remove PRAGMA_OMP_PARALLEL_FOR and PRAGMA_OMP_PARALLEL_SECTIONS.
(enum pragma_omp_clause): Add PRAGMA_OMP_CLAUSE_ALIGNED, PRAGMA_OMP_CLAUSE_DEPEND, PRAGMA_OMP_CLAUSE_DEVICE, PRAGMA_OMP_CLAUSE_DIST_SCHEDULE, PRAGMA_OMP_CLAUSE_FOR, PRAGMA_OMP_CLAUSE_FROM, PRAGMA_OMP_CLAUSE_INBRANCH, PRAGMA_OMP_CLAUSE_LINEAR, PRAGMA_OMP_CLAUSE_MAP, PRAGMA_OMP_CLAUSE_NOTINBRANCH, PRAGMA_OMP_CLAUSE_NUM_TEAMS, PRAGMA_OMP_CLAUSE_PARALLEL, PRAGMA_OMP_CLAUSE_PROC_BIND, PRAGMA_OMP_CLAUSE_SAFELEN, PRAGMA_OMP_CLAUSE_SECTIONS, PRAGMA_OMP_CLAUSE_SIMDLEN, PRAGMA_OMP_CLAUSE_TASKGROUP, PRAGMA_OMP_CLAUSE_THREAD_LIMIT, PRAGMA_OMP_CLAUSE_TO and PRAGMA_OMP_CLAUSE_UNIFORM.

gcc/ada/
* gcc-interface/utils.c (DEF_FUNCTION_TYPE_8): Define.

gcc/fortran/
* trans-openmp.c (gfc_omp_clause_default_ctor, gfc_omp_clause_dtor): Return NULL for OMP_CLAUSE_REDUCTION.
* f95-lang.c (ATTR_NULL, DEF_FUNCTION_TYPE_8): Define.
* types.def (DEF_FUNCTION_TYPE_8): Document.
(BT_FN_VOID_OMPFN_PTR_UINT, BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG, BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT): Remove.
(BT_FN_VOID_OMPFN_PTR_UINT_UINT_UINT, BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_UINT, BT_FN_VOID_OMPFN_PTR_UINT_LONG_LONG_LONG_LONG_UINT, BT_FN_BOOL_INT, BT_FN_BOOL_INT_BOOL, BT_FN_VOID_UINT_UINT, BT_FN_VOID_INT_PTR_SIZE_PTR_PTR_PTR, BT_FN_VOID_INT_OMPFN_PTR_SIZE_PTR_PTR_PTR, BT_FN_VOID_OMPFN_PTR_OMPCPYFN_LONG_LONG_BOOL_UINT_PTR): New.

gcc/lto/
* lto-lang.c (DEF_FUNCTION_TYPE_8): Define.

gcc/c/
* c-lang.h (current_omp_declare_target_attribute): New extern decl.
* c-parser.c: Include c-lang.h.
(struct c_parser): Change tokens to c_token *. Add tokens_buf field. Change tokens_avail type to unsigned int.
(c_parser_consume_token): If parser->tokens isn't &parser->tokens_buf[0], increment parser->tokens.
(c_parser_consume_pragma): Likewise.
(enum pragma_context): Add pragma_struct and pragma_param.
(c_parser_external_declaration): Adjust c_parser_declaration_or_fndef caller.
(c_parser_declaration_or_fndef): Add omp_declare_simd_clauses argument, if it is non-vNULL vector, call c_finish_omp_declare_simd. Adjust recursive call.
(c_parser_struct_or_union_specifier): Use pragma_struct instead of pragma_external.
(c_parser_parameter_declaration): Use pragma_param instead of pragma_external.
(c_parser_compound_statement_nostart, c_parser_label, c_parser_for_statement): Adjust c_parser_declaration_or_fndef callers.
(c_parser_expr_no_commas): Add omp_atomic_lhs argument, pass it through to c_parser_conditional_expression.
(c_parser_conditional_expression): Add omp_atomic_lhs argument, pass it through to c_parser_binary_expression. Adjust recursive call.
(c_parser_binary_expression): Remove prec argument, add omp_atomic_lhs argument instead. Always start from PREC_NONE, if omp_atomic_lhs is non-NULL and one of the arguments of toplevel binop matches it, use build2 instead of parser_build_binary_op.
(c_parser_pragma): Handle PRAGMA_OMP_CANCEL, PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_TARGET, PRAGMA_OMP_END_DECLARE_TARGET, PRAGMA_OMP_DECLARE_REDUCTION. Handle pragma_struct and pragma_param the same as pragma_external.
(c_parser_omp_clause_name): Parse new OpenMP 4.0 clause names.
(c_parser_omp_variable_list): Parse array sections for OMP_CLAUSE_{DEPEND,MAP,TO,FROM} clauses.
(c_parser_omp_clause_collapse): Fully fold collapse expression.
(c_parser_omp_clause_reduction): Handle user defined reductions.
(c_parser_omp_clause_branch, c_parser_omp_clause_cancelkind, c_parser_omp_clause_num_teams, c_parser_omp_clause_thread_limit, c_parser_omp_clause_aligned, c_parser_omp_clause_linear, c_parser_omp_clause_safelen, c_parser_omp_clause_simdlen, c_parser_omp_clause_depend, c_parser_omp_clause_map, c_parser_omp_clause_device, c_parser_omp_clause_dist_schedule, c_parser_omp_clause_proc_bind, c_parser_omp_clause_to, c_parser_omp_clause_from, c_parser_omp_clause_uniform): New functions.
(c_parser_omp_all_clauses): Add finish_p argument. Don't call c_finish_omp_clauses if it is false. Handle new OpenMP 4.0 clauses.
(c_parser_omp_atomic): Parse seq_cst clause, pass true if it is present to c_finish_omp_atomic. Handle OpenMP 4.0 atomic forms.
(c_parser_omp_for_loop): Add CODE argument, pass it through to c_finish_omp_for. Change last argument to cclauses, and adjust uses to grab parallel clauses from the array of all the split clauses. Adjust c_parser_binary_expression, c_parser_declaration_or_fndef and c_finish_omp_for callers.
(omp_split_clauses): New function.
(c_parser_omp_simd): New function.
(c_parser_omp_for): Add p_name, mask and cclauses arguments. Allow the function to be called also when parsing combined constructs, and call c_parser_omp_simd when parsing for simd.
(c_parser_omp_sections_scope): If section-sequence doesn't start with #pragma omp section, require exactly one structured-block instead of sequence of statements.
(c_parser_omp_sections): Add p_name, mask and cclauses arguments. Allow the function to be called also when parsing combined constructs.
(c_parser_omp_parallel): Add p_name, mask and cclauses arguments. Allow the function to be called also when parsing combined constructs.
(c_parser_omp_taskgroup, c_parser_omp_cancel, c_parser_omp_cancellation_point, c_parser_omp_distribute, c_parser_omp_teams, c_parser_omp_target_data, c_parser_omp_target_update, c_parser_omp_target, c_parser_omp_declare_simd, c_finish_omp_declare_simd, c_parser_omp_declare_target, c_parser_omp_end_declare_target, c_parser_omp_declare_reduction, c_parser_omp_declare): New functions.
(c_parser_omp_construct): Add p_name and mask vars. Handle PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP, PRAGMA_OMP_TEAMS. Adjust c_parser_omp_for, c_parser_omp_parallel and c_parser_omp_sections callers.
(c_parse_file): Initialize tparser.tokens and the_parser->tokens here.
(OMP_FOR_CLAUSE_MASK, OMP_SECTIONS_CLAUSE_MASK, OMP_SINGLE_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1.
(OMP_PARALLEL_CLAUSE_MASK): Likewise. Add OMP_CLAUSE_PROC_BIND.
(OMP_TASK_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1. Add OMP_CLAUSE_DEPEND.
(OMP_SIMD_CLAUSE_MASK, OMP_CANCEL_CLAUSE_MASK, OMP_CANCELLATION_POINT_CLAUSE_MASK, OMP_DISTRIBUTE_CLAUSE_MASK, OMP_TEAMS_CLAUSE_MASK, OMP_TARGET_DATA_CLAUSE_MASK, OMP_TARGET_UPDATE_CLAUSE_MASK, OMP_TARGET_CLAUSE_MASK, OMP_DECLARE_SIMD_CLAUSE_MASK): Define.
* c-typeck.c: Include tree-inline.h.
(c_finish_omp_cancel, c_finish_omp_cancellation_point, handle_omp_array_sections_1, handle_omp_array_sections, c_clone_omp_udr, c_find_omp_placeholder_r): New functions.
(c_finish_omp_clauses): Handle new OpenMP 4.0 clauses and user defined reductions.
(c_tree_equal): New function.
* c-tree.h (temp_store_parm_decls, temp_pop_parm_decls, c_finish_omp_cancel, c_finish_omp_cancellation_point, c_tree_equal, c_omp_reduction_id, c_omp_reduction_decl, c_omp_reduction_lookup, c_check_omp_declare_reduction_r): New prototypes.
* c-decl.c (current_omp_declare_target_attribute): New variable.
(c_decl_attributes): New function.
(start_decl, start_function): Use it instead of decl_attributes.
(temp_store_parm_decls, temp_pop_parm_decls, c_omp_reduction_id, c_omp_reduction_decl, c_omp_reduction_lookup, c_check_omp_declare_reduction_r): New functions.

gcc/cp/
* decl.c (duplicate_decls): Error out for redeclaration of UDRs.
(declare_simd_adjust_this): New function.
(grokfndecl): If "omp declare simd" attribute is present, call declare_simd_adjust_this if needed and c_omp_declare_simd_clauses_to_numbers.
* cp-array-notation.c (expand_array_notation_exprs): Handle OMP_TASKGROUP.
* cp-gimplify.c (cp_gimplify_expr): Handle OMP_SIMD and OMP_DISTRIBUTE. Handle is_invisiref_parm decls in OMP_CLAUSE_REDUCTION.
(cp_genericize_r): Handle OMP_SIMD and OMP_DISTRIBUTE like OMP_FOR.
(cxx_omp_privatize_by_reference): Return true for is_invisiref_parm decls.
(cxx_omp_finish_clause): Adjust cxx_omp_create_clause_info caller.
* pt.c (apply_late_template_attributes): For "omp declare simd" attribute call tsubst_omp_clauses, c_omp_declare_simd_clauses_to_decls, finish_omp_clauses and c_omp_declare_simd_clauses_to_numbers.
(instantiate_class_template_1): Call cp_check_omp_declare_reduction for UDRs.
(tsubst_decl): Handle UDRs.
(tsubst_omp_clauses): Add declare_simd argument, if true don't call finish_omp_clauses. Handle new OpenMP 4.0 clauses. Handle non-NULL OMP_CLAUSE_REDUCTION_PLACEHOLDER on OMP_CLAUSE_REDUCTION.
(tsubst_expr): For UDRs call pushdecl and cp_check_omp_declare_reduction. Adjust tsubst_omp_clauses callers. Handle OMP_SIMD, OMP_DISTRIBUTE, OMP_TEAMS, OMP_TARGET_DATA, OMP_TARGET_UPDATE, OMP_TARGET, OMP_TASKGROUP. Adjust finish_omp_atomic caller.
(tsubst_omp_udr): New function.
(instantiate_decl): For UDRs at block scope, don't call start_preparsed_function/finish_function. Call tsubst_omp_udr.
* semantics.c (cxx_omp_create_clause_info): Add need_dtor argument, use it instead of need_default_ctor || need_copy_ctor.
(struct cp_check_omp_declare_reduction_data): New type.
(handle_omp_array_sections_1, handle_omp_array_sections, omp_reduction_id, omp_reduction_lookup, cp_remove_omp_priv_cleanup_stmt, cp_check_omp_declare_reduction_r, cp_check_omp_declare_reduction, clone_omp_udr, find_omp_placeholder_r, finish_omp_reduction_clause): New functions.
(finish_omp_clauses): Handle new OpenMP 4.0 clauses and user defined reductions.
(finish_omp_for): Add CODE argument, use it instead of hardcoded OMP_FOR. Adjust c_finish_omp_for caller.
(finish_omp_atomic): Add seq_cst argument, adjust c_finish_omp_atomic callers, handle seq_cst and new OpenMP 4.0 atomic variants.
(finish_omp_cancel, finish_omp_cancellation_point): New functions.
* decl2.c (mark_used): Force immediate instantiation of DECL_OMP_DECLARE_REDUCTION_P decls.
(is_late_template_attribute): Return true for "omp declare simd" attribute.
(cp_omp_mappable_type): New function.
(cplus_decl_attributes): Add implicit "omp declare target" attribute if requested.
* parser.c (cp_debug_parser): Print parser->colon_doesnt_start_class_def_p.
(cp_ensure_no_omp_declare_simd, cp_finalize_omp_declare_simd): New functions.
(enum pragma_context): Add pragma_member and pragma_objc_icode.
(cp_parser_binary_expression): Handle no_toplevel_fold_p even for binary operations other than comparison.
(cp_parser_linkage_specification): Call cp_ensure_no_omp_declare_simd if needed.
(cp_parser_namespace_definition): Likewise.
(cp_parser_init_declarator): Call cp_finalize_omp_declare_simd.
(cp_parser_direct_declarator): Pass declarator to cp_parser_late_return_type_opt.
(cp_parser_late_return_type_opt): Add declarator argument, call cp_parser_late_parsing_omp_declare_simd for declare simd.
(cp_parser_class_specifier_1): Call cp_ensure_no_omp_declare_simd. Parse UDRs before all other methods.
(cp_parser_member_specification_opt): Use pragma_member instead of pragma_external.
(cp_parser_member_declaration): Call cp_finalize_omp_declare_simd.
(cp_parser_function_definition_from_specifiers_and_declarator, cp_parser_save_member_function_body): Likewise.
(cp_parser_late_parsing_for_member): Handle UDRs specially.
(cp_parser_next_token_starts_class_definition_p): Don't allow CPP_COLON if colon_doesnt_start_class_def_p flag is true.
(cp_parser_objc_interstitial_code): Use pragma_objc_icode instead of pragma_external.
(cp_parser_omp_clause_name): Parse new OpenMP 4.0 clause names.
(cp_parser_omp_var_list_no_open): Parse array sections for OMP_CLAUSE_{DEPEND,MAP,TO,FROM} clauses. Add COLON argument, if non-NULL, allow parsing to end with a colon rather than close paren.
(cp_parser_omp_var_list): Adjust cp_parser_omp_var_list_no_open caller.
(cp_parser_omp_clause_reduction): Handle user defined reductions.
(cp_parser_omp_clause_branch, cp_parser_omp_clause_cancelkind, cp_parser_omp_clause_num_teams, cp_parser_omp_clause_thread_limit, cp_parser_omp_clause_aligned, cp_parser_omp_clause_linear, cp_parser_omp_clause_safelen, cp_parser_omp_clause_simdlen, cp_parser_omp_clause_depend, cp_parser_omp_clause_map, cp_parser_omp_clause_device, cp_parser_omp_clause_dist_schedule, cp_parser_omp_clause_proc_bind, cp_parser_omp_clause_to, cp_parser_omp_clause_from, cp_parser_omp_clause_uniform): New functions.
(cp_parser_omp_all_clauses): Add finish_p argument. Don't call finish_omp_clauses if it is false. Handle new OpenMP 4.0 clauses.
(cp_parser_omp_atomic): Parse seq_cst clause, pass true if it is present to finish_omp_atomic. Handle new OpenMP 4.0 atomic forms.
(cp_parser_omp_for_loop): Add CODE argument, pass it through to finish_omp_for. Change last argument to cclauses, and adjust uses to grab parallel clauses from the array of all the split clauses.
(cp_omp_split_clauses): New function.
(cp_parser_omp_simd): New function.
(cp_parser_omp_for): Add p_name, mask and cclauses arguments. Allow the function to be called also when parsing combined constructs, and call c_parser_omp_simd when parsing for simd.
(cp_parser_omp_sections_scope): If section-sequence doesn't start with #pragma omp section, require exactly one structured-block instead of sequence of statements.
(cp_parser_omp_sections): Add p_name, mask and cclauses arguments. Allow the function to be called also when parsing combined constructs.
(cp_parser_omp_parallel): Add p_name, mask and cclauses arguments. Allow the function to be called also when parsing combined constructs.
(cp_parser_omp_taskgroup, cp_parser_omp_cancel, cp_parser_omp_cancellation_point, cp_parser_omp_distribute, cp_parser_omp_teams, cp_parser_omp_target_data, cp_parser_omp_target_update, cp_parser_omp_target, cp_parser_omp_declare_simd, cp_parser_late_parsing_omp_declare_simd, cp_parser_omp_declare_target, cp_parser_omp_end_declare_target, cp_parser_omp_declare_reduction_exprs, cp_parser_omp_declare_reduction, cp_parser_omp_declare): New functions.
(cp_parser_omp_construct): Add p_name and mask vars. Handle PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP, PRAGMA_OMP_TEAMS. Adjust cp_parser_omp_for, cp_parser_omp_parallel and cp_parser_omp_sections callers.
(cp_parser_pragma): Handle PRAGMA_OMP_CANCEL, PRAGMA_OMP_CANCELLATION_POINT, PRAGMA_OMP_DECLARE_REDUCTION, PRAGMA_OMP_DISTRIBUTE, PRAGMA_OMP_SIMD, PRAGMA_OMP_TASKGROUP, PRAGMA_OMP_TEAMS, PRAGMA_OMP_TARGET, PRAGMA_OMP_END_DECLARE_TARGET. Handle pragma_member and pragma_objc_icode like pragma_external.
(OMP_FOR_CLAUSE_MASK, OMP_SECTIONS_CLAUSE_MASK, OMP_SINGLE_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1.
(OMP_PARALLEL_CLAUSE_MASK): Likewise. Add OMP_CLAUSE_PROC_BIND.
(OMP_TASK_CLAUSE_MASK): Use OMP_CLAUSE_MASK_1 instead of 1. Add OMP_CLAUSE_DEPEND.
(OMP_SIMD_CLAUSE_MASK, OMP_CANCEL_CLAUSE_MASK, OMP_CANCELLATION_POINT_CLAUSE_MASK, OMP_DISTRIBUTE_CLAUSE_MASK, OMP_TEAMS_CLAUSE_MASK, OMP_TARGET_DATA_CLAUSE_MASK, OMP_TARGET_UPDATE_CLAUSE_MASK, OMP_TARGET_CLAUSE_MASK, OMP_DECLARE_SIMD_CLAUSE_MASK): Define.
* parser.h (struct cp_omp_declare_simd_data): New type.
(struct cp_parser): Add colon_doesnt_start_class_def_p and omp_declare_simd fields.
* cp-objcp-common.h (LANG_HOOKS_OMP_MAPPABLE_TYPE): Define.
* cp-tree.h (struct lang_decl_fn): Add omp_declare_reduction_p bit.
(DECL_OMP_DECLARE_REDUCTION_P): Define.
(OMP_FOR_GIMPLIFYING_P): Use OMP_LOOP_CHECK macro.
(struct saved_scope): Add omp_declare_target_attribute field.
(cp_omp_mappable_type, omp_reduction_id, cp_remove_omp_priv_cleanup_stmt, cp_check_omp_declare_reduction, finish_omp_cancel, finish_omp_cancellation_point): New prototypes.
(finish_omp_for): Add CODE argument.
(finish_omp_atomic): Add seq_cst argument.
(cxx_omp_create_clause_info): Add need_dtor argument.

gcc/testsuite/
* c-c++-common/gomp/atomic-15.c: Adjust for C diagnostics. Remove error test that is now valid in OpenMP 4.0.
* c-c++-common/gomp/atomic-16.c: New test.
* c-c++-common/gomp/cancel-1.c: New test.
* c-c++-common/gomp/depend-1.c: New test.
* c-c++-common/gomp/depend-2.c: New test.
* c-c++-common/gomp/map-1.c: New test.
* c-c++-common/gomp/pr58472.c: New test.
* c-c++-common/gomp/sections1.c: New test.
* c-c++-common/gomp/simd1.c: New test.
* c-c++-common/gomp/simd2.c: New test.
* c-c++-common/gomp/simd3.c: New test.
* c-c++-common/gomp/simd4.c: New test.
* c-c++-common/gomp/simd5.c: New test.
* c-c++-common/gomp/single1.c: New test.
* g++.dg/gomp/block-0.C: Adjust for stricter #pragma omp sections parser.
* g++.dg/gomp/block-3.C: Likewise.
* g++.dg/gomp/clause-3.C: Adjust error messages.
* g++.dg/gomp/declare-simd-1.C: New test.
* g++.dg/gomp/declare-simd-2.C: New test.
* g++.dg/gomp/depend-1.C: New test.
* g++.dg/gomp/depend-2.C: New test.
* g++.dg/gomp/target-1.C: New test.
* g++.dg/gomp/target-2.C: New test.
* g++.dg/gomp/taskgroup-1.C: New test.
* g++.dg/gomp/teams-1.C: New test.
* g++.dg/gomp/udr-1.C: New test.
* g++.dg/gomp/udr-2.C: New test.
* g++.dg/gomp/udr-3.C: New test.
* g++.dg/gomp/udr-4.C: New test.
* g++.dg/gomp/udr-5.C: New test.
* g++.dg/gomp/udr-6.C: New test.
* gcc.dg/autopar/outer-1.c: Expect 4 instead of 5 loopfn matches.
* gcc.dg/autopar/outer-2.c: Likewise.
* gcc.dg/autopar/outer-3.c: Likewise.
* gcc.dg/autopar/outer-4.c: Likewise.
* gcc.dg/autopar/outer-5.c: Likewise.
* gcc.dg/autopar/outer-6.c: Likewise.
* gcc.dg/autopar/parallelization-1.c: Likewise.
* gcc.dg/gomp/block-3.c: Adjust for stricter #pragma omp sections parser.
* gcc.dg/gomp/clause-1.c: Adjust error messages.
* gcc.dg/gomp/combined-1.c: Look for GOMP_parallel_loop_runtime instead of GOMP_parallel_loop_runtime_start.
* gcc.dg/gomp/declare-simd-1.c: New test.
* gcc.dg/gomp/declare-simd-2.c: New test.
* gcc.dg/gomp/nesting-1.c: Adjust for stricter #pragma omp sections parser. Add further #pragma omp sections nesting tests.
* gcc.dg/gomp/target-1.c: New test.
* gcc.dg/gomp/target-2.c: New test.
* gcc.dg/gomp/taskgroup-1.c: New test.
* gcc.dg/gomp/teams-1.c: New test.
* gcc.dg/gomp/udr-1.c: New test.
* gcc.dg/gomp/udr-2.c: New test.
* gcc.dg/gomp/udr-3.c: New test.
* gcc.dg/gomp/udr-4.c: New test.
* gfortran.dg/gomp/appendix-a/a.35.5.f90: Add dg-error.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@203408 138bc75d-0d04-0410-961f-82ee72b054a4
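The snippets below are editorial sketches of the user-visible OpenMP 4.0 features enumerated in the ChangeLog above; none of them come from the commit itself. First, the runtime entry points newly exported at OMP_4.0 can be exercised with a small query program, typically run under the environment variables this commit teaches env.c to parse, e.g. OMP_CANCELLATION=true OMP_PROC_BIND=spread OMP_PLACES=cores OMP_DISPLAY_ENV=true:

#include <omp.h>
#include <stdio.h>

int
main (void)
{
  /* All five queries below are among the omp.h.in prototypes and
     libgomp.map exports added by this commit.  */
  printf ("cancellation:   %d\n", omp_get_cancellation ());
  printf ("proc bind:      %d\n", (int) omp_get_proc_bind ());
  printf ("default device: %d\n", omp_get_default_device ());
  printf ("num devices:    %d\n", omp_get_num_devices ());
  printf ("initial device: %d\n", omp_is_initial_device ());
  return 0;
}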
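GOMP_cancel, GOMP_cancellation_point and the *_cancel barrier entry points back the cancel and cancellation point constructs. A minimal sketch of a cancellable search loop (cancellation only takes effect when OMP_CANCELLATION=true; the match condition stands in for a real predicate):

#include <stdio.h>

int
main (void)
{
  int found = -1;
  #pragma omp parallel
  {
    #pragma omp for
    for (int i = 0; i < 1000000; i++)
      {
	if (i == 123456)	/* hypothetical search predicate */
	  {
	    #pragma omp critical
	    found = i;
	    /* Request cancellation of the enclosing worksharing loop.  */
	    #pragma omp cancel for
	  }
	/* Other threads observe the cancellation here.  */
	#pragma omp cancellation point for
      }
  }
  printf ("found = %d\n", found);
  return 0;
}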
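The depend argument added to GOMP_task and the GOMP_taskgroup_start/GOMP_taskgroup_end pair correspond to task dependences and taskgroups at the source level:

#include <stdio.h>

int
main (void)
{
  int x = 0, y = 0;
  #pragma omp parallel
  #pragma omp single
  {
    #pragma omp taskgroup
    {
      #pragma omp task shared(x) depend(out: x)
      x = 1;
      #pragma omp task shared(x, y) depend(in: x) depend(out: y)
      y = x + 1;
      #pragma omp task shared(x, y) depend(in: x, y)
      printf ("x = %d, y = %d\n", x, y);	/* prints x = 1, y = 2 */
    }	/* the end of the taskgroup waits for all three tasks */
  }
  return 0;
}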
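target.c and the GOMP_target* entry points implement the device constructs; in this initial revision a target region simply falls back to host execution when no offloading device is available. A sketch combining declare target with a target region and map clauses:

#include <stdio.h>

#define N 1024

#pragma omp declare target
static int
add (int a, int b)
{
  return a + b;
}
#pragma omp end declare target

int
main (void)
{
  int a[N], b[N], c[N];
  for (int i = 0; i < N; i++)
    {
      a[i] = i;
      b[i] = 2 * i;
    }
  /* a and b are copied to the device, c is copied back.  */
  #pragma omp target map(to: a, b) map(from: c)
  for (int i = 0; i < N; i++)
    c[i] = add (a[i], b[i]);
  printf ("c[%d] = %d\n", N - 1, c[N - 1]);
  return 0;
}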
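GOMP_teams plus the distribute handling in omp-low.c enable the teams and distribute constructs and their combined forms, including the new num_teams, thread_limit and dist_schedule clauses and array-section map syntax (the clause values here are arbitrary):

void
saxpy (int n, float a, float *x, float *y)
{
  #pragma omp target teams distribute parallel for \
	  map(to: x[0:n]) map(tofrom: y[0:n]) \
	  num_teams(4) thread_limit(64) dist_schedule(static, 256)
  for (int i = 0; i < n; i++)
    y[i] = a * x[i] + y[i];
}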
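User defined reductions, parsed by the new declare reduction support and lowered in lower_rec_input_clauses, allow reductions over aggregate types; the reduction identifier addpt and the point type are made up for the example:

struct point { double x, y; };

#pragma omp declare reduction (addpt : struct point : \
	omp_out.x += omp_in.x, omp_out.y += omp_in.y) \
	initializer (omp_priv = { 0.0, 0.0 })

struct point
sum_points (struct point *pts, int n)
{
  struct point sum = { 0.0, 0.0 };
  #pragma omp parallel for reduction (addpt : sum)
  for (int i = 0; i < n; i++)
    {
      sum.x += pts[i].x;
      sum.y += pts[i].y;
    }
  return sum;
}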
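The simd constructs and declare simd, with their safelen, simdlen, aligned, linear and inbranch/notinbranch clauses, cover the loop-vectorization side; fma1 is a hypothetical elemental function assumed to be defined elsewhere:

#pragma omp declare simd simdlen(8) notinbranch
float fma1 (float a, float b, float c);

void
vmad (float *a, float *b, float *c, int n)
{
  #pragma omp parallel for simd safelen(8) aligned(a, b, c : 32)
  for (int i = 0; i < n; i++)
    c[i] = fma1 (a[i], b[i], c[i]);
}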
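Finally, the atomic extensions: the seq_cst clause and the swapped "expr binop x" update form that the new swapped argument of c_finish_omp_atomic builds (_OPENMP is now predefined to 201307):

#include <stdio.h>

int
main (void)
{
  int x = 1;
  #pragma omp parallel num_threads(4)
  {
    int v;
    /* New 4.0 form: the right-hand side may be "expr binop x".  */
    #pragma omp atomic update
    x = 2 + x;
    /* New 4.0 clause: sequentially consistent atomic.  */
    #pragma omp atomic capture seq_cst
    v = x++;
    (void) v;	/* each thread captures its own snapshot of x */
  }
  printf ("x = %d\n", x);	/* 1 + 4*2 + 4*1 = 13 */
  return 0;
}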
Diffstat (limited to 'gcc/omp-low.c')
-rw-r--r--  gcc/omp-low.c  2600
1 file changed, 2312 insertions, 288 deletions
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index 84b2357b754..26f0c35e285 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -90,6 +90,10 @@ typedef struct omp_context
construct. In the case of a parallel, this is in the child function. */
tree block_vars;
+ /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
+ barriers should jump during the omplower pass. */
+ tree cancel_label;
+
/* What to do with variables with implicitly determined sharing
attributes. */
enum omp_clause_default_kind default_kind;
@@ -101,6 +105,9 @@ typedef struct omp_context
/* True if this parallel directive is nested within another. */
bool is_nested;
+
+ /* True if this construct can be cancelled. */
+ bool cancellable;
} omp_context;
@@ -127,6 +134,7 @@ struct omp_for_data
static splay_tree all_contexts;
static int taskreg_nesting_level;
+static int target_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;
@@ -224,6 +232,8 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
struct omp_for_data_loop dummy_loop;
location_t loc = gimple_location (for_stmt);
bool simd = gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_SIMD;
+ bool distribute = gimple_omp_for_kind (for_stmt)
+ == GF_OMP_FOR_KIND_DISTRIBUTE;
fd->for_stmt = for_stmt;
fd->pre = NULL;
@@ -233,7 +243,8 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
else
fd->loops = &fd->loop;
- fd->have_nowait = fd->have_ordered = false;
+ fd->have_nowait = distribute || simd;
+ fd->have_ordered = false;
fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
fd->chunk_size = NULL_TREE;
collapse_iter = NULL;
@@ -249,9 +260,14 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
fd->have_ordered = true;
break;
case OMP_CLAUSE_SCHEDULE:
+ gcc_assert (!distribute);
fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
break;
+ case OMP_CLAUSE_DIST_SCHEDULE:
+ gcc_assert (distribute);
+ fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
+ break;
case OMP_CLAUSE_COLLAPSE:
if (fd->collapse > 1)
{
@@ -279,8 +295,7 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
/* We only need to compute a default chunk size for ordered
static loops and dynamic loops. */
if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
- || fd->have_ordered
- || fd->collapse > 1)
+ || fd->have_ordered)
fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
? integer_zero_node : integer_one_node;
}
@@ -294,7 +309,6 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
else
loop = &dummy_loop;
-
loop->v = gimple_omp_for_index (for_stmt, i);
gcc_assert (SSA_VAR_P (loop->v));
gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
@@ -351,7 +365,9 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
gcc_unreachable ();
}
- if (simd)
+ if (simd
+ || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
+ && !fd->have_ordered))
{
if (fd->collapse == 1)
iter_type = TREE_TYPE (loop->v);
@@ -360,7 +376,7 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
< TYPE_PRECISION (TREE_TYPE (loop->v)))
iter_type
= build_nonstandard_integer_type
- (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
+ (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
}
else if (iter_type != long_long_unsigned_type_node)
{
@@ -459,7 +475,9 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
}
if (count
- && !simd)
+ && !simd
+ && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
+ || fd->have_ordered))
{
if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
iter_type = long_long_unsigned_type_node;
@@ -570,7 +588,7 @@ workshare_safe_to_combine_p (basic_block ws_entry_bb)
expanded. */
static vec<tree, va_gc> *
-get_ws_args_for (gimple ws_stmt)
+get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
tree t;
location_t loc = gimple_location (ws_stmt);
@@ -579,15 +597,31 @@ get_ws_args_for (gimple ws_stmt)
if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
{
struct omp_for_data fd;
+ tree n1, n2;
extract_omp_for_data (ws_stmt, &fd, NULL);
+ n1 = fd.loop.n1;
+ n2 = fd.loop.n2;
+
+ if (gimple_omp_for_combined_into_p (ws_stmt))
+ {
+ tree innerc
+ = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n1 = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n2 = OMP_CLAUSE_DECL (innerc);
+ }
vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
- t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
+ t = fold_convert_loc (loc, long_integer_type_node, n1);
ws_args->quick_push (t);
- t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
+ t = fold_convert_loc (loc, long_integer_type_node, n2);
ws_args->quick_push (t);
t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
@@ -650,6 +684,7 @@ determine_parallel_type (struct omp_region *region)
|| (last_and_only_stmt (ws_entry_bb)
&& last_and_only_stmt (par_exit_bb))))
{
+ gimple par_stmt = last_stmt (par_entry_bb);
gimple ws_stmt = last_stmt (ws_entry_bb);
if (region->inner->type == GIMPLE_OMP_FOR)
@@ -677,7 +712,7 @@ determine_parallel_type (struct omp_region *region)
region->is_combined_parallel = true;
region->inner->is_combined_parallel = true;
- region->ws_args = get_ws_args_for (ws_stmt);
+ region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
}
}
@@ -984,7 +1019,12 @@ install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
|| !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
type = TREE_TYPE (var);
- if (by_ref)
+ if (mask & 4)
+ {
+ gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
+ type = build_pointer_type (build_pointer_type (type));
+ }
+ else if (by_ref)
type = build_pointer_type (type);
else if ((mask & 3) == 1 && is_reference (var))
type = TREE_TYPE (type);
@@ -1421,6 +1461,9 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
break;
case OMP_CLAUSE_SHARED:
+ /* Ignore shared directives in teams construct. */
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ break;
gcc_assert (is_taskreg_ctx (ctx));
decl = OMP_CLAUSE_DECL (c);
gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
@@ -1480,6 +1523,13 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
install_var_local (decl, ctx);
break;
+ case OMP_CLAUSE__LOOPTEMP_:
+ gcc_assert (is_parallel_ctx (ctx));
+ decl = OMP_CLAUSE_DECL (c);
+ install_var_field (decl, false, 3, ctx);
+ install_var_local (decl, ctx);
+ break;
+
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_COPYIN:
decl = OMP_CLAUSE_DECL (c);
@@ -1494,19 +1544,113 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
+ case OMP_CLAUSE_NUM_TEAMS:
+ case OMP_CLAUSE_THREAD_LIMIT:
+ case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
+ case OMP_CLAUSE_DIST_SCHEDULE:
+ case OMP_CLAUSE_DEPEND:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
break;
+ case OMP_CLAUSE_TO:
+ case OMP_CLAUSE_FROM:
+ case OMP_CLAUSE_MAP:
+ if (ctx->outer)
+ scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
+ decl = OMP_CLAUSE_DECL (c);
+ /* Global variables with the "omp declare target" attribute
+ don't need to be copied; the receiver side will use them
+ directly. */
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
+ && DECL_P (decl)
+ && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
+ && lookup_attribute ("omp declare target",
+ DECL_ATTRIBUTES (decl)))
+ break;
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
+ {
+ /* Ignore the OMP_CLAUSE_MAP_POINTER kind for arrays in
+ #pragma omp target data; there is nothing to map for
+ those. */
+ if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
+ && !POINTER_TYPE_P (TREE_TYPE (decl)))
+ break;
+ }
+ if (DECL_P (decl))
+ {
+ if (DECL_SIZE (decl)
+ && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
+ {
+ tree decl2 = DECL_VALUE_EXPR (decl);
+ gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
+ decl2 = TREE_OPERAND (decl2, 0);
+ gcc_assert (DECL_P (decl2));
+ install_var_field (decl2, true, 3, ctx);
+ install_var_local (decl2, ctx);
+ install_var_local (decl, ctx);
+ }
+ else
+ {
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
+ && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
+ && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
+ install_var_field (decl, true, 7, ctx);
+ else
+ install_var_field (decl, true, 3, ctx);
+ if (gimple_omp_target_kind (ctx->stmt)
+ == GF_OMP_TARGET_KIND_REGION)
+ install_var_local (decl, ctx);
+ }
+ }
+ else
+ {
+ tree base = get_base_address (decl);
+ tree nc = OMP_CLAUSE_CHAIN (c);
+ if (DECL_P (base)
+ && nc != NULL_TREE
+ && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_DECL (nc) == base
+ && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
+ && integer_zerop (OMP_CLAUSE_SIZE (nc)))
+ {
+ OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
+ OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
+ }
+ else
+ {
+ gcc_assert (!splay_tree_lookup (ctx->field_map,
+ (splay_tree_key) decl));
+ tree field
+ = build_decl (OMP_CLAUSE_LOCATION (c),
+ FIELD_DECL, NULL_TREE, ptr_type_node);
+ DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
+ insert_field_into_struct (ctx->record_type, field);
+ splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
+ (splay_tree_value) field);
+ }
+ }
+ break;
+
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_MERGEABLE:
+ case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
break;
+ case OMP_CLAUSE_ALIGNED:
+ decl = OMP_CLAUSE_DECL (c);
+ if (is_global_var (decl)
+ && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
+ install_var_local (decl, ctx);
+ break;
+
default:
gcc_unreachable ();
}
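A hedged sketch of sources exercising the newly scanned clauses (proc_bind and depend shown; identifiers illustrative):

void pipeline (int *x)
{
  #pragma omp parallel proc_bind(close)
  #pragma omp single
  {
    #pragma omp task depend(out: x[0])
    x[0] = 1;
    #pragma omp task depend(in: x[0])
    x[0]++;
  }
}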
@@ -1541,24 +1685,71 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
break;
case OMP_CLAUSE_SHARED:
+ /* Ignore shared clauses in the teams construct. */
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ break;
decl = OMP_CLAUSE_DECL (c);
if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
fixup_remapped_decl (decl, ctx, false);
break;
+ case OMP_CLAUSE_MAP:
+ if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
+ break;
+ decl = OMP_CLAUSE_DECL (c);
+ if (DECL_P (decl)
+ && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
+ && lookup_attribute ("omp declare target",
+ DECL_ATTRIBUTES (decl)))
+ break;
+ if (DECL_P (decl))
+ {
+ if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
+ && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
+ && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
+ {
+ tree new_decl = lookup_decl (decl, ctx);
+ TREE_TYPE (new_decl)
+ = remap_type (TREE_TYPE (decl), &ctx->cb);
+ }
+ else if (DECL_SIZE (decl)
+ && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
+ {
+ tree decl2 = DECL_VALUE_EXPR (decl);
+ gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
+ decl2 = TREE_OPERAND (decl2, 0);
+ gcc_assert (DECL_P (decl2));
+ fixup_remapped_decl (decl2, ctx, false);
+ fixup_remapped_decl (decl, ctx, true);
+ }
+ else
+ fixup_remapped_decl (decl, ctx, false);
+ }
+ break;
+
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
+ case OMP_CLAUSE_NUM_TEAMS:
+ case OMP_CLAUSE_THREAD_LIMIT:
+ case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
+ case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_MERGEABLE:
+ case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
+ case OMP_CLAUSE_ALIGNED:
+ case OMP_CLAUSE_DEPEND:
+ case OMP_CLAUSE__LOOPTEMP_:
+ case OMP_CLAUSE_TO:
+ case OMP_CLAUSE_FROM:
break;
default:
@@ -1623,6 +1814,26 @@ create_omp_child_function (omp_context *ctx, bool task_copy)
DECL_EXTERNAL (decl) = 0;
DECL_CONTEXT (decl) = NULL_TREE;
DECL_INITIAL (decl) = make_node (BLOCK);
+ bool target_p = false;
+ if (lookup_attribute ("omp declare target",
+ DECL_ATTRIBUTES (current_function_decl)))
+ target_p = true;
+ else
+ {
+ omp_context *octx;
+ for (octx = ctx; octx; octx = octx->outer)
+ if (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
+ && gimple_omp_target_kind (octx->stmt)
+ == GF_OMP_TARGET_KIND_REGION)
+ {
+ target_p = true;
+ break;
+ }
+ }
+ if (target_p)
+ DECL_ATTRIBUTES (decl)
+ = tree_cons (get_identifier ("omp declare target"),
+ NULL_TREE, DECL_ATTRIBUTES (decl));
t = build_decl (DECL_SOURCE_LOCATION (decl),
RESULT_DECL, NULL_TREE, void_type_node);
@@ -1664,6 +1875,35 @@ create_omp_child_function (omp_context *ctx, bool task_copy)
pop_cfun ();
}
+/* Callback for walk_gimple_seq. Check whether a combined parallel
+ contains an OMP_FOR for which gimple_omp_for_combined_into_p holds. */
+
+static tree
+find_combined_for (gimple_stmt_iterator *gsi_p,
+ bool *handled_ops_p,
+ struct walk_stmt_info *wi)
+{
+ gimple stmt = gsi_stmt (*gsi_p);
+
+ *handled_ops_p = true;
+ switch (gimple_code (stmt))
+ {
+ WALK_SUBSTMTS;
+
+ case GIMPLE_OMP_FOR:
+ if (gimple_omp_for_combined_into_p (stmt)
+ && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
+ {
+ wi->info = stmt;
+ return integer_zero_node;
+ }
+ break;
+ default:
+ break;
+ }
+ return NULL;
+}
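For orientation, a hedged sketch of the shape this walker detects: a combined parallel whose body carries a worksharing GIMPLE_OMP_FOR marked combined-into (identifiers illustrative):

void saxpy (int n, float a, float *x, float *y)
{
  #pragma omp parallel for
  for (int i = 0; i < n; i++)
    y[i] += a * x[i];
}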
+
/* Scan an OpenMP parallel directive. */
static void
@@ -1684,6 +1924,40 @@ scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
return;
}
+ if (gimple_omp_parallel_combined_p (stmt))
+ {
+ gimple for_stmt;
+ struct walk_stmt_info wi;
+
+ memset (&wi, 0, sizeof (wi));
+ wi.val_only = true;
+ walk_gimple_seq (gimple_omp_body (stmt),
+ find_combined_for, NULL, &wi);
+ for_stmt = (gimple) wi.info;
+ if (for_stmt)
+ {
+ struct omp_for_data fd;
+ extract_omp_for_data (for_stmt, &fd, NULL);
+ /* We need two temporaries with fd.iter_type (istart/iend)
+ and then (fd.collapse - 1) temporaries of the same
+ type for the count2 ... countN-1 vars if not constant. */
+ size_t count = 2, i;
+ tree type = fd.iter_type;
+ if (fd.collapse > 1
+ && TREE_CODE (fd.loop.n2) != INTEGER_CST)
+ count += fd.collapse - 1;
+ for (i = 0; i < count; i++)
+ {
+ tree temp = create_tmp_var (type, NULL);
+ tree c = build_omp_clause (UNKNOWN_LOCATION,
+ OMP_CLAUSE__LOOPTEMP_);
+ OMP_CLAUSE_DECL (c) = temp;
+ OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
+ gimple_omp_parallel_set_clauses (stmt, c);
+ }
+ }
+ }
+
ctx = new_omp_context (stmt, outer_ctx);
if (taskreg_nesting_level > 1)
ctx->is_nested = true;
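Per the comment above, a collapse(3) combined parallel loop with non-constant bounds gets 2 + (3 - 1) = 4 _looptemp_ clauses: istart, iend, and two count temporaries. A sketch of a triggering source (identifiers illustrative):

void init (int n, int m, int p, double (*a)[m][p])
{
  #pragma omp parallel for collapse(3)
  for (int i = 0; i < n; i++)
    for (int j = 0; j < m; j++)
      for (int k = 0; k < p; k++)
        a[i][j][k] = 0.0;
}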
@@ -1858,6 +2132,63 @@ scan_omp_single (gimple stmt, omp_context *outer_ctx)
layout_type (ctx->record_type);
}
+/* Scan an OpenMP target{, data, update} directive. */
+
+static void
+scan_omp_target (gimple stmt, omp_context *outer_ctx)
+{
+ omp_context *ctx;
+ tree name;
+ int kind = gimple_omp_target_kind (stmt);
+
+ ctx = new_omp_context (stmt, outer_ctx);
+ ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
+ ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
+ ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
+ name = create_tmp_var_name (".omp_data_t");
+ name = build_decl (gimple_location (stmt),
+ TYPE_DECL, name, ctx->record_type);
+ DECL_ARTIFICIAL (name) = 1;
+ DECL_NAMELESS (name) = 1;
+ TYPE_NAME (ctx->record_type) = name;
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ create_omp_child_function (ctx, false);
+ gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
+ }
+
+ scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
+ scan_omp (gimple_omp_body_ptr (stmt), ctx);
+
+ if (TYPE_FIELDS (ctx->record_type) == NULL)
+ ctx->record_type = ctx->receiver_decl = NULL;
+ else
+ {
+ TYPE_FIELDS (ctx->record_type)
+ = nreverse (TYPE_FIELDS (ctx->record_type));
+#ifdef ENABLE_CHECKING
+ tree field;
+ unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
+ for (field = TYPE_FIELDS (ctx->record_type);
+ field;
+ field = DECL_CHAIN (field))
+ gcc_assert (DECL_ALIGN (field) == align);
+#endif
+ layout_type (ctx->record_type);
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ fixup_child_record_type (ctx);
+ }
+}
+
+/* Scan an OpenMP teams directive. */
+
+static void
+scan_omp_teams (gimple stmt, omp_context *outer_ctx)
+{
+ omp_context *ctx = new_omp_context (stmt, outer_ctx);
+ scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
+ scan_omp (gimple_omp_body_ptr (stmt), ctx);
+}
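A hedged sketch covering the target kinds scanned above (region, data, and update; identifiers illustrative):

void stage (int n, double *a)
{
  #pragma omp target data map(to: a[0:n])
  {
    #pragma omp target
    for (int i = 0; i < n; i++)
      a[i] *= 2.0;
    #pragma omp target update from(a[0:n])
  }
}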
/* Check OpenMP nesting restrictions. */
static bool
@@ -1872,16 +2203,149 @@ check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
"OpenMP constructs may not be nested inside simd region");
return false;
}
+ else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ {
+ if ((gimple_code (stmt) != GIMPLE_OMP_FOR
+ || (gimple_omp_for_kind (stmt)
+ != GF_OMP_FOR_KIND_DISTRIBUTE))
+ && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
+ {
+ error_at (gimple_location (stmt),
+ "only distribute or parallel constructs are allowed to "
+ "be closely nested inside teams construct");
+ return false;
+ }
+ }
}
switch (gimple_code (stmt))
{
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_SIMD)
return true;
+ if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
+ {
+ if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
+ {
+ error_at (gimple_location (stmt),
+ "distribute construct must be closely nested inside "
+ "teams construct");
+ return false;
+ }
+ return true;
+ }
+ /* FALLTHRU */
+ case GIMPLE_CALL:
+ if (is_gimple_call (stmt)
+ && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCEL
+ || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCELLATION_POINT))
+ {
+ const char *bad = NULL;
+ const char *kind = NULL;
+ if (ctx == NULL)
+ {
+ error_at (gimple_location (stmt), "orphaned %qs construct",
+ DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCEL
+ ? "#pragma omp cancel"
+ : "#pragma omp cancellation point");
+ return false;
+ }
+ switch (host_integerp (gimple_call_arg (stmt, 0), 0)
+ ? tree_low_cst (gimple_call_arg (stmt, 0), 0)
+ : 0)
+ {
+ case 1:
+ if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
+ bad = "#pragma omp parallel";
+ else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCEL
+ && !integer_zerop (gimple_call_arg (stmt, 1)))
+ ctx->cancellable = true;
+ kind = "parallel";
+ break;
+ case 2:
+ if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
+ || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
+ bad = "#pragma omp for";
+ else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCEL
+ && !integer_zerop (gimple_call_arg (stmt, 1)))
+ {
+ ctx->cancellable = true;
+ if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
+ OMP_CLAUSE_NOWAIT))
+ warning_at (gimple_location (stmt), 0,
+ "%<#pragma omp cancel for%> inside "
+ "%<nowait%> for construct");
+ if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
+ OMP_CLAUSE_ORDERED))
+ warning_at (gimple_location (stmt), 0,
+ "%<#pragma omp cancel for%> inside "
+ "%<ordered%> for construct");
+ }
+ kind = "for";
+ break;
+ case 4:
+ if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
+ && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
+ bad = "#pragma omp sections";
+ else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCEL
+ && !integer_zerop (gimple_call_arg (stmt, 1)))
+ {
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
+ {
+ ctx->cancellable = true;
+ if (find_omp_clause (gimple_omp_sections_clauses
+ (ctx->stmt),
+ OMP_CLAUSE_NOWAIT))
+ warning_at (gimple_location (stmt), 0,
+ "%<#pragma omp cancel sections%> inside "
+ "%<nowait%> sections construct");
+ }
+ else
+ {
+ gcc_assert (ctx->outer
+ && gimple_code (ctx->outer->stmt)
+ == GIMPLE_OMP_SECTIONS);
+ ctx->outer->cancellable = true;
+ if (find_omp_clause (gimple_omp_sections_clauses
+ (ctx->outer->stmt),
+ OMP_CLAUSE_NOWAIT))
+ warning_at (gimple_location (stmt), 0,
+ "%<#pragma omp cancel sections%> inside "
+ "%<nowait%> sections construct");
+ }
+ }
+ kind = "sections";
+ break;
+ case 8:
+ if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
+ bad = "#pragma omp task";
+ else
+ ctx->cancellable = true;
+ kind = "taskgroup";
+ break;
+ default:
+ error_at (gimple_location (stmt), "invalid arguments");
+ return false;
+ }
+ if (bad)
+ {
+ error_at (gimple_location (stmt),
+ "%<%s %s%> construct not closely nested inside of %qs",
+ DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCEL
+ ? "#pragma omp cancel"
+ : "#pragma omp cancellation point", kind, bad);
+ return false;
+ }
+ }
/* FALLTHRU */
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
- case GIMPLE_CALL:
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
@@ -1891,8 +2355,12 @@ check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_TASK:
+ case GIMPLE_OMP_CRITICAL:
if (is_gimple_call (stmt))
{
+ if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ != BUILT_IN_GOMP_BARRIER)
+ return true;
error_at (gimple_location (stmt),
"barrier region may not be closely nested inside "
"of work-sharing, critical, ordered, master or "
@@ -1949,7 +2417,10 @@ check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
}
return true;
case GIMPLE_OMP_PARALLEL:
- return true;
+ error_at (gimple_location (stmt),
+ "ordered region must be closely nested inside "
+ "a loop region with an ordered clause");
+ return false;
default:
break;
}
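The construct-kind mask decoded by the switch above is 1 for parallel, 2 for for, 4 for sections and 8 for taskgroup. A source-level sketch (identifiers illustrative):

void search (int n, int *a, int key)
{
  #pragma omp parallel
  #pragma omp for
  for (int i = 0; i < n; i++)
    {
      if (a[i] == key)
        {
          #pragma omp cancel for            /* mask 2 */
        }
      #pragma omp cancellation point for    /* likewise mask 2 */
    }
}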
@@ -1966,6 +2437,17 @@ check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
return false;
}
break;
+ case GIMPLE_OMP_TEAMS:
+ if (ctx == NULL
+ || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
+ || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
+ {
+ error_at (gimple_location (stmt),
+ "teams construct not closely nested inside of target "
+ "region");
+ return false;
+ }
+ break;
default:
break;
}
@@ -2038,23 +2520,33 @@ scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
input_location = gimple_location (stmt);
/* Check the OpenMP nesting restrictions. */
- if (ctx != NULL)
- {
- bool remove = false;
- if (is_gimple_omp (stmt))
- remove = !check_omp_nesting_restrictions (stmt, ctx);
- else if (is_gimple_call (stmt))
- {
- tree fndecl = gimple_call_fndecl (stmt);
- if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
+ bool remove = false;
+ if (is_gimple_omp (stmt))
+ remove = !check_omp_nesting_restrictions (stmt, ctx);
+ else if (is_gimple_call (stmt))
+ {
+ tree fndecl = gimple_call_fndecl (stmt);
+ if (fndecl
+ && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_GOMP_BARRIER:
+ case BUILT_IN_GOMP_CANCEL:
+ case BUILT_IN_GOMP_CANCELLATION_POINT:
+ case BUILT_IN_GOMP_TASKYIELD:
+ case BUILT_IN_GOMP_TASKWAIT:
+ case BUILT_IN_GOMP_TASKGROUP_START:
+ case BUILT_IN_GOMP_TASKGROUP_END:
remove = !check_omp_nesting_restrictions (stmt, ctx);
- }
- if (remove)
- {
- stmt = gimple_build_nop ();
- gsi_replace (gsi, stmt, false);
- }
+ break;
+ default:
+ break;
+ }
+ }
+ if (remove)
+ {
+ stmt = gimple_build_nop ();
+ gsi_replace (gsi, stmt, false);
}
*handled_ops_p = true;
@@ -2087,12 +2579,21 @@ scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
ctx = new_omp_context (stmt, ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
break;
+ case GIMPLE_OMP_TARGET:
+ scan_omp_target (stmt, ctx);
+ break;
+
+ case GIMPLE_OMP_TEAMS:
+ scan_omp_teams (stmt, ctx);
+ break;
+
case GIMPLE_BIND:
{
tree var;
@@ -2135,10 +2636,15 @@ scan_omp (gimple_seq *body_p, omp_context *ctx)
/* Build a call to GOMP_barrier. */
-static tree
-build_omp_barrier (void)
-{
- return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
+static gimple
+build_omp_barrier (tree lhs)
+{
+ tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
+ : BUILT_IN_GOMP_BARRIER);
+ gimple g = gimple_build_call (fndecl, 0);
+ if (lhs)
+ gimple_call_set_lhs (g, lhs);
+ return g;
}
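A hedged sketch of the cancellable form's contract, assuming the GOMP_4.0 prototype from libgomp_g.h; the lhs receives whether the region was cancelled:

extern _Bool GOMP_barrier_cancel (void);

static int body (void)
{
  if (GOMP_barrier_cancel ())   /* lhs set: branch to the cancellation exit */
    return 1;
  return 0;
}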
/* If a context was created for STMT when it was scanned, return it. */
@@ -2299,6 +2805,49 @@ omp_reduction_init (tree clause, tree type)
}
}
+/* Return the alignment to be assumed for the variable in CLAUSE, which
+ should be OMP_CLAUSE_ALIGNED. */
+
+static tree
+omp_clause_aligned_alignment (tree clause)
+{
+ if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
+ return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
+
+ /* Otherwise return the implementation-defined alignment. */
+ unsigned int al = 1;
+ enum machine_mode mode, vmode;
+ int vs = targetm.vectorize.autovectorize_vector_sizes ();
+ if (vs)
+ vs = 1 << floor_log2 (vs);
+ static enum mode_class classes[]
+ = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
+ for (int i = 0; i < 4; i += 2)
+ for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ vmode = targetm.vectorize.preferred_simd_mode (mode);
+ if (GET_MODE_CLASS (vmode) != classes[i + 1])
+ continue;
+ while (vs
+ && GET_MODE_SIZE (vmode) < vs
+ && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
+ vmode = GET_MODE_2XWIDER_MODE (vmode);
+
+ tree type = lang_hooks.types.type_for_mode (mode, 1);
+ if (type == NULL_TREE || TYPE_MODE (type) != mode)
+ continue;
+ type = build_vector_type (type, GET_MODE_SIZE (vmode)
+ / GET_MODE_SIZE (mode));
+ if (TYPE_MODE (type) != vmode)
+ continue;
+ if (TYPE_ALIGN_UNIT (type) > al)
+ al = TYPE_ALIGN_UNIT (type);
+ }
+ return build_int_cst (integer_type_node, al);
+}
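A hedged usage sketch: an explicit alignment takes the early return above, while omitting it falls through to the widest-preferred-SIMD-mode computation (identifiers illustrative):

void axpy (int n, double *restrict x, double *restrict y)
{
  #pragma omp simd aligned(x, y : 32)
  for (int i = 0; i < n; i++)
    y[i] += 2.0 * x[i];
}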
+
/* Return maximum possible vectorization factor for the target. */
static int
@@ -2362,8 +2911,11 @@ lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
NULL_TREE, NULL_TREE);
lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
NULL_TREE, NULL_TREE);
- SET_DECL_VALUE_EXPR (new_var, lvar);
- DECL_HAS_VALUE_EXPR_P (new_var) = 1;
+ if (DECL_P (new_var))
+ {
+ SET_DECL_VALUE_EXPR (new_var, lvar);
+ DECL_HAS_VALUE_EXPR_P (new_var) = 1;
+ }
return true;
}
@@ -2374,11 +2926,12 @@ lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
- omp_context *ctx)
+ omp_context *ctx, struct omp_for_data *fd)
{
tree c, dtor, copyin_seq, x, ptr;
bool copyin_by_ref = false;
bool lastprivate_firstprivate = false;
+ bool reduction_omp_orig_ref = false;
int pass;
bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD);
@@ -2398,9 +2951,6 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_REDUCTION:
- if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
- max_vf = 1;
- /* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LASTPRIVATE:
@@ -2432,6 +2982,9 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
continue;
break;
case OMP_CLAUSE_SHARED:
+ /* Ignore shared clauses in the teams construct. */
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ continue;
if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
{
gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
@@ -2439,9 +2992,16 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
}
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
+ case OMP_CLAUSE_LINEAR:
+ break;
case OMP_CLAUSE_REDUCTION:
+ if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
+ reduction_omp_orig_ref = true;
break;
- case OMP_CLAUSE_LINEAR:
+ case OMP_CLAUSE__LOOPTEMP_:
+ /* Handle _looptemp_ clauses only on parallel. */
+ if (fd)
+ continue;
break;
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
@@ -2451,6 +3011,42 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
continue;
}
break;
+ case OMP_CLAUSE_ALIGNED:
+ if (pass == 0)
+ continue;
+ var = OMP_CLAUSE_DECL (c);
+ if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
+ && !is_global_var (var))
+ {
+ new_var = maybe_lookup_decl (var, ctx);
+ if (new_var == NULL_TREE)
+ new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
+ x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
+ x = build_call_expr_loc (clause_loc, x, 2, new_var,
+ omp_clause_aligned_alignment (c));
+ x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
+ x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
+ gimplify_and_add (x, ilist);
+ }
+ else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
+ && is_global_var (var))
+ {
+ tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
+ new_var = lookup_decl (var, ctx);
+ t = maybe_lookup_decl_in_outer_ctx (var, ctx);
+ t = build_fold_addr_expr_loc (clause_loc, t);
+ t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
+ t = build_call_expr_loc (clause_loc, t2, 2, t,
+ omp_clause_aligned_alignment (c));
+ t = fold_convert_loc (clause_loc, ptype, t);
+ x = create_tmp_var (ptype, NULL);
+ t = build2 (MODIFY_EXPR, ptype, x, t);
+ gimplify_and_add (t, ilist);
+ t = build_simple_mem_ref_loc (clause_loc, x);
+ SET_DECL_VALUE_EXPR (new_var, t);
+ DECL_HAS_VALUE_EXPR_P (new_var) = 1;
+ }
+ continue;
default:
continue;
}
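A minimal sketch of what the pointer branch above emits at source level, assuming the default alignment resolved to 32:

void use (double *p)
{
  p = (double *) __builtin_assume_aligned (p, 32);
  p[0] = 0.0;   /* downstream uses now see the alignment guarantee */
}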
@@ -2502,10 +3098,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
allocate new backing storage for the new pointer
variable. This allows us to avoid changing all the
code that expects a pointer to something that expects
- a direct variable. Note that this doesn't apply to
- C++, since reference types are disallowed in data
- sharing clauses there, except for NRV optimized
- return values. */
+ a direct variable. */
if (pass == 0)
continue;
@@ -2550,6 +3143,9 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
+ /* Ignore shared clauses in the teams construct. */
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ continue;
/* Shared global vars are just accessed directly. */
if (is_global_var (new_var))
break;
@@ -2588,19 +3184,20 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
else
x = NULL;
do_private:
- x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
+ tree nx;
+ nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
if (is_simd)
{
tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
- if ((TREE_ADDRESSABLE (new_var) || x || y
+ if ((TREE_ADDRESSABLE (new_var) || nx || y
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
&& lower_rec_simd_input_clauses (new_var, ctx, max_vf,
idx, lane, ivar, lvar))
{
- if (x)
+ if (nx)
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (ivar), x);
- if (x)
+ if (nx && x)
gimplify_and_add (x, &llist[0]);
if (y)
{
@@ -2617,8 +3214,8 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
break;
}
}
- if (x)
- gimplify_and_add (x, ilist);
+ if (nx)
+ gimplify_and_add (nx, ilist);
/* FALLTHRU */
do_dtor:
@@ -2661,6 +3258,44 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
x = build_outer_var_ref (var, ctx);
if (is_simd)
{
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
+ && gimple_omp_for_combined_into_p (ctx->stmt))
+ {
+ tree stept = POINTER_TYPE_P (TREE_TYPE (x))
+ ? sizetype : TREE_TYPE (x);
+ tree t = fold_convert (stept,
+ OMP_CLAUSE_LINEAR_STEP (c));
+ tree c = find_omp_clause (clauses,
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (c);
+ tree l = OMP_CLAUSE_DECL (c);
+ if (fd->collapse == 1)
+ {
+ tree n1 = fd->loop.n1;
+ tree step = fd->loop.step;
+ tree itype = TREE_TYPE (l);
+ if (POINTER_TYPE_P (itype))
+ itype = signed_type_for (itype);
+ l = fold_build2 (MINUS_EXPR, itype, l, n1);
+ if (TYPE_UNSIGNED (itype)
+ && fd->loop.cond_code == GT_EXPR)
+ l = fold_build2 (TRUNC_DIV_EXPR, itype,
+ fold_build1 (NEGATE_EXPR,
+ itype, l),
+ fold_build1 (NEGATE_EXPR,
+ itype, step));
+ else
+ l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
+ }
+ t = fold_build2 (MULT_EXPR, stept,
+ fold_convert (stept, l), t);
+ if (POINTER_TYPE_P (TREE_TYPE (x)))
+ x = fold_build2 (POINTER_PLUS_EXPR,
+ TREE_TYPE (x), x, t);
+ else
+ x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
+ }
+
if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
|| TREE_ADDRESSABLE (new_var))
&& lower_rec_simd_input_clauses (new_var, ctx, max_vf,
@@ -2708,6 +3343,13 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
gimplify_and_add (x, ilist);
goto do_dtor;
+ case OMP_CLAUSE__LOOPTEMP_:
+ gcc_assert (is_parallel_ctx (ctx));
+ x = build_outer_var_ref (var, ctx);
+ x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
+ gimplify_and_add (x, ilist);
+ break;
+
case OMP_CLAUSE_COPYIN:
by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
@@ -2720,19 +3362,89 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
+ gimple_seq tseq;
x = build_outer_var_ref (var, ctx);
- /* FIXME: Not handled yet. */
- gcc_assert (!is_simd);
- if (is_reference (var))
+ if (is_reference (var)
+ && !useless_type_conversion_p (TREE_TYPE (placeholder),
+ TREE_TYPE (x)))
x = build_fold_addr_expr_loc (clause_loc, x);
SET_DECL_VALUE_EXPR (placeholder, x);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
- lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
- gimple_seq_add_seq (ilist,
- OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
+ tree new_vard = new_var;
+ if (is_reference (var))
+ {
+ gcc_assert (TREE_CODE (new_var) == MEM_REF);
+ new_vard = TREE_OPERAND (new_var, 0);
+ gcc_assert (DECL_P (new_vard));
+ }
+ if (is_simd
+ && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
+ idx, lane, ivar, lvar))
+ {
+ if (new_vard == new_var)
+ {
+ gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
+ SET_DECL_VALUE_EXPR (new_var, ivar);
+ }
+ else
+ {
+ SET_DECL_VALUE_EXPR (new_vard,
+ build_fold_addr_expr (ivar));
+ DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
+ }
+ x = lang_hooks.decls.omp_clause_default_ctor
+ (c, unshare_expr (ivar),
+ build_outer_var_ref (var, ctx));
+ if (x)
+ gimplify_and_add (x, &llist[0]);
+ if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
+ {
+ tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
+ lower_omp (&tseq, ctx);
+ gimple_seq_add_seq (&llist[0], tseq);
+ }
+ OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
+ tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
+ lower_omp (&tseq, ctx);
+ gimple_seq_add_seq (&llist[1], tseq);
+ OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
+ DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
+ if (new_vard == new_var)
+ SET_DECL_VALUE_EXPR (new_var, lvar);
+ else
+ SET_DECL_VALUE_EXPR (new_vard,
+ build_fold_addr_expr (lvar));
+ x = lang_hooks.decls.omp_clause_dtor (c, ivar);
+ if (x)
+ {
+ tseq = NULL;
+ dtor = x;
+ gimplify_stmt (&dtor, &tseq);
+ gimple_seq_add_seq (&llist[1], tseq);
+ }
+ break;
+ }
+ x = lang_hooks.decls.omp_clause_default_ctor
+ (c, new_var, unshare_expr (x));
+ if (x)
+ gimplify_and_add (x, ilist);
+ if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
+ {
+ tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
+ lower_omp (&tseq, ctx);
+ gimple_seq_add_seq (ilist, tseq);
+ }
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
+ if (is_simd)
+ {
+ tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
+ lower_omp (&tseq, ctx);
+ gimple_seq_add_seq (dlist, tseq);
+ OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
+ }
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
+ goto do_dtor;
}
else
{
@@ -2835,14 +3547,15 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
master thread doesn't modify it before it is copied over in all
threads. Similarly for variables in both firstprivate and
lastprivate clauses we need to ensure the lastprivate copying
- happens after firstprivate copying in all threads. */
- if (copyin_by_ref || lastprivate_firstprivate)
+ happens after firstprivate copying in all threads. And similarly
+ for UDRs if the initializer expression refers to omp_orig. */
+ if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
{
/* Don't add any barrier for #pragma omp simd or
#pragma omp distribute. */
if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
|| gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
- gimplify_and_add (build_omp_barrier (), ilist);
+ gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
}
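A hedged sketch of a UDR whose initializer reads omp_orig, the case the widened barrier condition covers (type and operation illustrative):

typedef struct { double v; } acc_t;
#pragma omp declare reduction (merge : acc_t : omp_out.v += omp_in.v) \
  initializer (omp_priv = omp_orig)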
/* If max_vf is non-zero, then we can use only a vectorization factor
@@ -2871,7 +3584,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
- omp_context *ctx)
+ omp_context *ctx)
{
tree x, c, label = NULL, orig_clauses = clauses;
bool par_clauses = false;
@@ -3025,7 +3738,7 @@ lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
- /* Never use OMP_ATOMIC for array reductions. */
+ /* Never use OMP_ATOMIC for array reductions or UDRs. */
count = -1;
break;
}
@@ -3072,7 +3785,9 @@ lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
- if (is_reference (var))
+ if (is_reference (var)
+ && !useless_type_conversion_p (TREE_TYPE (placeholder),
+ TREE_TYPE (ref)))
ref = build_fold_addr_expr_loc (clause_loc, ref);
SET_DECL_VALUE_EXPR (placeholder, ref);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
@@ -3175,6 +3890,7 @@ lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_LASTPRIVATE:
case OMP_CLAUSE_REDUCTION:
+ case OMP_CLAUSE__LOOPTEMP_:
break;
default:
continue;
@@ -3195,6 +3911,7 @@ lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
+ case OMP_CLAUSE__LOOPTEMP_:
do_in = true;
break;
@@ -3317,7 +4034,7 @@ static void
expand_parallel_call (struct omp_region *region, basic_block bb,
gimple entry_stmt, vec<tree, va_gc> *ws_args)
{
- tree t, t1, t2, val, cond, c, clauses;
+ tree t, t1, t2, val, cond, c, clauses, flags;
gimple_stmt_iterator gsi;
gimple stmt;
enum built_in_function start_ix;
@@ -3327,23 +4044,23 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
clauses = gimple_omp_parallel_clauses (entry_stmt);
- /* Determine what flavor of GOMP_parallel_start we will be
+ /* Determine what flavor of GOMP_parallel we will be
emitting. */
- start_ix = BUILT_IN_GOMP_PARALLEL_START;
+ start_ix = BUILT_IN_GOMP_PARALLEL;
if (is_combined_parallel (region))
{
switch (region->inner->type)
{
case GIMPLE_OMP_FOR:
gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
- start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
+ start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
+ (region->inner->sched_kind
== OMP_CLAUSE_SCHEDULE_RUNTIME
? 3 : region->inner->sched_kind));
start_ix = (enum built_in_function)start_ix2;
break;
case GIMPLE_OMP_SECTIONS:
- start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
+ start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
break;
default:
gcc_unreachable ();
@@ -3354,6 +4071,7 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
and there is no conditional. */
cond = NULL_TREE;
val = build_int_cst (unsigned_type_node, 0);
+ flags = build_int_cst (unsigned_type_node, 0);
c = find_omp_clause (clauses, OMP_CLAUSE_IF);
if (c)
@@ -3368,6 +4086,10 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
else
clause_loc = gimple_location (entry_stmt);
+ c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
+ if (c)
+ flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
+
/* Ensure 'val' is of the correct type. */
val = fold_convert_loc (clause_loc, unsigned_type_node, val);
@@ -3459,34 +4181,19 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
t1 = build_fold_addr_expr (t);
t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
- vec_alloc (args, 3 + vec_safe_length (ws_args));
+ vec_alloc (args, 4 + vec_safe_length (ws_args));
args->quick_push (t2);
args->quick_push (t1);
args->quick_push (val);
if (ws_args)
args->splice (*ws_args);
+ args->quick_push (flags);
t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
builtin_decl_explicit (start_ix), args);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
-
- t = gimple_omp_parallel_data_arg (entry_stmt);
- if (t == NULL)
- t = null_pointer_node;
- else
- t = build_fold_addr_expr (t);
- t = build_call_expr_loc (gimple_location (entry_stmt),
- gimple_omp_parallel_child_fn (entry_stmt), 1, t);
- force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
- false, GSI_CONTINUE_LINKING);
-
- t = build_call_expr_loc (gimple_location (entry_stmt),
- builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
- 0);
- force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
- false, GSI_CONTINUE_LINKING);
}
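The three-call GOMP_3.0 expansion deleted above,

  GOMP_parallel_start (fn, &data, num_threads);
  fn (&data);
  GOMP_parallel_end ();

collapses into a single GOMP_4.0 entry point whose trailing FLAGS argument carries the proc_bind kind computed earlier (prototype as declared in libgomp_g.h):

extern void GOMP_parallel (void (*fn) (void *), void *data,
                           unsigned num_threads, unsigned flags);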
@@ -3496,7 +4203,7 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
- tree t, t1, t2, t3, flags, cond, c, c2, clauses;
+ tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
gimple_stmt_iterator gsi;
location_t loc = gimple_location (entry_stmt);
@@ -3510,8 +4217,9 @@ expand_task_call (basic_block bb, gimple entry_stmt)
c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
+ depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
flags = build_int_cst (unsigned_type_node,
- (c ? 1 : 0) + (c2 ? 4 : 0));
+ (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
if (c)
@@ -3522,6 +4230,10 @@ expand_task_call (basic_block bb, gimple entry_stmt)
build_int_cst (unsigned_type_node, 0));
flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
}
+ if (depend)
+ depend = OMP_CLAUSE_DECL (depend);
+ else
+ depend = build_int_cst (ptr_type_node, 0);
gsi = gsi_last_bb (bb);
t = gimple_omp_task_data_arg (entry_stmt);
@@ -3537,9 +4249,10 @@ expand_task_call (basic_block bb, gimple entry_stmt)
t3 = build_fold_addr_expr_loc (loc, t);
t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
- 7, t1, t2, t3,
+ 8, t1, t2, t3,
gimple_omp_task_arg_size (entry_stmt),
- gimple_omp_task_arg_align (entry_stmt), cond, flags);
+ gimple_omp_task_arg_align (entry_stmt), cond, flags,
+ depend);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
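GOMP_task gains an eighth argument, and flag bit 8 advertises a non-null depend vector, matching flags = (untied ? 1 : 0) + (mergeable ? 4 : 0) + (depend ? 8 : 0) above (prototype as declared in libgomp_g.h):

extern void GOMP_task (void (*fn) (void *), void *data,
                       void (*cpyfn) (void *, void *),
                       long arg_size, long arg_align,
                       _Bool if_clause, unsigned flags,
                       void **depend);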
@@ -4064,7 +4777,9 @@ expand_omp_taskreg (struct omp_region *region)
count = count1 * count2 * count3;
Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
count = 0;
- and set ZERO_ITER_BB to that bb. */
+ and set ZERO_ITER_BB to that bb. If this isn't the outermost
+ of the combined loop constructs, just initialize COUNTS array
+ from the _looptemp_ clauses. */
/* NOTE: It *could* be better to moosh all of the BBs together,
creating one larger BB with all the computation and the unexpected
@@ -4106,6 +4821,28 @@ expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
/* Collapsed loops need work for expansion into SSA form. */
gcc_assert (!gimple_in_ssa_p (cfun));
+ if (gimple_omp_for_combined_into_p (fd->for_stmt)
+ && TREE_CODE (fd->loop.n2) != INTEGER_CST)
+ {
+ /* The first two _looptemp_ clauses are for istart/iend; counts[0]
+ is deliberately not handled, as the inner loop doesn't
+ use it. */
+ tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ for (i = 0; i < fd->collapse; i++)
+ {
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ if (i)
+ counts[i] = OMP_CLAUSE_DECL (innerc);
+ else
+ counts[0] = NULL_TREE;
+ }
+ return;
+ }
+
for (i = 0; i < fd->collapse; i++)
{
tree itype = TREE_TYPE (fd->loops[i].v);
@@ -4209,13 +4946,49 @@ expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
V2 = N21 + (T % count2) * STEP2;
T = T / count2;
V1 = N11 + T * STEP1;
- if this loop doesn't have an inner loop construct combined with it. */
+ if this loop doesn't have an inner loop construct combined with it.
+ If it does have an inner loop construct combined with it and the
+ iteration count isn't known constant, store values from counts array
+ into its _looptemp_ temporaries instead. */
static void
expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
- tree *counts, tree startvar)
+ tree *counts, gimple inner_stmt, tree startvar)
{
int i;
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ /* If fd->loop.n2 is constant, then no propagation of the counts
+ is needed, they are constant. */
+ if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
+ return;
+
+ tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
+ ? gimple_omp_parallel_clauses (inner_stmt)
+ : gimple_omp_for_clauses (inner_stmt);
+ /* The first two _looptemp_ clauses are for istart/iend; counts[0]
+ is deliberately not handled, as the inner loop doesn't
+ use it. */
+ tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ for (i = 0; i < fd->collapse; i++)
+ {
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ if (i)
+ {
+ tree tem = OMP_CLAUSE_DECL (innerc);
+ tree t = fold_convert (TREE_TYPE (tem), counts[i]);
+ t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
+ false, GSI_CONTINUE_LINKING);
+ gimple stmt = gimple_build_assign (tem, t);
+ gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
+ }
+ }
+ return;
+ }
+
tree type = TREE_TYPE (fd->loop.v);
tree tem = create_tmp_reg (type, ".tem");
gimple stmt = gimple_build_assign (tem, startvar);
@@ -4368,6 +5141,10 @@ extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
If this is a combined omp parallel loop, instead of the call to
GOMP_loop_foo_start, we call GOMP_loop_foo_next.
+ If this is a gimple_omp_for_combined_p loop, then in L0 we instead
+ assign to the first two _looptemp_ clause decls of the inner
+ GIMPLE_OMP_FOR, and both V += STEP; and
+ if (V cond iend) goto L1; else goto L2; are removed.
For collapsed loops, given parameters:
collapse(3)
@@ -4437,7 +5214,8 @@ static void
expand_omp_for_generic (struct omp_region *region,
struct omp_for_data *fd,
enum built_in_function start_fn,
- enum built_in_function next_fn)
+ enum built_in_function next_fn,
+ gimple inner_stmt)
{
tree type, istart0, iend0, iend;
tree t, vmain, vback, bias = NULL_TREE;
@@ -4552,6 +5330,17 @@ expand_omp_for_generic (struct omp_region *region,
t2 = fold_convert (fd->iter_type, fd->loop.step);
t1 = fd->loop.n2;
t0 = fd->loop.n1;
+ if (gimple_omp_for_combined_into_p (fd->for_stmt))
+ {
+ tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ t0 = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ t1 = OMP_CLAUSE_DECL (innerc);
+ }
if (POINTER_TYPE_P (TREE_TYPE (t0))
&& TYPE_PRECISION (TREE_TYPE (t0))
!= TYPE_PRECISION (fd->iter_type))
@@ -4622,6 +5411,21 @@ expand_omp_for_generic (struct omp_region *region,
tree startvar = fd->loop.v;
tree endvar = NULL_TREE;
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
+ && gimple_omp_for_kind (inner_stmt)
+ == GF_OMP_FOR_KIND_SIMD);
+ tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ startvar = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ endvar = OMP_CLAUSE_DECL (innerc);
+ }
+
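A hedged sketch of the combined worksharing-simd shape asserted above, where the simd half receives istart/iend through its first two _looptemp_ clauses (identifiers illustrative):

void bump (int n, float *a)
{
  #pragma omp parallel for simd
  for (int i = 0; i < n; i++)
    a[i] += 1.0f;
}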
gsi = gsi_start_bb (l0_bb);
t = istart0;
if (bias)
@@ -4650,7 +5454,7 @@ expand_omp_for_generic (struct omp_region *region,
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
}
if (fd->collapse > 1)
- expand_omp_for_init_vars (fd, &gsi, counts, startvar);
+ expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
if (!broken_loop)
{
@@ -4662,8 +5466,7 @@ expand_omp_for_generic (struct omp_region *region,
vmain = gimple_omp_continue_control_use (stmt);
vback = gimple_omp_continue_control_def (stmt);
- /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
- if (1)
+ if (!gimple_omp_for_combined_p (fd->for_stmt))
{
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (vmain, fd->loop.step);
@@ -4686,7 +5489,7 @@ expand_omp_for_generic (struct omp_region *region,
/* Remove GIMPLE_OMP_CONTINUE. */
gsi_remove (&gsi, true);
- if (fd->collapse > 1)
+ if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
/* Emit code to get the next parallel iteration in L2_BB. */
@@ -4708,9 +5511,13 @@ expand_omp_for_generic (struct omp_region *region,
gsi = gsi_last_bb (exit_bb);
if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
+ else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
+ t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
else
t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
stmt = gimple_build_call (t, 0);
+ if (gimple_omp_return_lhs (gsi_stmt (gsi)))
+ gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
gsi_remove (&gsi, true);
@@ -4738,9 +5545,11 @@ expand_omp_for_generic (struct omp_region *region,
if (current_loops)
add_bb_to_loop (l2_bb, cont_bb->loop_father);
e = find_edge (cont_bb, l1_bb);
- /* OMP4 placeholder for gimple_omp_for_combined_p (fd->for_stmt). */
- if (0)
- ;
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ remove_edge (e);
+ e = NULL;
+ }
else if (fd->collapse > 1)
{
remove_edge (e);
@@ -4774,8 +5583,7 @@ expand_omp_for_generic (struct omp_region *region,
outer_loop->latch = l2_bb;
add_loop (outer_loop, l0_bb->loop_father);
- /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
- if (1)
+ if (!gimple_omp_for_combined_p (fd->for_stmt))
{
struct loop *loop = alloc_loop ();
loop->header = l1_bb;
@@ -4825,16 +5633,22 @@ expand_omp_for_generic (struct omp_region *region,
static void
expand_omp_for_static_nochunk (struct omp_region *region,
- struct omp_for_data *fd)
+ struct omp_for_data *fd,
+ gimple inner_stmt)
{
tree n, q, s0, e0, e, t, tt, nthreads, threadid;
tree type, itype, vmain, vback;
basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
- basic_block body_bb, cont_bb;
+ basic_block body_bb, cont_bb, collapse_bb = NULL;
basic_block fin_bb;
gimple_stmt_iterator gsi;
gimple stmt;
edge ep;
+ enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
+ enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
+ bool broken_loop = region->cont == NULL;
+ tree *counts = NULL;
+ tree n1, n2, step;
itype = type = TREE_TYPE (fd->loop.v);
if (POINTER_TYPE_P (type))
@@ -4843,25 +5657,49 @@ expand_omp_for_static_nochunk (struct omp_region *region,
entry_bb = region->entry;
cont_bb = region->cont;
gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
- gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
+ fin_bb = BRANCH_EDGE (entry_bb)->dest;
+ gcc_assert (broken_loop
+ || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
body_bb = single_succ (seq_start_bb);
- gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
- gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
- fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
+ if (!broken_loop)
+ {
+ gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
+ gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
+ }
exit_bb = region->exit;
/* Iteration space partitioning goes in ENTRY_BB. */
gsi = gsi_last_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
- t = fold_binary (fd->loop.cond_code, boolean_type_node,
- fold_convert (type, fd->loop.n1),
- fold_convert (type, fd->loop.n2));
- if (TYPE_UNSIGNED (type)
+ if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
+ {
+ get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
+ get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
+ }
+
+ if (fd->collapse > 1)
+ {
+ int first_zero_iter = -1;
+ basic_block l2_dom_bb = NULL;
+
+ counts = XALLOCAVEC (tree, fd->collapse);
+ expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
+ fin_bb, first_zero_iter,
+ l2_dom_bb);
+ t = NULL_TREE;
+ }
+ else if (gimple_omp_for_combined_into_p (fd->for_stmt))
+ t = integer_one_node;
+ else
+ t = fold_binary (fd->loop.cond_code, boolean_type_node,
+ fold_convert (type, fd->loop.n1),
+ fold_convert (type, fd->loop.n2));
+ if (fd->collapse == 1
+ && TYPE_UNSIGNED (type)
&& (t == NULL_TREE || !integer_onep (t)))
{
- tree n1, n2;
n1 = fold_convert (type, unshare_expr (fd->loop.n1));
n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
true, GSI_SAME_STMT);
@@ -4899,36 +5737,47 @@ expand_omp_for_static_nochunk (struct omp_region *region,
gsi = gsi_last_bb (entry_bb);
}
- t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
+ t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
t = fold_convert (itype, t);
nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
- t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
+ t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
t = fold_convert (itype, t);
threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
- fd->loop.n1
- = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
- true, NULL_TREE, true, GSI_SAME_STMT);
- fd->loop.n2
- = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
- true, NULL_TREE, true, GSI_SAME_STMT);
- fd->loop.step
- = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
- true, NULL_TREE, true, GSI_SAME_STMT);
+ n1 = fd->loop.n1;
+ n2 = fd->loop.n2;
+ step = fd->loop.step;
+ if (gimple_omp_for_combined_into_p (fd->for_stmt))
+ {
+ tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n1 = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n2 = OMP_CLAUSE_DECL (innerc);
+ }
+ n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
+ true, NULL_TREE, true, GSI_SAME_STMT);
+ n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
+ true, NULL_TREE, true, GSI_SAME_STMT);
+ step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
+ true, NULL_TREE, true, GSI_SAME_STMT);
t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
- t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
- t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
- t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
+ t = fold_build2 (PLUS_EXPR, itype, step, t);
+ t = fold_build2 (PLUS_EXPR, itype, t, n2);
+ t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
- fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
+ fold_build1 (NEGATE_EXPR, itype, step));
else
- t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
t = fold_convert (itype, t);
n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
@@ -4976,58 +5825,96 @@ expand_omp_for_static_nochunk (struct omp_region *region,
/* Setup code for sequential iteration goes in SEQ_START_BB. */
gsi = gsi_start_bb (seq_start_bb);
+ tree startvar = fd->loop.v;
+ tree endvar = NULL_TREE;
+
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
+ ? gimple_omp_parallel_clauses (inner_stmt)
+ : gimple_omp_for_clauses (inner_stmt);
+ tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ startvar = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ endvar = OMP_CLAUSE_DECL (innerc);
+ }
t = fold_convert (itype, s0);
- t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (fd->loop.n1, t);
+ t = fold_build_pointer_plus (n1, t);
else
- t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
+ t = fold_build2 (PLUS_EXPR, type, t, n1);
+ t = fold_convert (TREE_TYPE (startvar), t);
t = force_gimple_operand_gsi (&gsi, t,
- DECL_P (fd->loop.v)
- && TREE_ADDRESSABLE (fd->loop.v),
+ DECL_P (startvar)
+ && TREE_ADDRESSABLE (startvar),
NULL_TREE, false, GSI_CONTINUE_LINKING);
- stmt = gimple_build_assign (fd->loop.v, t);
+ stmt = gimple_build_assign (startvar, t);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
t = fold_convert (itype, e0);
- t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (fd->loop.n1, t);
+ t = fold_build_pointer_plus (n1, t);
else
- t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
+ t = fold_build2 (PLUS_EXPR, type, t, n1);
+ t = fold_convert (TREE_TYPE (startvar), t);
e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
+ if (endvar)
+ {
+ stmt = gimple_build_assign (endvar, e);
+ gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+ }
+ if (fd->collapse > 1)
+ expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
- /* The code controlling the sequential loop replaces the
- GIMPLE_OMP_CONTINUE. */
- gsi = gsi_last_bb (cont_bb);
- stmt = gsi_stmt (gsi);
- gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
- vmain = gimple_omp_continue_control_use (stmt);
- vback = gimple_omp_continue_control_def (stmt);
+ if (!broken_loop)
+ {
+ /* The code controlling the sequential loop replaces the
+ GIMPLE_OMP_CONTINUE. */
+ gsi = gsi_last_bb (cont_bb);
+ stmt = gsi_stmt (gsi);
+ gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
+ vmain = gimple_omp_continue_control_use (stmt);
+ vback = gimple_omp_continue_control_def (stmt);
- if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (vmain, fd->loop.step);
- else
- t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
- t = force_gimple_operand_gsi (&gsi, t,
- DECL_P (vback) && TREE_ADDRESSABLE (vback),
- NULL_TREE, true, GSI_SAME_STMT);
- stmt = gimple_build_assign (vback, t);
- gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
+ if (!gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ if (POINTER_TYPE_P (type))
+ t = fold_build_pointer_plus (vmain, step);
+ else
+ t = fold_build2 (PLUS_EXPR, type, vmain, step);
+ t = force_gimple_operand_gsi (&gsi, t,
+ DECL_P (vback)
+ && TREE_ADDRESSABLE (vback),
+ NULL_TREE, true, GSI_SAME_STMT);
+ stmt = gimple_build_assign (vback, t);
+ gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
- t = build2 (fd->loop.cond_code, boolean_type_node,
- DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e);
- gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
+ t = build2 (fd->loop.cond_code, boolean_type_node,
+ DECL_P (vback) && TREE_ADDRESSABLE (vback)
+ ? t : vback, e);
+ gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
+ }
- /* Remove the GIMPLE_OMP_CONTINUE statement. */
- gsi_remove (&gsi, true);
+ /* Remove the GIMPLE_OMP_CONTINUE statement. */
+ gsi_remove (&gsi, true);
+
+ if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
+ collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
+ }
/* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
gsi = gsi_last_bb (exit_bb);
if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
- force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
- false, GSI_SAME_STMT);
+ {
+ t = gimple_omp_return_lhs (gsi_stmt (gsi));
+ gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
+ }
gsi_remove (&gsi, true);
/* Connect all the blocks. */
@@ -5039,21 +5926,42 @@ expand_omp_for_static_nochunk (struct omp_region *region,
find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
- find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
- find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
+ if (!broken_loop)
+ {
+ ep = find_edge (cont_bb, body_bb);
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ remove_edge (ep);
+ ep = NULL;
+ }
+ else if (fd->collapse > 1)
+ {
+ remove_edge (ep);
+ ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
+ }
+ else
+ ep->flags = EDGE_TRUE_VALUE;
+ find_edge (cont_bb, fin_bb)->flags
+ = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
+ }
set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
+
set_immediate_dominator (CDI_DOMINATORS, body_bb,
recompute_dominator (CDI_DOMINATORS, body_bb));
set_immediate_dominator (CDI_DOMINATORS, fin_bb,
recompute_dominator (CDI_DOMINATORS, fin_bb));
- struct loop *loop = alloc_loop ();
- loop->header = body_bb;
- loop->latch = cont_bb;
- add_loop (loop, body_bb->loop_father);
+ if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ struct loop *loop = alloc_loop ();
+ loop->header = body_bb;
+ if (collapse_bb == NULL)
+ loop->latch = cont_bb;
+ add_loop (loop, body_bb->loop_father);
+ }
}
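A hedged sketch of the distribute case this function now also covers, where the same static partitioning runs per team via the swapped-in OMP_GET_NUM_TEAMS/OMP_GET_TEAM_NUM builtins (identifiers illustrative):

void spread (int n, double *a)
{
  #pragma omp target map(tofrom: a[0:n])
  #pragma omp teams distribute
  for (int i = 0; i < n; i++)
    a[i] = 1.0;
}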
@@ -5096,16 +6004,22 @@ expand_omp_for_static_nochunk (struct omp_region *region,
*/
static void
-expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
+expand_omp_for_static_chunk (struct omp_region *region,
+ struct omp_for_data *fd, gimple inner_stmt)
{
tree n, s0, e0, e, t;
tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
tree type, itype, v_main, v_back, v_extra;
basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
- basic_block trip_update_bb, cont_bb, fin_bb;
+ basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
gimple_stmt_iterator si;
gimple stmt;
edge se;
+ enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
+ enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
+ bool broken_loop = region->cont == NULL;
+ tree *counts = NULL;
+ tree n1, n2, step;
itype = type = TREE_TYPE (fd->loop.v);
if (POINTER_TYPE_P (type))
@@ -5117,27 +6031,50 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
iter_part_bb = se->dest;
cont_bb = region->cont;
gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
- gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
- == FALLTHRU_EDGE (cont_bb)->dest);
+ fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
+ gcc_assert (broken_loop
+ || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
body_bb = single_succ (seq_start_bb);
- gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
- gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
- fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
- trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
+ if (!broken_loop)
+ {
+ gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
+ gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
+ trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
+ }
exit_bb = region->exit;
/* Trip and adjustment setup goes in ENTRY_BB. */
si = gsi_last_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
- t = fold_binary (fd->loop.cond_code, boolean_type_node,
- fold_convert (type, fd->loop.n1),
- fold_convert (type, fd->loop.n2));
- if (TYPE_UNSIGNED (type)
+ if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
+ {
+ get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
+ get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
+ }
+
+ if (fd->collapse > 1)
+ {
+ int first_zero_iter = -1;
+ basic_block l2_dom_bb = NULL;
+
+ counts = XALLOCAVEC (tree, fd->collapse);
+ expand_omp_for_init_counts (fd, &si, entry_bb, counts,
+ fin_bb, first_zero_iter,
+ l2_dom_bb);
+ t = NULL_TREE;
+ }
+ else if (gimple_omp_for_combined_into_p (fd->for_stmt))
+ t = integer_one_node;
+ else
+ t = fold_binary (fd->loop.cond_code, boolean_type_node,
+ fold_convert (type, fd->loop.n1),
+ fold_convert (type, fd->loop.n2));
+ if (fd->collapse == 1
+ && TYPE_UNSIGNED (type)
&& (t == NULL_TREE || !integer_onep (t)))
{
- tree n1, n2;
n1 = fold_convert (type, unshare_expr (fd->loop.n1));
n1 = force_gimple_operand_gsi (&si, n1, true, NULL_TREE,
true, GSI_SAME_STMT);
@@ -5175,39 +6112,50 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
si = gsi_last_bb (entry_bb);
}
- t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
+ t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
t = fold_convert (itype, t);
nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
true, GSI_SAME_STMT);
- t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
+ t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
t = fold_convert (itype, t);
threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
true, GSI_SAME_STMT);
- fd->loop.n1
- = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
- true, NULL_TREE, true, GSI_SAME_STMT);
- fd->loop.n2
- = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
- true, NULL_TREE, true, GSI_SAME_STMT);
- fd->loop.step
- = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
- true, NULL_TREE, true, GSI_SAME_STMT);
+ n1 = fd->loop.n1;
+ n2 = fd->loop.n2;
+ step = fd->loop.step;
+ if (gimple_omp_for_combined_into_p (fd->for_stmt))
+ {
+ tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n1 = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n2 = OMP_CLAUSE_DECL (innerc);
+ }
+ n1 = force_gimple_operand_gsi (&si, fold_convert (type, n1),
+ true, NULL_TREE, true, GSI_SAME_STMT);
+ n2 = force_gimple_operand_gsi (&si, fold_convert (itype, n2),
+ true, NULL_TREE, true, GSI_SAME_STMT);
+ step = force_gimple_operand_gsi (&si, fold_convert (itype, step),
+ true, NULL_TREE, true, GSI_SAME_STMT);
fd->chunk_size
= force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
true, NULL_TREE, true, GSI_SAME_STMT);
t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
- t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
- t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
- t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
+ t = fold_build2 (PLUS_EXPR, itype, step, t);
+ t = fold_build2 (PLUS_EXPR, itype, t, n2);
+ t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
- fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
+ fold_build1 (NEGATE_EXPR, itype, step));
else
- t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
t = fold_convert (itype, t);
n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
true, GSI_SAME_STMT);
@@ -5230,11 +6178,11 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
gsi_insert_before (&si, stmt, GSI_SAME_STMT);
t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
- t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (fd->loop.n1, t);
+ t = fold_build_pointer_plus (n1, t);
else
- t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
+ t = fold_build2 (PLUS_EXPR, type, t, n1);
v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
true, GSI_SAME_STMT);
@@ -5261,77 +6209,130 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
/* Setup code for sequential iteration goes in SEQ_START_BB. */
si = gsi_start_bb (seq_start_bb);
+ tree startvar = fd->loop.v;
+ tree endvar = NULL_TREE;
+
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
+ ? gimple_omp_parallel_clauses (inner_stmt)
+ : gimple_omp_for_clauses (inner_stmt);
+ tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ startvar = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ endvar = OMP_CLAUSE_DECL (innerc);
+ }
+
t = fold_convert (itype, s0);
- t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (fd->loop.n1, t);
+ t = fold_build_pointer_plus (n1, t);
else
- t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
+ t = fold_build2 (PLUS_EXPR, type, t, n1);
+ t = fold_convert (TREE_TYPE (startvar), t);
t = force_gimple_operand_gsi (&si, t,
- DECL_P (fd->loop.v)
- && TREE_ADDRESSABLE (fd->loop.v),
+ DECL_P (startvar)
+ && TREE_ADDRESSABLE (startvar),
NULL_TREE, false, GSI_CONTINUE_LINKING);
- stmt = gimple_build_assign (fd->loop.v, t);
+ stmt = gimple_build_assign (startvar, t);
gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
t = fold_convert (itype, e0);
- t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (fd->loop.n1, t);
+ t = fold_build_pointer_plus (n1, t);
else
- t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
+ t = fold_build2 (PLUS_EXPR, type, t, n1);
+ t = fold_convert (TREE_TYPE (startvar), t);
e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
+ if (endvar)
+ {
+ stmt = gimple_build_assign (endvar, e);
+ gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
+ }
+ if (fd->collapse > 1)
+ expand_omp_for_init_vars (fd, &si, counts, inner_stmt, startvar);
- /* The code controlling the sequential loop goes in CONT_BB,
- replacing the GIMPLE_OMP_CONTINUE. */
- si = gsi_last_bb (cont_bb);
- stmt = gsi_stmt (si);
- gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
- v_main = gimple_omp_continue_control_use (stmt);
- v_back = gimple_omp_continue_control_def (stmt);
+ if (!broken_loop)
+ {
+ /* The code controlling the sequential loop goes in CONT_BB,
+ replacing the GIMPLE_OMP_CONTINUE. */
+ si = gsi_last_bb (cont_bb);
+ stmt = gsi_stmt (si);
+ gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
+ v_main = gimple_omp_continue_control_use (stmt);
+ v_back = gimple_omp_continue_control_def (stmt);
- if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (v_main, fd->loop.step);
- else
- t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
- if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
- t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
- true, GSI_SAME_STMT);
- stmt = gimple_build_assign (v_back, t);
- gsi_insert_before (&si, stmt, GSI_SAME_STMT);
+ if (!gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ if (POINTER_TYPE_P (type))
+ t = fold_build_pointer_plus (v_main, step);
+ else
+ t = fold_build2 (PLUS_EXPR, type, v_main, step);
+ if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
+ t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
+ true, GSI_SAME_STMT);
+ stmt = gimple_build_assign (v_back, t);
+ gsi_insert_before (&si, stmt, GSI_SAME_STMT);
- t = build2 (fd->loop.cond_code, boolean_type_node,
- DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
- ? t : v_back, e);
- gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
+ t = build2 (fd->loop.cond_code, boolean_type_node,
+ DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
+ ? t : v_back, e);
+ gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
+ }
- /* Remove GIMPLE_OMP_CONTINUE. */
- gsi_remove (&si, true);
+ /* Remove GIMPLE_OMP_CONTINUE. */
+ gsi_remove (&si, true);
- /* Trip update code goes into TRIP_UPDATE_BB. */
- si = gsi_start_bb (trip_update_bb);
+ if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
+ collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
- t = build_int_cst (itype, 1);
- t = build2 (PLUS_EXPR, itype, trip_main, t);
- stmt = gimple_build_assign (trip_back, t);
- gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
+ /* Trip update code goes into TRIP_UPDATE_BB. */
+ si = gsi_start_bb (trip_update_bb);
+
+ t = build_int_cst (itype, 1);
+ t = build2 (PLUS_EXPR, itype, trip_main, t);
+ stmt = gimple_build_assign (trip_back, t);
+ gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
+ }
/* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
si = gsi_last_bb (exit_bb);
if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
- force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
- false, GSI_SAME_STMT);
+ {
+ t = gimple_omp_return_lhs (gsi_stmt (si));
+ gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
+ }
gsi_remove (&si, true);
/* Connect the new blocks. */
find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
- find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
- find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
+ if (!broken_loop)
+ {
+ se = find_edge (cont_bb, body_bb);
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ remove_edge (se);
+ se = NULL;
+ }
+ else if (fd->collapse > 1)
+ {
+ remove_edge (se);
+ se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
+ }
+ else
+ se->flags = EDGE_TRUE_VALUE;
+ find_edge (cont_bb, trip_update_bb)->flags
+ = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
- redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
+ redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
+ }
if (gimple_in_ssa_p (cfun))
{
@@ -5342,6 +6343,8 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
edge_var_map *vm;
size_t i;
+ gcc_assert (fd->collapse == 1 && !broken_loop);
+
/* When we redirect the edge from trip_update_bb to iter_part_bb, we
remove arguments of the phi nodes in fin_bb. We need to create
appropriate phi nodes in iter_part_bb instead. */
@@ -5391,7 +6394,8 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
UNKNOWN_LOCATION);
}
- set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
+ if (!broken_loop)
+ set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
recompute_dominator (CDI_DOMINATORS, iter_part_bb));
set_immediate_dominator (CDI_DOMINATORS, fin_bb,
@@ -5401,17 +6405,24 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
set_immediate_dominator (CDI_DOMINATORS, body_bb,
recompute_dominator (CDI_DOMINATORS, body_bb));
- struct loop *trip_loop = alloc_loop ();
- trip_loop->header = iter_part_bb;
- trip_loop->latch = trip_update_bb;
- add_loop (trip_loop, iter_part_bb->loop_father);
+ if (!broken_loop)
+ {
+ struct loop *trip_loop = alloc_loop ();
+ trip_loop->header = iter_part_bb;
+ trip_loop->latch = trip_update_bb;
+ add_loop (trip_loop, iter_part_bb->loop_father);
- struct loop *loop = alloc_loop ();
- loop->header = body_bb;
- loop->latch = cont_bb;
- add_loop (loop, trip_loop);
+ if (!gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ struct loop *loop = alloc_loop ();
+ loop->header = body_bb;
+ loop->latch = cont_bb;
+ add_loop (loop, trip_loop);
+ }
+ }
}
+
/* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
loop. Given parameters:
@@ -5487,7 +6498,7 @@ expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
OMP_CLAUSE_SAFELEN);
tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE__SIMDUID_);
- tree n2;
+ tree n1, n2;
type = TREE_TYPE (fd->loop.v);
entry_bb = region->entry;
@@ -5530,10 +6541,27 @@ expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
if (l2_dom_bb == NULL)
l2_dom_bb = l1_bb;
+ n1 = fd->loop.n1;
n2 = fd->loop.n2;
- if (0)
- /* Place holder for gimple_omp_for_combined_into_p() in
- the upcoming gomp-4_0-branch merge. */;
+ if (gimple_omp_for_combined_into_p (fd->for_stmt))
+ {
+ tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n1 = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n2 = OMP_CLAUSE_DECL (innerc);
+ expand_omp_build_assign (&gsi, fd->loop.v,
+ fold_convert (type, n1));
+ if (fd->collapse > 1)
+ {
+ gsi_prev (&gsi);
+ expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
+ gsi_next (&gsi);
+ }
+ }
else
{
expand_omp_build_assign (&gsi, fd->loop.v,
@@ -5706,7 +6734,7 @@ expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
/* Expand the OpenMP loop defined by REGION. */
static void
-expand_omp_for (struct omp_region *region)
+expand_omp_for (struct omp_region *region, gimple inner_stmt)
{
struct omp_for_data fd;
struct omp_for_data_loop *loops;
@@ -5736,14 +6764,12 @@ expand_omp_for (struct omp_region *region)
if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_SIMD)
expand_omp_simd (region, &fd);
else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
- && !fd.have_ordered
- && fd.collapse == 1
- && region->cont != NULL)
+ && !fd.have_ordered)
{
if (fd.chunk_size == NULL)
- expand_omp_for_static_nochunk (region, &fd);
+ expand_omp_for_static_nochunk (region, &fd, inner_stmt);
else
- expand_omp_for_static_chunk (region, &fd);
+ expand_omp_for_static_chunk (region, &fd, inner_stmt);
}
else
{
@@ -5768,7 +6794,7 @@ expand_omp_for (struct omp_region *region)
- (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
}
expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
- (enum built_in_function) next_ix);
+ (enum built_in_function) next_ix, inner_stmt);
}
if (gimple_in_ssa_p (cfun))
@@ -5983,9 +7009,13 @@ expand_omp_sections (struct omp_region *region)
si = gsi_last_bb (l2_bb);
if (gimple_omp_return_nowait_p (gsi_stmt (si)))
t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
+ else if (gimple_omp_return_lhs (gsi_stmt (si)))
+ t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
else
t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
stmt = gimple_build_call (t, 0);
+ if (gimple_omp_return_lhs (gsi_stmt (si)))
+ gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
gsi_insert_after (&si, stmt, GSI_SAME_STMT);
gsi_remove (&si, true);
@@ -6001,26 +7031,21 @@ expand_omp_single (struct omp_region *region)
{
basic_block entry_bb, exit_bb;
gimple_stmt_iterator si;
- bool need_barrier = false;
entry_bb = region->entry;
exit_bb = region->exit;
si = gsi_last_bb (entry_bb);
- /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
- be removed. We need to ensure that the thread that entered the single
- does not exit before the data is copied out by the other threads. */
- if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
- OMP_CLAUSE_COPYPRIVATE))
- need_barrier = true;
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
gsi_remove (&si, true);
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
si = gsi_last_bb (exit_bb);
- if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
- force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
- false, GSI_SAME_STMT);
+ if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
+ {
+ tree t = gimple_omp_return_lhs (gsi_stmt (si));
+ gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
+ }
gsi_remove (&si, true);
single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
@@ -6042,8 +7067,10 @@ expand_omp_synch (struct omp_region *region)
si = gsi_last_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
+ || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
- || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
+ || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
+ || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
gsi_remove (&si, true);
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
@@ -6088,7 +7115,10 @@ expand_omp_atomic_load (basic_block load_bb, tree addr,
itype = TREE_TYPE (TREE_TYPE (decl));
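+ /* '#pragma omp atomic seq_cst' requests the sequentially consistent
+ memory model; plain atomics keep using the relaxed model. */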
call = build_call_expr_loc (loc, decl, 2, addr,
- build_int_cst (NULL, MEMMODEL_RELAXED));
+ build_int_cst (NULL,
+ gimple_omp_atomic_seq_cst_p (stmt)
+ ? MEMMODEL_SEQ_CST
+ : MEMMODEL_RELAXED));
if (!useless_type_conversion_p (type, itype))
call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
@@ -6160,7 +7190,10 @@ expand_omp_atomic_store (basic_block load_bb, tree addr,
if (!useless_type_conversion_p (itype, type))
stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
- build_int_cst (NULL, MEMMODEL_RELAXED));
+ build_int_cst (NULL,
+ gimple_omp_atomic_seq_cst_p (stmt)
+ ? MEMMODEL_SEQ_CST
+ : MEMMODEL_RELAXED));
if (exchange)
{
if (!useless_type_conversion_p (type, itype))
@@ -6201,6 +7234,7 @@ expand_omp_atomic_fetch_op (basic_block load_bb,
enum tree_code code;
bool need_old, need_new;
enum machine_mode imode;
+ bool seq_cst;
/* We expect to find the following sequences:
@@ -6226,6 +7260,7 @@ expand_omp_atomic_fetch_op (basic_block load_bb,
return false;
need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
+ seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
gcc_checking_assert (!need_old || !need_new);
if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
@@ -6292,7 +7327,9 @@ expand_omp_atomic_fetch_op (basic_block load_bb,
use the RELAXED memory model. */
call = build_call_expr_loc (loc, decl, 3, addr,
fold_convert_loc (loc, itype, rhs),
- build_int_cst (NULL, MEMMODEL_RELAXED));
+ build_int_cst (NULL,
+ seq_cst ? MEMMODEL_SEQ_CST
+ : MEMMODEL_RELAXED));
if (need_old || need_new)
{
@@ -6622,6 +7659,321 @@ expand_omp_atomic (struct omp_region *region)
}
+/* Expand the OpenMP target{, data, update} directive starting at REGION. */
+
+static void
+expand_omp_target (struct omp_region *region)
+{
+ basic_block entry_bb, exit_bb, new_bb;
+ struct function *child_cfun = NULL;
+ tree child_fn = NULL_TREE, block, t;
+ gimple_stmt_iterator gsi;
+ gimple entry_stmt, stmt;
+ edge e;
+
+ entry_stmt = last_stmt (region->entry);
+ new_bb = region->entry;
+ int kind = gimple_omp_target_kind (entry_stmt);
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ child_fn = gimple_omp_target_child_fn (entry_stmt);
+ child_cfun = DECL_STRUCT_FUNCTION (child_fn);
+ }
+
+ entry_bb = region->entry;
+ exit_bb = region->exit;
+
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ unsigned srcidx, dstidx, num;
+
+ /* If the target region needs data sent from the parent
+ function, then the very first statement (except possible
+ tree profile counter updates) of the target body
+ is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
+ &.OMP_DATA_O is passed as an argument to the child function,
+ we need to replace it with the argument as seen by the child
+ function.
+
+ In most cases, this will end up being the identity assignment
+ .OMP_DATA_I = .OMP_DATA_I. However, if the target body had
+ a function call that has been inlined, the original PARM_DECL
+ .OMP_DATA_I may have been converted into a different local
+ variable, in which case we need to keep the assignment. */
+ if (gimple_omp_target_data_arg (entry_stmt))
+ {
+ basic_block entry_succ_bb = single_succ (entry_bb);
+ gimple_stmt_iterator gsi;
+ tree arg;
+ gimple tgtcopy_stmt = NULL;
+ tree sender
+ = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt), 0);
+
+ for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
+ {
+ gcc_assert (!gsi_end_p (gsi));
+ stmt = gsi_stmt (gsi);
+ if (gimple_code (stmt) != GIMPLE_ASSIGN)
+ continue;
+
+ if (gimple_num_ops (stmt) == 2)
+ {
+ tree arg = gimple_assign_rhs1 (stmt);
+
+ /* We're ignoring the subcode because we're
+ effectively doing a STRIP_NOPS. */
+
+ if (TREE_CODE (arg) == ADDR_EXPR
+ && TREE_OPERAND (arg, 0) == sender)
+ {
+ tgtcopy_stmt = stmt;
+ break;
+ }
+ }
+ }
+
+ gcc_assert (tgtcopy_stmt != NULL);
+ arg = DECL_ARGUMENTS (child_fn);
+
+ gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
+ gsi_remove (&gsi, true);
+ }
+
+ /* Declare local variables needed in CHILD_CFUN. */
+ block = DECL_INITIAL (child_fn);
+ BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
+ /* The gimplifier could record temporaries in the target block
+ rather than in the containing function's local_decls chain,
+ in which case cgraph would miss finalizing them. Do it now. */
+ for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
+ if (TREE_CODE (t) == VAR_DECL
+ && TREE_STATIC (t)
+ && !DECL_EXTERNAL (t))
+ varpool_finalize_decl (t);
+ DECL_SAVED_TREE (child_fn) = NULL;
+ /* We'll create a CFG for child_fn, so no gimple body is needed. */
+ gimple_set_body (child_fn, NULL);
+ TREE_USED (block) = 1;
+
+ /* Reset DECL_CONTEXT on function arguments. */
+ for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
+ DECL_CONTEXT (t) = child_fn;
+
+ /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
+ so that it can be moved to the child function. */
+ gsi = gsi_last_bb (entry_bb);
+ stmt = gsi_stmt (gsi);
+ gcc_assert (stmt && gimple_code (stmt) == GIMPLE_OMP_TARGET
+ && gimple_omp_target_kind (stmt)
+ == GF_OMP_TARGET_KIND_REGION);
+ gsi_remove (&gsi, true);
+ e = split_block (entry_bb, stmt);
+ entry_bb = e->dest;
+ single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
+
+ /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
+ if (exit_bb)
+ {
+ gsi = gsi_last_bb (exit_bb);
+ gcc_assert (!gsi_end_p (gsi)
+ && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
+ stmt = gimple_build_return (NULL);
+ gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
+ gsi_remove (&gsi, true);
+ }
+
+ /* Move the target region into CHILD_CFUN. */
+
+ block = gimple_block (entry_stmt);
+
+ new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
+ if (exit_bb)
+ single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
+ /* When the OMP expansion process cannot guarantee an up-to-date
+ loop tree, arrange for the child function to fix up loops. */
+ if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
+ child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
+
+ /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
+ num = vec_safe_length (child_cfun->local_decls);
+ for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
+ {
+ t = (*child_cfun->local_decls)[srcidx];
+ if (DECL_CONTEXT (t) == cfun->decl)
+ continue;
+ if (srcidx != dstidx)
+ (*child_cfun->local_decls)[dstidx] = t;
+ dstidx++;
+ }
+ if (dstidx != num)
+ vec_safe_truncate (child_cfun->local_decls, dstidx);
+
+ /* Inform the callgraph about the new function. */
+ DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
+ cgraph_add_new_function (child_fn, true);
+
+ /* Fix the callgraph edges for child_cfun. Those for cfun will be
+ fixed in a following pass. */
+ push_cfun (child_cfun);
+ rebuild_cgraph_edges ();
+
+ /* Some EH regions might become dead, see PR34608. If
+ pass_cleanup_cfg isn't the first pass to happen with the
+ new child, these dead EH edges might cause problems.
+ Clean them up now. */
+ if (flag_exceptions)
+ {
+ basic_block bb;
+ bool changed = false;
+
+ FOR_EACH_BB (bb)
+ changed |= gimple_purge_dead_eh_edges (bb);
+ if (changed)
+ cleanup_tree_cfg ();
+ }
+ pop_cfun ();
+ }
+
+ /* Emit a library call to launch the target region, or do data
+ transfers. */
+ tree t1, t2, t3, t4, device, cond, c, clauses;
+ enum built_in_function start_ix;
+ location_t clause_loc;
+
+ clauses = gimple_omp_target_clauses (entry_stmt);
+
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ start_ix = BUILT_IN_GOMP_TARGET;
+ else if (kind == GF_OMP_TARGET_KIND_DATA)
+ start_ix = BUILT_IN_GOMP_TARGET_DATA;
+ else
+ start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
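+ /* I.e., roughly:
+ #pragma omp target -> GOMP_target (...)
+ #pragma omp target data -> GOMP_target_data (...)
+ #pragma omp target update -> GOMP_target_update (...) */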
+
+ /* By default, the value of DEVICE is -1 (let runtime library choose)
+ and there is no conditional. */
+ cond = NULL_TREE;
+ device = build_int_cst (integer_type_node, -1);
+
+ c = find_omp_clause (clauses, OMP_CLAUSE_IF);
+ if (c)
+ cond = OMP_CLAUSE_IF_EXPR (c);
+
+ c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
+ if (c)
+ {
+ device = OMP_CLAUSE_DEVICE_ID (c);
+ clause_loc = OMP_CLAUSE_LOCATION (c);
+ }
+ else
+ clause_loc = gimple_location (entry_stmt);
+
+ /* Ensure 'device' is of the correct type. */
+ device = fold_convert_loc (clause_loc, integer_type_node, device);
+
+ /* If we found the clause 'if (cond)', build
+ (cond ? device : -2). */
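+ /* A sketch of the diamond built below:
+ cond_bb: if (cond) goto then_bb; else goto else_bb;
+ then_bb: tmp_var = device;
+ else_bb: tmp_var = -2;
+ where -2 tells the runtime to fall back to host execution. */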
+ if (cond)
+ {
+ cond = gimple_boolify (cond);
+
+ basic_block cond_bb, then_bb, else_bb;
+ edge e;
+ tree tmp_var;
+
+ tmp_var = create_tmp_var (TREE_TYPE (device), NULL);
+ if (kind != GF_OMP_TARGET_KIND_REGION)
+ {
+ gsi = gsi_last_bb (new_bb);
+ gsi_prev (&gsi);
+ e = split_block (new_bb, gsi_stmt (gsi));
+ }
+ else
+ e = split_block (new_bb, NULL);
+ cond_bb = e->src;
+ new_bb = e->dest;
+ remove_edge (e);
+
+ then_bb = create_empty_bb (cond_bb);
+ else_bb = create_empty_bb (then_bb);
+ set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
+ set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
+
+ stmt = gimple_build_cond_empty (cond);
+ gsi = gsi_last_bb (cond_bb);
+ gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+
+ gsi = gsi_start_bb (then_bb);
+ stmt = gimple_build_assign (tmp_var, device);
+ gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+
+ gsi = gsi_start_bb (else_bb);
+ stmt = gimple_build_assign (tmp_var,
+ build_int_cst (integer_type_node, -2));
+ gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+
+ make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
+ make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
+ if (current_loops)
+ {
+ add_bb_to_loop (then_bb, cond_bb->loop_father);
+ add_bb_to_loop (else_bb, cond_bb->loop_father);
+ }
+ make_edge (then_bb, new_bb, EDGE_FALLTHRU);
+ make_edge (else_bb, new_bb, EDGE_FALLTHRU);
+
+ device = tmp_var;
+ }
+
+ gsi = gsi_last_bb (new_bb);
+ t = gimple_omp_target_data_arg (entry_stmt);
+ if (t == NULL)
+ {
+ t1 = size_zero_node;
+ t2 = build_zero_cst (ptr_type_node);
+ t3 = t2;
+ t4 = t2;
+ }
+ else
+ {
+ t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
+ t1 = size_binop (PLUS_EXPR, t1, size_int (1));
+ t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
+ t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
+ t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
+ }
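+ /* T1 is the number of mapped entries (the size array's domain plus
+ one), and T2/T3/T4 are the addresses of the .omp_data_arr,
+ .omp_data_sizes and .omp_data_kinds arrays set up in
+ lower_omp_target. */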
+
+ gimple g;
+ /* FIXME: This will be the address of the
+ extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
+ symbol, as soon as the linker plugin is able to create it for us. */
+ tree openmp_target = build_zero_cst (ptr_type_node);
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ tree fnaddr = build_fold_addr_expr (child_fn);
+ g = gimple_build_call (builtin_decl_explicit (start_ix), 7,
+ device, fnaddr, openmp_target, t1, t2, t3, t4);
+ }
+ else
+ g = gimple_build_call (builtin_decl_explicit (start_ix), 6,
+ device, openmp_target, t1, t2, t3, t4);
+ gimple_set_location (g, gimple_location (entry_stmt));
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ if (kind != GF_OMP_TARGET_KIND_REGION)
+ {
+ g = gsi_stmt (gsi);
+ gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
+ gsi_remove (&gsi, true);
+ }
+ if (kind == GF_OMP_TARGET_KIND_DATA && region->exit)
+ {
+ gsi = gsi_last_bb (region->exit);
+ g = gsi_stmt (gsi);
+ gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
+ gsi_remove (&gsi, true);
+ }
+}
+
+
/* Expand the parallel region tree rooted at REGION. Expansion
proceeds in depth-first order. Innermost regions are expanded
first. This way, parallel regions that require a new function to
@@ -6634,12 +7986,17 @@ expand_omp (struct omp_region *region)
while (region)
{
location_t saved_location;
+ gimple inner_stmt = NULL;
/* First, determine whether this is a combined parallel+workshare
region. */
if (region->type == GIMPLE_OMP_PARALLEL)
determine_parallel_type (region);
+ if (region->type == GIMPLE_OMP_FOR
+ && gimple_omp_for_combined_p (last_stmt (region->entry)))
+ inner_stmt = last_stmt (region->inner->entry);
+
if (region->inner)
expand_omp (region->inner);
@@ -6655,7 +8012,7 @@ expand_omp (struct omp_region *region)
break;
case GIMPLE_OMP_FOR:
- expand_omp_for (region);
+ expand_omp_for (region, inner_stmt);
break;
case GIMPLE_OMP_SECTIONS:
@@ -6672,8 +8029,10 @@ expand_omp (struct omp_region *region)
break;
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
+ case GIMPLE_OMP_TEAMS:
expand_omp_synch (region);
break;
@@ -6681,6 +8040,10 @@ expand_omp (struct omp_region *region)
expand_omp_atomic (region);
break;
+ case GIMPLE_OMP_TARGET:
+ expand_omp_target (region);
+ break;
+
default:
gcc_unreachable ();
}
@@ -6745,6 +8108,9 @@ build_omp_regions_1 (basic_block bb, struct omp_region *parent,
GIMPLE_OMP_SECTIONS, and we do nothing for it. */
;
}
+ else if (code == GIMPLE_OMP_TARGET
+ && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_UPDATE)
+ new_omp_region (bb, code, parent);
else
{
/* Otherwise, this directive becomes the parent for a new
@@ -6880,6 +8246,32 @@ make_pass_expand_omp (gcc::context *ctxt)
/* Routines to lower OpenMP directives into OMP-GIMPLE. */
+/* If CTX is a worksharing context inside of a cancellable parallel
+ region and it isn't nowait, add a LHS to its GIMPLE_OMP_RETURN and a
+ conditional branch to the parallel's cancel_label to handle
+ cancellation in the implicit barrier. */
+
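+/* Schematically (a sketch, not verbatim GIMPLE) the implicit barrier
+ then becomes:
+ lhs = GOMP_barrier_cancel ();
+ if (lhs != 0) goto cancel_label;
+ fallthru_label:;
+ with GOMP_loop_end_cancel or GOMP_sections_end_cancel taking the
+ place of GOMP_barrier_cancel in the corresponding worksharing
+ constructs. */
+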
+static void
+maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
+{
+ gimple omp_return = gimple_seq_last_stmt (*body);
+ gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
+ if (gimple_omp_return_nowait_p (omp_return))
+ return;
+ if (ctx->outer
+ && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
+ && ctx->outer->cancellable)
+ {
+ tree lhs = create_tmp_var (boolean_type_node, NULL);
+ gimple_omp_return_set_lhs (omp_return, lhs);
+ tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
+ gimple g = gimple_build_cond (NE_EXPR, lhs, boolean_false_node,
+ ctx->outer->cancel_label, fallthru_label);
+ gimple_seq_add_stmt (body, g);
+ gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
+ }
+}
+
/* Lower the OpenMP sections directive in the current statement in GSI_P.
CTX is the enclosing OMP context for the current statement. */
@@ -6899,7 +8291,7 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
dlist = NULL;
ilist = NULL;
lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
- &ilist, &dlist, ctx);
+ &ilist, &dlist, ctx, NULL);
new_body = gimple_omp_body (stmt);
gimple_omp_set_body (stmt, NULL);
@@ -6959,6 +8351,8 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_seq_add_stmt (&new_body, t);
gimple_seq_add_seq (&new_body, olist);
+ if (ctx->cancellable)
+ gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
gimple_seq_add_seq (&new_body, dlist);
new_body = maybe_catch_exception (new_body);
@@ -6967,6 +8361,7 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
(!!find_omp_clause (gimple_omp_sections_clauses (stmt),
OMP_CLAUSE_NOWAIT));
gimple_seq_add_stmt (&new_body, t);
+ maybe_add_implicit_barrier_cancel (ctx, &new_body);
gimple_bind_set_body (new_stmt, new_body);
}
@@ -7096,7 +8491,7 @@ lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block;
gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
- gimple_seq bind_body, dlist;
+ gimple_seq bind_body, bind_body_tail = NULL, dlist;
struct gimplify_ctx gctx;
push_gimplify_context (&gctx);
@@ -7107,7 +8502,7 @@ lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
bind_body = NULL;
dlist = NULL;
lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
- &bind_body, &dlist, ctx);
+ &bind_body, &dlist, ctx, NULL);
lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
gimple_seq_add_stmt (&bind_body, single_stmt);
@@ -7126,7 +8521,17 @@ lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
t = gimple_build_omp_return
(!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
OMP_CLAUSE_NOWAIT));
- gimple_seq_add_stmt (&bind_body, t);
+ gimple_seq_add_stmt (&bind_body_tail, t);
+ maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
+ if (ctx->record_type)
+ {
+ gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
+ tree clobber = build_constructor (ctx->record_type, NULL);
+ TREE_THIS_VOLATILE (clobber) = 1;
+ gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
+ clobber), GSI_SAME_STMT);
+ }
+ gimple_seq_add_seq (&bind_body, bind_body_tail);
gimple_bind_set_body (bind, bind_body);
pop_gimplify_context (bind);
@@ -7180,6 +8585,33 @@ lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
}
+/* Expand code for an OpenMP taskgroup directive. */
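+
+/* Roughly, the lowered form is (a sketch; the matching
+ GOMP_taskgroup_end call is not emitted here, and is assumed to be
+ wrapped around the body elsewhere, e.g. as a cleanup added during
+ gimplification):
+ GOMP_taskgroup_start ();
+ <lowered body>; */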
+
+static void
+lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
+{
+ gimple stmt = gsi_stmt (*gsi_p), bind, x;
+ tree block = make_node (BLOCK);
+
+ bind = gimple_build_bind (NULL, NULL, block);
+ gsi_replace (gsi_p, bind, true);
+ gimple_bind_add_stmt (bind, stmt);
+
+ x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
+ 0);
+ gimple_bind_add_stmt (bind, x);
+
+ lower_omp (gimple_omp_body_ptr (stmt), ctx);
+ gimple_bind_add_seq (bind, gimple_omp_body (stmt));
+ gimple_omp_set_body (stmt, NULL);
+
+ gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
+
+ gimple_bind_append_vars (bind, ctx->block_vars);
+ BLOCK_VARS (block) = ctx->block_vars;
+}
+
+
/* Expand code for an OpenMP ordered directive. */
static void
@@ -7372,7 +8804,7 @@ static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree *rhs_p, block;
- struct omp_for_data fd;
+ struct omp_for_data fd, *fdp = NULL;
gimple stmt = gsi_stmt (*gsi_p), new_stmt;
gimple_seq omp_for_body, body, dlist;
size_t i;
@@ -7399,10 +8831,50 @@ lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_bind_append_vars (new_stmt, vars);
}
+ if (gimple_omp_for_combined_into_p (stmt))
+ {
+ extract_omp_for_data (stmt, &fd, NULL);
+ fdp = &fd;
+
+ /* We need two temporaries with fd.loop.v type (istart/iend)
+ and then (fd.collapse - 1) temporaries with the same
+ type for count2 ... countN-1 vars if not constant. */
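+ /* For a combined construct such as '#pragma omp parallel for', the
+ enclosing parallel already carries matching _LOOPTEMP_ clauses,
+ so their decls are reused below instead of fresh temporaries. */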
+ size_t count = 2;
+ tree type = fd.iter_type;
+ if (fd.collapse > 1
+ && TREE_CODE (fd.loop.n2) != INTEGER_CST)
+ count += fd.collapse - 1;
+ bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
+ tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
+ tree clauses = *pc;
+ if (parallel_for)
+ outerc
+ = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ for (i = 0; i < count; i++)
+ {
+ tree temp;
+ if (parallel_for)
+ {
+ gcc_assert (outerc);
+ temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
+ outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ }
+ else
+ temp = create_tmp_var (type, NULL);
+ *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
+ OMP_CLAUSE_DECL (*pc) = temp;
+ pc = &OMP_CLAUSE_CHAIN (*pc);
+ }
+ *pc = clauses;
+ }
+
/* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
dlist = NULL;
body = NULL;
- lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
+ lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
+ fdp);
gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
lower_omp (gimple_omp_body_ptr (stmt), ctx);
@@ -7442,13 +8914,17 @@ lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
/* After the loop, add exit clauses. */
lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
+
+ if (ctx->cancellable)
+ gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
+
gimple_seq_add_seq (&body, dlist);
body = maybe_catch_exception (body);
/* Region exit marker goes at the end of the loop body. */
gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
-
+ maybe_add_implicit_barrier_cancel (ctx, &body);
pop_gimplify_context (new_stmt);
gimple_bind_append_vars (new_stmt, ctx->block_vars);
@@ -7769,6 +9245,68 @@ create_task_copyfn (gimple task_stmt, omp_context *ctx)
pop_cfun ();
}
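+
+/* Lower a task's depend clauses into the array argument passed to
+ GOMP_task. A sketch of the layout built in *ISEQ below:
+ array[0] = number of depend clauses
+ array[1] = number of out/inout clauses
+ array[2 ...] = addresses of the out/inout operands, followed by
+ the addresses of the in operands.
+ A clobber of the array is queued in *OSEQ for after task creation. */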
+static void
+lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
+{
+ tree c, clauses;
+ gimple g;
+ size_t n_in = 0, n_out = 0, idx = 2, i;
+
+ clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
+ OMP_CLAUSE_DEPEND);
+ gcc_assert (clauses);
+ for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
+ switch (OMP_CLAUSE_DEPEND_KIND (c))
+ {
+ case OMP_CLAUSE_DEPEND_IN:
+ n_in++;
+ break;
+ case OMP_CLAUSE_DEPEND_OUT:
+ case OMP_CLAUSE_DEPEND_INOUT:
+ n_out++;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
+ tree array = create_tmp_var (type, NULL);
+ tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
+ NULL_TREE);
+ g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
+ gimple_seq_add_stmt (iseq, g);
+ r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
+ NULL_TREE);
+ g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
+ gimple_seq_add_stmt (iseq, g);
+ for (i = 0; i < 2; i++)
+ {
+ if ((i ? n_in : n_out) == 0)
+ continue;
+ for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
+ && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
+ {
+ tree t = OMP_CLAUSE_DECL (c);
+ t = fold_convert (ptr_type_node, t);
+ gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
+ r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
+ NULL_TREE, NULL_TREE);
+ g = gimple_build_assign (r, t);
+ gimple_seq_add_stmt (iseq, g);
+ }
+ }
+ tree *p = gimple_omp_task_clauses_ptr (stmt);
+ c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
+ OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
+ OMP_CLAUSE_CHAIN (c) = *p;
+ *p = c;
+ tree clobber = build_constructor (type, NULL);
+ TREE_THIS_VOLATILE (clobber) = 1;
+ g = gimple_build_assign (array, clobber);
+ gimple_seq_add_stmt (oseq, g);
+}
+
/* Lower the OpenMP parallel or task directive in the current statement
in GSI_P. CTX holds context information for the directive. */
@@ -7778,9 +9316,9 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
tree clauses;
tree child_fn, t;
gimple stmt = gsi_stmt (*gsi_p);
- gimple par_bind, bind;
- gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
- struct gimplify_ctx gctx;
+ gimple par_bind, bind, dep_bind = NULL;
+ gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
+ struct gimplify_ctx gctx, dep_gctx;
location_t loc = gimple_location (stmt);
clauses = gimple_omp_taskreg_clauses (stmt);
@@ -7800,6 +9338,16 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
if (ws_num == 1)
gimple_omp_parallel_set_combined_p (stmt, true);
}
+ gimple_seq dep_ilist = NULL;
+ gimple_seq dep_olist = NULL;
+ if (gimple_code (stmt) == GIMPLE_OMP_TASK
+ && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
+ {
+ push_gimplify_context (&dep_gctx);
+ dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
+ lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
+ }
+
if (ctx->srecord_type)
create_task_copyfn (stmt, ctx);
@@ -7807,10 +9355,11 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
par_olist = NULL;
par_ilist = NULL;
- lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
+ par_rlist = NULL;
+ lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
lower_omp (&par_body, ctx);
if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
- lower_reduction_clauses (clauses, &par_olist, ctx);
+ lower_reduction_clauses (clauses, &par_rlist, ctx);
/* Declare all the variables created by mapping and the variables
declared in the scope of the parallel body. */
@@ -7832,6 +9381,14 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
lower_send_clauses (clauses, &ilist, &olist, ctx);
lower_send_shared_vars (&ilist, &olist, ctx);
+ if (ctx->record_type)
+ {
+ tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
+ TREE_THIS_VOLATILE (clobber) = 1;
+ gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
+ clobber));
+ }
+
/* Once all the expansions are done, sequence all the different
fragments inside gimple_omp_body. */
@@ -7848,12 +9405,329 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_seq_add_seq (&new_body, par_ilist);
gimple_seq_add_seq (&new_body, par_body);
+ gimple_seq_add_seq (&new_body, par_rlist);
+ if (ctx->cancellable)
+ gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
gimple_seq_add_seq (&new_body, par_olist);
new_body = maybe_catch_exception (new_body);
gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
gimple_omp_set_body (stmt, new_body);
bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
+ gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
+ gimple_bind_add_seq (bind, ilist);
+ gimple_bind_add_stmt (bind, stmt);
+ gimple_bind_add_seq (bind, olist);
+
+ pop_gimplify_context (NULL);
+
+ if (dep_bind)
+ {
+ gimple_bind_add_seq (dep_bind, dep_ilist);
+ gimple_bind_add_stmt (dep_bind, bind);
+ gimple_bind_add_seq (dep_bind, dep_olist);
+ pop_gimplify_context (dep_bind);
+ }
+}
+
+/* Lower the OpenMP target directive in the current statement
+ in GSI_P. CTX holds context information for the directive. */
+
+static void
+lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
+{
+ tree clauses;
+ tree child_fn, t, c;
+ gimple stmt = gsi_stmt (*gsi_p);
+ gimple tgt_bind = NULL, bind;
+ gimple_seq tgt_body = NULL, olist, ilist, new_body;
+ struct gimplify_ctx gctx;
+ location_t loc = gimple_location (stmt);
+ int kind = gimple_omp_target_kind (stmt);
+ unsigned int map_cnt = 0;
+
+ clauses = gimple_omp_target_clauses (stmt);
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ tgt_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
+ tgt_body = gimple_bind_body (tgt_bind);
+ }
+ else if (kind == GF_OMP_TARGET_KIND_DATA)
+ tgt_body = gimple_omp_body (stmt);
+ child_fn = ctx->cb.dst_fn;
+
+ push_gimplify_context (&gctx);
+
+ for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
+ switch (OMP_CLAUSE_CODE (c))
+ {
+ tree var, x;
+
+ default:
+ break;
+ case OMP_CLAUSE_MAP:
+ case OMP_CLAUSE_TO:
+ case OMP_CLAUSE_FROM:
+ var = OMP_CLAUSE_DECL (c);
+ if (!DECL_P (var))
+ {
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
+ || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
+ map_cnt++;
+ continue;
+ }
+
+ if (DECL_SIZE (var)
+ && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
+ {
+ tree var2 = DECL_VALUE_EXPR (var);
+ gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
+ var2 = TREE_OPERAND (var2, 0);
+ gcc_assert (DECL_P (var2));
+ var = var2;
+ }
+
+ if (!maybe_lookup_field (var, ctx))
+ continue;
+
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ x = build_receiver_ref (var, true, ctx);
+ tree new_var = lookup_decl (var, ctx);
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
+ && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
+ && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
+ x = build_simple_mem_ref (x);
+ SET_DECL_VALUE_EXPR (new_var, x);
+ DECL_HAS_VALUE_EXPR_P (new_var) = 1;
+ }
+ map_cnt++;
+ }
+
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ target_nesting_level++;
+ lower_omp (&tgt_body, ctx);
+ target_nesting_level--;
+ }
+ else if (kind == GF_OMP_TARGET_KIND_DATA)
+ lower_omp (&tgt_body, ctx);
+
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ /* Declare all the variables created by mapping and the variables
+ declared in the scope of the target body. */
+ record_vars_into (ctx->block_vars, child_fn);
+ record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
+ }
+
+ olist = NULL;
+ ilist = NULL;
+ if (ctx->record_type)
+ {
+ ctx->sender_decl
+ = create_tmp_var (ctx->record_type, ".omp_data_arr");
+ DECL_NAMELESS (ctx->sender_decl) = 1;
+ TREE_ADDRESSABLE (ctx->sender_decl) = 1;
+ t = make_tree_vec (3);
+ TREE_VEC_ELT (t, 0) = ctx->sender_decl;
+ TREE_VEC_ELT (t, 1)
+ = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
+ ".omp_data_sizes");
+ DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
+ TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
+ TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
+ TREE_VEC_ELT (t, 2)
+ = create_tmp_var (build_array_type_nelts (unsigned_char_type_node,
+ map_cnt),
+ ".omp_data_kinds");
+ DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
+ TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
+ TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
+ gimple_omp_target_set_data_arg (stmt, t);
+
+ vec<constructor_elt, va_gc> *vsize;
+ vec<constructor_elt, va_gc> *vkind;
+ vec_alloc (vsize, map_cnt);
+ vec_alloc (vkind, map_cnt);
+ unsigned int map_idx = 0;
+
+ for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
+ switch (OMP_CLAUSE_CODE (c))
+ {
+ tree ovar, nc;
+
+ default:
+ break;
+ case OMP_CLAUSE_MAP:
+ case OMP_CLAUSE_TO:
+ case OMP_CLAUSE_FROM:
+ nc = c;
+ ovar = OMP_CLAUSE_DECL (c);
+ if (!DECL_P (ovar))
+ {
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
+ {
+ gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
+ == get_base_address (ovar));
+ nc = OMP_CLAUSE_CHAIN (c);
+ ovar = OMP_CLAUSE_DECL (nc);
+ }
+ else
+ {
+ tree x = build_sender_ref (ovar, ctx);
+ tree v
+ = build_fold_addr_expr_with_type (ovar, ptr_type_node);
+ gimplify_assign (x, v, &ilist);
+ nc = NULL_TREE;
+ }
+ }
+ else
+ {
+ if (DECL_SIZE (ovar)
+ && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
+ {
+ tree ovar2 = DECL_VALUE_EXPR (ovar);
+ gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
+ ovar2 = TREE_OPERAND (ovar2, 0);
+ gcc_assert (DECL_P (ovar2));
+ ovar = ovar2;
+ }
+ if (!maybe_lookup_field (ovar, ctx))
+ continue;
+ }
+
+ if (nc)
+ {
+ tree var = lookup_decl_in_outer_ctx (ovar, ctx);
+ tree x = build_sender_ref (ovar, ctx);
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
+ && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
+ && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
+ {
+ gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
+ tree avar
+ = create_tmp_var (TREE_TYPE (TREE_TYPE (x)), NULL);
+ mark_addressable (avar);
+ gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
+ avar = build_fold_addr_expr (avar);
+ gimplify_assign (x, avar, &ilist);
+ }
+ else if (is_gimple_reg (var))
+ {
+ gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
+ tree avar = create_tmp_var (TREE_TYPE (var), NULL);
+ mark_addressable (avar);
+ if (OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_ALLOC
+ && OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_FROM)
+ gimplify_assign (avar, var, &ilist);
+ avar = build_fold_addr_expr (avar);
+ gimplify_assign (x, avar, &ilist);
+ if ((OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_FROM
+ || OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_TOFROM)
+ && !TYPE_READONLY (TREE_TYPE (var)))
+ {
+ x = build_sender_ref (ovar, ctx);
+ x = build_simple_mem_ref (x);
+ gimplify_assign (var, x, &olist);
+ }
+ }
+ else
+ {
+ var = build_fold_addr_expr (var);
+ gimplify_assign (x, var, &ilist);
+ }
+ }
+ tree s = OMP_CLAUSE_SIZE (c);
+ if (s == NULL_TREE)
+ s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
+ s = fold_convert (size_type_node, s);
+ tree purpose = size_int (map_idx++);
+ CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
+ if (TREE_CODE (s) != INTEGER_CST)
+ TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
+
+ unsigned char tkind = 0;
+ switch (OMP_CLAUSE_CODE (c))
+ {
+ case OMP_CLAUSE_MAP:
+ tkind = OMP_CLAUSE_MAP_KIND (c);
+ break;
+ case OMP_CLAUSE_TO:
+ tkind = OMP_CLAUSE_MAP_TO;
+ break;
+ case OMP_CLAUSE_FROM:
+ tkind = OMP_CLAUSE_MAP_FROM;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
+ if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
+ talign = DECL_ALIGN_UNIT (ovar);
+ talign = ceil_log2 (talign);
+ tkind |= talign << 3;
+ CONSTRUCTOR_APPEND_ELT (vkind, purpose,
+ build_int_cst (unsigned_char_type_node,
+ tkind));
+ if (nc && nc != c)
+ c = nc;
+ }
+
+ gcc_assert (map_idx == map_cnt);
+
+ DECL_INITIAL (TREE_VEC_ELT (t, 1))
+ = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
+ DECL_INITIAL (TREE_VEC_ELT (t, 2))
+ = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
+ if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
+ {
+ gimple_seq initlist = NULL;
+ force_gimple_operand (build1 (DECL_EXPR, void_type_node,
+ TREE_VEC_ELT (t, 1)),
+ &initlist, true, NULL_TREE);
+ gimple_seq_add_seq (&ilist, initlist);
+ }
+
+ tree clobber = build_constructor (ctx->record_type, NULL);
+ TREE_THIS_VOLATILE (clobber) = 1;
+ gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
+ clobber));
+ }
+
+ /* Once all the expansions are done, sequence all the different
+ fragments inside gimple_omp_body. */
+
+ new_body = NULL;
+
+ if (ctx->record_type && kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
+ /* fixup_child_record_type might have changed receiver_decl's type. */
+ t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
+ gimple_seq_add_stmt (&new_body,
+ gimple_build_assign (ctx->receiver_decl, t));
+ }
+
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ gimple_seq_add_seq (&new_body, tgt_body);
+ new_body = maybe_catch_exception (new_body);
+ }
+ else if (kind == GF_OMP_TARGET_KIND_DATA)
+ new_body = tgt_body;
+ if (kind != GF_OMP_TARGET_KIND_UPDATE)
+ {
+ gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
+ gimple_omp_set_body (stmt, new_body);
+ }
+
+ bind = gimple_build_bind (NULL, NULL,
+ tgt_bind ? gimple_bind_block (tgt_bind)
+ : NULL_TREE);
gsi_replace (gsi_p, bind, true);
gimple_bind_add_seq (bind, ilist);
gimple_bind_add_stmt (bind, stmt);
@@ -7862,6 +9736,72 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
pop_gimplify_context (NULL);
}
+/* Expand code for an OpenMP teams directive. */
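+/* Roughly, the lowered form is (a sketch):
+ num_teams = <NUM_TEAMS clause value, or 0 if absent>;
+ thread_limit = <THREAD_LIMIT clause value, or 0 if absent>;
+ GOMP_teams (num_teams, thread_limit);
+ <body>; */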
+
+static void
+lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
+{
+ gimple teams_stmt = gsi_stmt (*gsi_p);
+ struct gimplify_ctx gctx;
+ push_gimplify_context (&gctx);
+
+ tree block = make_node (BLOCK);
+ gimple bind = gimple_build_bind (NULL, NULL, block);
+ gsi_replace (gsi_p, bind, true);
+ gimple_seq bind_body = NULL;
+ gimple_seq dlist = NULL;
+ gimple_seq olist = NULL;
+
+ tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
+ OMP_CLAUSE_NUM_TEAMS);
+ if (num_teams == NULL_TREE)
+ num_teams = build_int_cst (unsigned_type_node, 0);
+ else
+ {
+ num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
+ num_teams = fold_convert (unsigned_type_node, num_teams);
+ gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
+ }
+ tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
+ OMP_CLAUSE_THREAD_LIMIT);
+ if (thread_limit == NULL_TREE)
+ thread_limit = build_int_cst (unsigned_type_node, 0);
+ else
+ {
+ thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
+ thread_limit = fold_convert (unsigned_type_node, thread_limit);
+ gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
+ fb_rvalue);
+ }
+
+ lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
+ &bind_body, &dlist, ctx, NULL);
+ lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
+ lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
+ gimple_seq_add_stmt (&bind_body, teams_stmt);
+
+ location_t loc = gimple_location (teams_stmt);
+ tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
+ gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
+ gimple_set_location (call, loc);
+ gimple_seq_add_stmt (&bind_body, call);
+
+ gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
+ gimple_omp_set_body (teams_stmt, NULL);
+ gimple_seq_add_seq (&bind_body, olist);
+ gimple_seq_add_seq (&bind_body, dlist);
+ gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
+ gimple_bind_set_body (bind, bind_body);
+
+ pop_gimplify_context (bind);
+
+ gimple_bind_append_vars (bind, ctx->block_vars);
+ BLOCK_VARS (block) = ctx->block_vars;
+ if (BLOCK_VARS (block))
+ TREE_USED (block) = 1;
+}
+
+
/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
regimplified. If DATA is non-NULL, lower_omp_1 is outside
of OpenMP context, but with task_shared_vars set. */
@@ -7940,16 +9880,23 @@ lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
ctx = maybe_lookup_ctx (stmt);
+ gcc_assert (ctx);
+ if (ctx->cancellable)
+ ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
lower_omp_taskreg (gsi_p, ctx);
break;
case GIMPLE_OMP_FOR:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
+ if (ctx->cancellable)
+ ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
lower_omp_for (gsi_p, ctx);
break;
case GIMPLE_OMP_SECTIONS:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
+ if (ctx->cancellable)
+ ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
lower_omp_sections (gsi_p, ctx);
break;
case GIMPLE_OMP_SINGLE:
@@ -7962,6 +9909,11 @@ lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gcc_assert (ctx);
lower_omp_master (gsi_p, ctx);
break;
+ case GIMPLE_OMP_TASKGROUP:
+ ctx = maybe_lookup_ctx (stmt);
+ gcc_assert (ctx);
+ lower_omp_taskgroup (gsi_p, ctx);
+ break;
case GIMPLE_OMP_ORDERED:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
@@ -7978,6 +9930,66 @@ lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
gimple_regimplify_operands (stmt, gsi_p);
break;
+ case GIMPLE_OMP_TARGET:
+ ctx = maybe_lookup_ctx (stmt);
+ gcc_assert (ctx);
+ lower_omp_target (gsi_p, ctx);
+ break;
+ case GIMPLE_OMP_TEAMS:
+ ctx = maybe_lookup_ctx (stmt);
+ gcc_assert (ctx);
+ lower_omp_teams (gsi_p, ctx);
+ break;
+ case GIMPLE_CALL:
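+ /* Inside a cancellable construct, rewrite barrier and cancellation
+ calls so that the cancelled flag is tested; schematically:
+ GOMP_barrier () becomes lhs = GOMP_barrier_cancel ();
+ if (lhs != 0) goto cancel_label;
+ A GOMP_cancellation_point call in a non-cancellable context is
+ replaced by a no-op. */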
+ tree fndecl;
+ fndecl = gimple_call_fndecl (stmt);
+ if (fndecl
+ && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_GOMP_BARRIER:
+ if (ctx == NULL)
+ break;
+ /* FALLTHRU */
+ case BUILT_IN_GOMP_CANCEL:
+ case BUILT_IN_GOMP_CANCELLATION_POINT:
+ omp_context *cctx;
+ cctx = ctx;
+ if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
+ cctx = cctx->outer;
+ gcc_assert (gimple_call_lhs (stmt) == NULL_TREE);
+ if (!cctx->cancellable)
+ {
+ if (DECL_FUNCTION_CODE (fndecl)
+ == BUILT_IN_GOMP_CANCELLATION_POINT)
+ {
+ stmt = gimple_build_nop ();
+ gsi_replace (gsi_p, stmt, false);
+ }
+ break;
+ }
+ tree lhs;
+ lhs = create_tmp_var (boolean_type_node, NULL);
+ if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
+ {
+ fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
+ gimple_call_set_fndecl (stmt, fndecl);
+ gimple_call_set_fntype (stmt, TREE_TYPE (fndecl));
+ }
+ gimple_call_set_lhs (stmt, lhs);
+ tree fallthru_label;
+ fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
+ gimple g;
+ g = gimple_build_label (fallthru_label);
+ gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
+ g = gimple_build_cond (NE_EXPR, lhs, boolean_false_node,
+ cctx->cancel_label, fallthru_label);
+ gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
+ break;
+ default:
+ break;
+ }
+ /* FALLTHRU */
default:
if ((ctx || task_shared_vars)
&& walk_gimple_op (stmt, lower_omp_regimplify_p,
@@ -7994,6 +10006,12 @@ lower_omp (gimple_seq *body, omp_context *ctx)
gimple_stmt_iterator gsi;
for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
lower_omp_1 (&gsi, ctx);
+ /* Inside a target region we haven't called fold_stmt during
+ gimplification, because it can break code by adding decl references
+ that weren't in the source. Call fold_stmt now. */
+ if (target_nesting_level)
+ for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
+ fold_stmt (&gsi);
input_location = saved_location;
}
@@ -8162,6 +10180,9 @@ diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
+ case GIMPLE_OMP_TARGET:
+ case GIMPLE_OMP_TEAMS:
+ case GIMPLE_OMP_TASKGROUP:
/* The minimal context here is just the current OMP construct. */
inner_context = stmt;
wi->info = inner_context;
@@ -8217,6 +10238,9 @@ diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
+ case GIMPLE_OMP_TARGET:
+ case GIMPLE_OMP_TEAMS:
+ case GIMPLE_OMP_TASKGROUP:
wi->info = stmt;
walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
wi->info = context;