author      Simon Marlow <marlowsd@gmail.com>    2008-11-18 14:24:42 +0000
committer   Simon Marlow <marlowsd@gmail.com>    2008-11-18 14:24:42 +0000
commit      d600bf7a6afdbfc4a22f9379406a9c6f789a4c2d (patch)
tree        fc86da89b8891374298c441d14d2333b33e29d53 /rts/StgMiscClosures.cmm
parent      0fa59deb44b8a1a0b44ee2b4cc4ae0db31dec038 (diff)
download    haskell-d600bf7a6afdbfc4a22f9379406a9c6f789a4c2d.tar.gz
Add optional eager black-holing, with new flag -feager-blackholing
Eager blackholing can improve parallel performance by reducing the chances that two threads perform the same computation. However, it has a cost: one extra memory write per thunk entry.

To get the best results, any code which may be executed in parallel should be compiled with eager blackholing turned on. But since there is a cost for sequential code, we make it optional and turn it on for the parallel package only. It might be a good idea to compile applications (or modules) that contain parallel code with -feager-blackholing.

ToDo: document -feager-blackholing.
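[Editor's illustration, not part of the patch: a minimal sketch of the intended usage, assuming -feager-blackholing can be enabled per-module via an OPTIONS_GHC pragma. The module, its names, and the list-splitting strategy are hypothetical; only the flag and Control.Parallel's par/pseq come from the patch and the parallel package.]

    {-# OPTIONS_GHC -feager-blackholing #-}
    -- Hypothetical module using the parallel package. The thunks sparked by
    -- `par` may be entered by two capabilities at once; with eager blackholing
    -- a thunk's header is overwritten with a blackhole on entry, so a second
    -- thread blocks on it instead of repeating the computation.
    module ParSum (parSum) where

    import Control.Parallel (par, pseq)

    -- Sum a list by evaluating the two halves in parallel.
    parSum :: [Int] -> Int
    parSum xs = left `par` (right `pseq` left + right)
      where
        (as, bs) = splitAt (length xs `div` 2) xs
        left     = sum as
        right    = sum bs

Purely sequential modules would be left without the flag, since each thunk entry then pays the extra memory write for no benefit.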
Diffstat (limited to 'rts/StgMiscClosures.cmm')
-rw-r--r--  rts/StgMiscClosures.cmm | 29
1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/rts/StgMiscClosures.cmm b/rts/StgMiscClosures.cmm
index d22a880917..7f7cf78f7b 100644
--- a/rts/StgMiscClosures.cmm
+++ b/rts/StgMiscClosures.cmm
@@ -384,14 +384,33 @@ INFO_TABLE(stg_CAF_BLACKHOLE,0,1,CAF_BLACKHOLE,"CAF_BLACKHOLE","CAF_BLACKHOLE")
   jump stg_block_blackhole;
 }
 
-#ifdef EAGER_BLACKHOLING
-INFO_TABLE(stg_SE_BLACKHOLE,0,1,SE_BLACKHOLE,"SE_BLACKHOLE","SE_BLACKHOLE")
-{ foreign "C" barf("SE_BLACKHOLE object entered!") never returns; }
+INFO_TABLE(__stg_EAGER_BLACKHOLE,0,1,BLACKHOLE,"EAGER_BLACKHOLE","EAGER_BLACKHOLE")
+{
+  TICK_ENT_BH();
+
+#ifdef THREADED_RTS
+  // foreign "C" debugBelch("BLACKHOLE entry\n");
+#endif
 
-INFO_TABLE(stg_SE_CAF_BLACKHOLE,0,1,SE_CAF_BLACKHOLE,"SE_CAF_BLACKHOLE","SE_CAF_BLACKHOLE")
-{ foreign "C" barf("SE_CAF_BLACKHOLE object entered!") never returns; }
+  /* Actually this is not necessary because R1 is about to be destroyed. */
+  LDV_ENTER(R1);
+
+#if defined(THREADED_RTS)
+  ACQUIRE_LOCK(sched_mutex "ptr");
+  // released in stg_block_blackhole_finally
 #endif
 
+  /* Put ourselves on the blackhole queue */
+  StgTSO__link(CurrentTSO) = W_[blackhole_queue];
+  W_[blackhole_queue] = CurrentTSO;
+
+  /* jot down why and on what closure we are blocked */
+  StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
+  StgTSO_block_info(CurrentTSO) = R1;
+
+  jump stg_block_blackhole;
+}
+
 /* ----------------------------------------------------------------------------
    Whiteholes are used for the "locked" state of a closure (see lockClosure())
    ------------------------------------------------------------------------- */