author     Simon Marlow <marlowsd@gmail.com>   2012-10-03 09:30:56 +0100
committer  Simon Marlow <marlowsd@gmail.com>   2012-10-08 09:04:40 +0100
commit     a7c0387d20c1c9994d1100b14fbb8fb4e28a259e (patch)
tree       b95d0a512f951a4a463f1aa5178b0cd5c4fdb410 /rts
parent     aed37acd4d157791381800d5de960a2461bcbef3 (diff)
download   haskell-a7c0387d20c1c9994d1100b14fbb8fb4e28a259e.tar.gz
Produce new-style Cmm from the Cmm parser
The main change here is that the Cmm parser now allows high-level cmm
code with argument-passing and function calls.  For example:

  foo ( gcptr a, bits32 b )
  {
    if (b > 0) {
       // we can make tail calls passing arguments:
       jump stg_ap_0_fast(a);
    }
    return (x,y);
  }

More details on the new cmm syntax are in Note [Syntax of .cmm files]
in CmmParse.y.

The old syntax is still more-or-less supported for those occasional
code fragments that really need to explicitly manipulate the stack.
However there are a couple of differences: it is now obligatory to
give a list of live GlobalRegs on every jump, e.g.

  jump %ENTRY_CODE(Sp(0)) [R1];

Again, more details in Note [Syntax of .cmm files].

I have rewritten most of the .cmm files in the RTS into the new
syntax, except for AutoApply.cmm which is generated by the genapply
program: this file could be generated in the new syntax instead and
would probably be better off for it, but I ran out of enthusiasm.

Some other changes in this batch:

 - The PrimOp calling convention is gone, primops now use the
   ordinary NativeNodeCall convention.  This means that primops and
   "foreign import prim" code must be written in high-level cmm, but
   they can now take more than 10 arguments.

 - CmmSink now does constant-folding (should fix #7219)

 - .cmm files now go through the cmmPipeline, and as a result we
   generate better code in many cases.  All the object files
   generated for the RTS .cmm files are now smaller.  Performance
   should be better too, but I haven't measured it yet.

 - RET_DYN frames are removed from the RTS, lots of code goes away

 - we now have some more canned GC points to cover unboxed-tuples
   with 2-4 pointers, which will reduce code size a little.
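For comparison, a minimal sketch of the two styles side by side.  The
procedure hypo_succ, its old-style counterpart, and their bodies are
hypothetical, invented purely for illustration; they do not appear in
this patch:

  /* Hypothetical example -- not part of this commit. */

  /* Old low-level style: the argument arrives implicitly in R1, the
     frame is popped by hand, and (as of this patch) every jump must
     list the live GlobalRegs. */
  hypo_succ_old   /* explicit stack */
  {
      R1 = R1 + 1;
      jump %ENTRY_CODE(Sp(0)) [R1];
  }

  /* New high-level style: arguments and results are explicit, and
     the code generator manages the stack layout and calling
     convention. */
  hypo_succ ( W_ x )
  {
      return (x + 1);
  }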
Diffstat (limited to 'rts')
-rw-r--r--  rts/Apply.cmm            |   72
-rw-r--r--  rts/AutoApply.h          |    8
-rw-r--r--  rts/ClosureFlags.c       |    5
-rw-r--r--  rts/Exception.cmm        |  242
-rw-r--r--  rts/HeapStackCheck.cmm   |  510
-rw-r--r--  rts/Interpreter.c        |   60
-rw-r--r--  rts/LdvProfile.c         |    1
-rw-r--r--  rts/Linker.c             |  577
-rw-r--r--  rts/PrimOps.cmm          | 1163
-rw-r--r--  rts/Printer.c            |   35
-rw-r--r--  rts/RaiseAsync.c         |    2
-rw-r--r--  rts/RetainerProfile.c    |   29
-rw-r--r--  rts/StgMiscClosures.cmm  |  126
-rw-r--r--  rts/StgStartup.cmm       |   56
-rw-r--r--  rts/StgStdThunks.cmm     |  302
-rw-r--r--  rts/Updates.cmm          |   99
-rw-r--r--  rts/Updates.h            |   17
-rw-r--r--  rts/sm/Compact.c         |   32
-rw-r--r--  rts/sm/Evac.c            |    1
-rw-r--r--  rts/sm/Sanity.c          |   27
-rw-r--r--  rts/sm/Scav.c            |   26
21 files changed, 1533 insertions, 1857 deletions
diff --git a/rts/Apply.cmm b/rts/Apply.cmm
index a2d4a7e123..b89abeaff2 100644
--- a/rts/Apply.cmm
+++ b/rts/Apply.cmm
@@ -21,18 +21,16 @@
STRING(stg_ap_0_ret_str,"stg_ap_0_ret... ")
-stg_ap_0_fast
+stg_ap_0_fast ( P_ fun )
{
- // fn is in R1, no args on the stack
-
IF_DEBUG(apply,
- foreign "C" debugBelch(stg_ap_0_ret_str) [R1];
- foreign "C" printClosure(R1 "ptr") [R1]);
+ ccall debugBelch(stg_ap_0_ret_str);
+ ccall printClosure(R1 "ptr"));
IF_DEBUG(sanity,
- foreign "C" checkStackFrame(Sp "ptr") [R1]);
+ ccall checkStackFrame(Sp "ptr"));
- ENTER();
+ ENTER(fun);
}
/* -----------------------------------------------------------------------------
@@ -56,9 +54,9 @@ stg_ap_0_fast
-------------------------------------------------------------------------- */
INFO_TABLE(stg_PAP,/*special layout*/0,0,PAP,"PAP","PAP")
-{ foreign "C" barf("PAP object entered!") never returns; }
+{ ccall barf("PAP object entered!") never returns; }
-stg_PAP_apply
+stg_PAP_apply /* no args => explicit stack */
{
W_ Words;
W_ pap;
@@ -78,7 +76,7 @@ stg_PAP_apply
// this before calling stg_PAP_entry.
Sp_adj(-1);
Sp(0) = R2;
- jump stg_gc_unpt_r1;
+ jump stg_gc_unpt_r1 [R1];
}
Sp_adj(-Words);
@@ -86,7 +84,7 @@ stg_PAP_apply
TICK_ENT_PAP();
LDV_ENTER(pap);
#ifdef PROFILING
- foreign "C" enterFunCCS(BaseReg "ptr", StgHeader_ccs(pap) "ptr");
+ ccall enterFunCCS(BaseReg "ptr", StgHeader_ccs(pap) "ptr");
#endif
// Reload the stack
@@ -122,26 +120,26 @@ for:
TICK_ENT_VIA_NODE();
#ifdef NO_ARG_REGS
- jump %GET_ENTRY(UNTAG(R1));
+ jump %GET_ENTRY(UNTAG(R1)) [R1];
#else
W_ info;
info = %GET_FUN_INFO(UNTAG(R1));
W_ type;
type = TO_W_(StgFunInfoExtra_fun_type(info));
if (type == ARG_GEN) {
- jump StgFunInfoExtra_slow_apply(info);
+ jump StgFunInfoExtra_slow_apply(info) [R1];
}
if (type == ARG_GEN_BIG) {
- jump StgFunInfoExtra_slow_apply(info);
+ jump StgFunInfoExtra_slow_apply(info) [R1];
}
if (type == ARG_BCO) {
Sp_adj(-2);
Sp(1) = R1;
Sp(0) = stg_apply_interp_info;
- jump stg_yield_to_interpreter;
+ jump stg_yield_to_interpreter [];
}
jump W_[stg_ap_stack_entries +
- WDS(TO_W_(StgFunInfoExtra_fun_type(info)))];
+ WDS(TO_W_(StgFunInfoExtra_fun_type(info)))] [R1];
#endif
}
@@ -155,6 +153,7 @@ for:
-------------------------------------------------------------------------- */
INFO_TABLE(stg_AP,/*special layout*/0,0,AP,"AP","AP")
+ /* no args => explicit stack */
{
W_ Words;
W_ ap;
@@ -164,12 +163,12 @@ INFO_TABLE(stg_AP,/*special layout*/0,0,AP,"AP","AP")
Words = TO_W_(StgAP_n_args(ap));
/*
- * Check for stack overflow. IMPORTANT: use a _NP check here,
+ * Check for stack overflow. IMPORTANT: use a _ENTER check here,
* because if the check fails, we might end up blackholing this very
* closure, in which case we must enter the blackhole on return rather
* than continuing to evaluate the now-defunct closure.
*/
- STK_CHK_NP(WDS(Words) + SIZEOF_StgUpdateFrame);
+ STK_CHK_ENTER(WDS(Words) + SIZEOF_StgUpdateFrame, R1);
PUSH_UPD_FRAME(Sp - SIZEOF_StgUpdateFrame, R1);
Sp = Sp - SIZEOF_StgUpdateFrame - WDS(Words);
@@ -197,26 +196,26 @@ for:
TICK_ENT_VIA_NODE();
#ifdef NO_ARG_REGS
- jump %GET_ENTRY(UNTAG(R1));
+ jump %GET_ENTRY(UNTAG(R1)) [R1];
#else
W_ info;
info = %GET_FUN_INFO(UNTAG(R1));
W_ type;
type = TO_W_(StgFunInfoExtra_fun_type(info));
if (type == ARG_GEN) {
- jump StgFunInfoExtra_slow_apply(info);
+ jump StgFunInfoExtra_slow_apply(info) [R1];
}
if (type == ARG_GEN_BIG) {
- jump StgFunInfoExtra_slow_apply(info);
+ jump StgFunInfoExtra_slow_apply(info) [R1];
}
if (type == ARG_BCO) {
Sp_adj(-2);
Sp(1) = R1;
Sp(0) = stg_apply_interp_info;
- jump stg_yield_to_interpreter;
+ jump stg_yield_to_interpreter [];
}
jump W_[stg_ap_stack_entries +
- WDS(TO_W_(StgFunInfoExtra_fun_type(info)))];
+ WDS(TO_W_(StgFunInfoExtra_fun_type(info)))] [R1];
#endif
}
@@ -225,6 +224,7 @@ for:
those generated by the byte-code compiler for inserting breakpoints. */
INFO_TABLE(stg_AP_NOUPD,/*special layout*/0,0,AP,"AP_NOUPD","AP_NOUPD")
+ /* no args => explicit stack */
{
W_ Words;
W_ ap;
@@ -234,12 +234,12 @@ INFO_TABLE(stg_AP_NOUPD,/*special layout*/0,0,AP,"AP_NOUPD","AP_NOUPD")
Words = TO_W_(StgAP_n_args(ap));
/*
- * Check for stack overflow. IMPORTANT: use a _NP check here,
+ * Check for stack overflow. IMPORTANT: use a _ENTER check here,
* because if the check fails, we might end up blackholing this very
* closure, in which case we must enter the blackhole on return rather
* than continuing to evaluate the now-defunct closure.
*/
- STK_CHK_NP(WDS(Words));
+ STK_CHK_ENTER(WDS(Words), R1);
Sp = Sp - WDS(Words);
TICK_ENT_AP();
@@ -265,26 +265,26 @@ for:
TICK_ENT_VIA_NODE();
#ifdef NO_ARG_REGS
- jump %GET_ENTRY(UNTAG(R1));
+ jump %GET_ENTRY(UNTAG(R1)) [R1];
#else
W_ info;
info = %GET_FUN_INFO(UNTAG(R1));
W_ type;
type = TO_W_(StgFunInfoExtra_fun_type(info));
if (type == ARG_GEN) {
- jump StgFunInfoExtra_slow_apply(info);
+ jump StgFunInfoExtra_slow_apply(info) [R1];
}
if (type == ARG_GEN_BIG) {
- jump StgFunInfoExtra_slow_apply(info);
+ jump StgFunInfoExtra_slow_apply(info) [R1];
}
if (type == ARG_BCO) {
Sp_adj(-2);
Sp(1) = R1;
Sp(0) = stg_apply_interp_info;
- jump stg_yield_to_interpreter;
+ jump stg_yield_to_interpreter [];
}
jump W_[stg_ap_stack_entries +
- WDS(TO_W_(StgFunInfoExtra_fun_type(info)))];
+ WDS(TO_W_(StgFunInfoExtra_fun_type(info)))] [R1];
#endif
}
@@ -300,6 +300,7 @@ for:
-------------------------------------------------------------------------- */
INFO_TABLE(stg_AP_STACK,/*special layout*/0,0,AP_STACK,"AP_STACK","AP_STACK")
+ /* no args => explicit stack */
{
W_ Words;
W_ ap;
@@ -309,12 +310,12 @@ INFO_TABLE(stg_AP_STACK,/*special layout*/0,0,AP_STACK,"AP_STACK","AP_STACK")
Words = StgAP_STACK_size(ap);
/*
- * Check for stack overflow. IMPORTANT: use a _NP check here,
+ * Check for stack overflow. IMPORTANT: use a _ENTER check here,
* because if the check fails, we might end up blackholing this very
* closure, in which case we must enter the blackhole on return rather
* than continuing to evaluate the now-defunct closure.
*/
- STK_CHK_NP(WDS(Words) + SIZEOF_StgUpdateFrame + WDS(AP_STACK_SPLIM));
+ STK_CHK_ENTER(WDS(Words) + SIZEOF_StgUpdateFrame + WDS(AP_STACK_SPLIM), R1);
/* ensure there is at least AP_STACK_SPLIM words of headroom available
* after unpacking the AP_STACK. See bug #1466 */
@@ -343,7 +344,7 @@ for:
R1 = StgAP_STACK_fun(ap);
- ENTER();
+ ENTER_R1();
}
/* -----------------------------------------------------------------------------
@@ -352,6 +353,7 @@ for:
INFO_TABLE(stg_AP_STACK_NOUPD,/*special layout*/0,0,AP_STACK,
"AP_STACK_NOUPD","AP_STACK_NOUPD")
+ /* no args => explicit stack */
{
W_ Words;
W_ ap;
@@ -366,7 +368,7 @@ INFO_TABLE(stg_AP_STACK_NOUPD,/*special layout*/0,0,AP_STACK,
* closure, in which case we must enter the blackhole on return rather
* than continuing to evaluate the now-defunct closure.
*/
- STK_CHK_NP(WDS(Words) + WDS(AP_STACK_SPLIM));
+ STK_CHK_ENTER(WDS(Words) + WDS(AP_STACK_SPLIM), R1);
/* ensure there is at least AP_STACK_SPLIM words of headroom available
* after unpacking the AP_STACK. See bug #1466 */
@@ -394,5 +396,5 @@ for:
R1 = StgAP_STACK_fun(ap);
- ENTER();
+ ENTER_R1();
}
diff --git a/rts/AutoApply.h b/rts/AutoApply.h
index d0c5c3fe6b..ebb7308875 100644
--- a/rts/AutoApply.h
+++ b/rts/AutoApply.h
@@ -35,7 +35,7 @@
} \
R1 = pap; \
Sp_adj(1 + n); \
- jump %ENTRY_CODE(Sp(0));
+ jump %ENTRY_CODE(Sp(0)) [R1];
// Copy the old PAP, build a new one with the extra arg(s)
// ret addr and m arguments taking up n words are on the stack.
@@ -74,7 +74,7 @@
} \
R1 = new_pap; \
Sp_adj(n+1); \
- jump %ENTRY_CODE(Sp(0));
+ jump %ENTRY_CODE(Sp(0)) [R1];
// Jump to target, saving CCCS and restoring it on return
#if defined(PROFILING)
@@ -82,9 +82,9 @@
Sp(-1) = CCCS; \
Sp(-2) = stg_restore_cccs_info; \
Sp_adj(-2); \
- jump (target)
+ jump (target) [*]
#else
-#define jump_SAVE_CCCS(target) jump (target)
+#define jump_SAVE_CCCS(target) jump (target) [*]
#endif
#endif /* APPLY_H */
diff --git a/rts/ClosureFlags.c b/rts/ClosureFlags.c
index 0ab8b45669..a2a140282f 100644
--- a/rts/ClosureFlags.c
+++ b/rts/ClosureFlags.c
@@ -55,8 +55,7 @@ StgWord16 closure_flags[] = {
[RET_BCO] = ( 0 ),
[RET_SMALL] = ( _BTM| _SRT ),
[RET_BIG] = ( _SRT ),
- [RET_DYN] = ( _SRT ),
- [RET_FUN] = ( 0 ),
+ [RET_FUN] = ( 0 ),
[UPDATE_FRAME] = ( _BTM ),
[CATCH_FRAME] = ( _BTM ),
[UNDERFLOW_FRAME] = ( _BTM ),
@@ -84,6 +83,6 @@ StgWord16 closure_flags[] = {
[WHITEHOLE] = ( 0 )
};
-#if N_CLOSURE_TYPES != 61
+#if N_CLOSURE_TYPES != 60
#error Closure types changed: update ClosureFlags.c!
#endif
diff --git a/rts/Exception.cmm b/rts/Exception.cmm
index 78907c4ba7..8a9f4e62c9 100644
--- a/rts/Exception.cmm
+++ b/rts/Exception.cmm
@@ -50,7 +50,8 @@ import ghczmprim_GHCziTypes_True_closure;
-------------------------------------------------------------------------- */
-INFO_TABLE_RET(stg_unmaskAsyncExceptionszh_ret, RET_SMALL)
+INFO_TABLE_RET(stg_unmaskAsyncExceptionszh_ret, RET_SMALL, W_ info_ptr)
+ /* explicit stack */
{
CInt r;
@@ -60,7 +61,7 @@ INFO_TABLE_RET(stg_unmaskAsyncExceptionszh_ret, RET_SMALL)
/* Eagerly raise a blocked exception, if there is one */
if (StgTSO_blocked_exceptions(CurrentTSO) != END_TSO_QUEUE) {
- STK_CHK_GEN( WDS(2), R1_PTR, stg_unmaskAsyncExceptionszh_ret_info);
+ STK_CHK_P (WDS(2), stg_unmaskAsyncExceptionszh_ret_info, R1);
/*
* We have to be very careful here, as in killThread#, since
* we are about to raise an async exception in the current
@@ -68,18 +69,18 @@ INFO_TABLE_RET(stg_unmaskAsyncExceptionszh_ret, RET_SMALL)
*/
Sp_adj(-2);
Sp(1) = R1;
- Sp(0) = stg_gc_unpt_r1_info;
+ Sp(0) = stg_ret_p_info;
SAVE_THREAD_STATE();
- (r) = foreign "C" maybePerformBlockedException (MyCapability() "ptr",
- CurrentTSO "ptr") [R1];
+ (r) = ccall maybePerformBlockedException (MyCapability() "ptr",
+ CurrentTSO "ptr");
if (r != 0::CInt) {
if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
- jump stg_threadFinished;
+ jump stg_threadFinished [];
} else {
LOAD_THREAD_STATE();
ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
- jump %ENTRY_CODE(Sp(0));
+ jump %ENTRY_CODE(Sp(0)) [R1];
}
}
else {
@@ -93,10 +94,11 @@ INFO_TABLE_RET(stg_unmaskAsyncExceptionszh_ret, RET_SMALL)
}
Sp_adj(1);
- jump %ENTRY_CODE(Sp(0));
+ jump %ENTRY_CODE(Sp(0)) [R1];
}
-INFO_TABLE_RET(stg_maskAsyncExceptionszh_ret, RET_SMALL)
+INFO_TABLE_RET(stg_maskAsyncExceptionszh_ret, RET_SMALL, W_ info_ptr)
+ return (P_ ret)
{
StgTSO_flags(CurrentTSO) =
%lobits32(
@@ -104,11 +106,11 @@ INFO_TABLE_RET(stg_maskAsyncExceptionszh_ret, RET_SMALL)
| TSO_BLOCKEX | TSO_INTERRUPTIBLE
);
- Sp_adj(1);
- jump %ENTRY_CODE(Sp(0));
+ return (ret);
}
-INFO_TABLE_RET(stg_maskUninterruptiblezh_ret, RET_SMALL)
+INFO_TABLE_RET(stg_maskUninterruptiblezh_ret, RET_SMALL, W_ info_ptr)
+ return (P_ ret)
{
StgTSO_flags(CurrentTSO) =
%lobits32(
@@ -117,14 +119,13 @@ INFO_TABLE_RET(stg_maskUninterruptiblezh_ret, RET_SMALL)
& ~TSO_INTERRUPTIBLE
);
- Sp_adj(1);
- jump %ENTRY_CODE(Sp(0));
+ return (ret);
}
-stg_maskAsyncExceptionszh
+stg_maskAsyncExceptionszh /* explicit stack */
{
/* Args: R1 :: IO a */
- STK_CHK_GEN( WDS(1)/* worst case */, R1_PTR, stg_maskAsyncExceptionszh);
+ STK_CHK_P( WDS(1)/* worst case */, stg_maskAsyncExceptionszh, R1);
if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) == 0) {
/* avoid growing the stack unnecessarily */
@@ -146,13 +147,13 @@ stg_maskAsyncExceptionszh
TICK_UNKNOWN_CALL();
TICK_SLOW_CALL_v();
- jump stg_ap_v_fast;
+ jump stg_ap_v_fast [R1];
}
-stg_maskUninterruptiblezh
+stg_maskUninterruptiblezh /* explicit stack */
{
/* Args: R1 :: IO a */
- STK_CHK_GEN( WDS(1)/* worst case */, R1_PTR, stg_maskAsyncExceptionszh);
+ STK_CHK_P( WDS(1)/* worst case */, stg_maskAsyncExceptionszh, R1);
if ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) == 0) {
/* avoid growing the stack unnecessarily */
@@ -174,16 +175,16 @@ stg_maskUninterruptiblezh
TICK_UNKNOWN_CALL();
TICK_SLOW_CALL_v();
- jump stg_ap_v_fast;
+ jump stg_ap_v_fast [R1];
}
-stg_unmaskAsyncExceptionszh
+stg_unmaskAsyncExceptionszh /* explicit stack */
{
CInt r;
W_ level;
/* Args: R1 :: IO a */
- STK_CHK_GEN( WDS(4), R1_PTR, stg_unmaskAsyncExceptionszh);
+ STK_CHK_P (WDS(4), stg_unmaskAsyncExceptionszh, R1);
/* 4 words: one for the unblock frame, 3 for setting up the
* stack to call maybePerformBlockedException() below.
*/
@@ -225,16 +226,16 @@ stg_unmaskAsyncExceptionszh
Sp(0) = stg_enter_info;
SAVE_THREAD_STATE();
- (r) = foreign "C" maybePerformBlockedException (MyCapability() "ptr",
- CurrentTSO "ptr") [R1];
+ (r) = ccall maybePerformBlockedException (MyCapability() "ptr",
+ CurrentTSO "ptr");
if (r != 0::CInt) {
if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
- jump stg_threadFinished;
+ jump stg_threadFinished [];
} else {
LOAD_THREAD_STATE();
ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
- jump %ENTRY_CODE(Sp(0));
+ jump %ENTRY_CODE(Sp(0)) [R1];
}
} else {
/* we'll just call R1 directly, below */
@@ -245,11 +246,11 @@ stg_unmaskAsyncExceptionszh
}
TICK_UNKNOWN_CALL();
TICK_SLOW_CALL_v();
- jump stg_ap_v_fast;
+ jump stg_ap_v_fast [R1];
}
-stg_getMaskingStatezh
+stg_getMaskingStatezh ()
{
/* args: none */
/*
@@ -257,25 +258,18 @@ stg_getMaskingStatezh
1 == masked, non-interruptible,
2 == masked, interruptible
*/
- RET_N(((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) != 0) +
- ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) != 0));
+ return (((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_BLOCKEX) != 0) +
+ ((TO_W_(StgTSO_flags(CurrentTSO)) & TSO_INTERRUPTIBLE) != 0));
}
-stg_killThreadzh
+stg_killThreadzh (P_ target, P_ exception)
{
- /* args: R1 = TSO to kill, R2 = Exception */
-
W_ why_blocked;
- W_ target;
- W_ exception;
-
- target = R1;
- exception = R2;
-
+
/* Needs 3 words because throwToSingleThreaded uses some stack */
- STK_CHK_GEN( WDS(3), R1_PTR & R2_PTR, stg_killThreadzh);
+ STK_CHK_PP (WDS(3), stg_killThreadzh, target, exception);
/* We call allocate in throwTo(), so better check for GC */
- MAYBE_GC(R1_PTR & R2_PTR, stg_killThreadzh);
+ MAYBE_GC_PP (stg_killThreadzh, target, exception);
/*
* We might have killed ourselves. In which case, better be *very*
@@ -292,58 +286,75 @@ stg_killThreadzh
* happens: on resumption, we will just jump to the next frame on
* the stack, which is the return point for stg_killThreadzh.
*/
- SAVE_THREAD_STATE();
- /* ToDo: what if the current thread is blocking exceptions? */
- foreign "C" throwToSingleThreaded(MyCapability() "ptr",
- target "ptr", exception "ptr")[R1,R2];
- if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
- jump stg_threadFinished;
- } else {
- LOAD_THREAD_STATE();
- ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
- jump %ENTRY_CODE(Sp(0));
- }
+ R1 = target;
+ R2 = exception;
+ jump stg_killMyself [R1,R2];
} else {
- W_ out;
- W_ msg;
- out = Sp - WDS(1); /* ok to re-use stack space here */
+ W_ msg;
- (msg) = foreign "C" throwTo(MyCapability() "ptr",
+ (msg) = ccall throwTo(MyCapability() "ptr",
CurrentTSO "ptr",
target "ptr",
- exception "ptr") [R1,R2];
+ exception "ptr");
if (msg == NULL) {
- jump %ENTRY_CODE(Sp(0));
- } else {
+ return ();
+ } else {
StgTSO_why_blocked(CurrentTSO) = BlockedOnMsgThrowTo;
StgTSO_block_info(CurrentTSO) = msg;
// we must block, and unlock the message before returning
- jump stg_block_throwto;
+ jump stg_block_throwto (target, exception);
}
}
}
+/*
+ * We must switch into low-level Cmm in order to raise an exception in
+ * the current thread, hence this is in a separate proc with arguments
+ * passed explicitly in R1 and R2.
+ */
+stg_killMyself
+{
+ P_ target, exception;
+ target = R1;
+ exception = R2;
+
+ SAVE_THREAD_STATE();
+ /* ToDo: what if the current thread is blocking exceptions? */
+ ccall throwToSingleThreaded(MyCapability() "ptr",
+ target "ptr", exception "ptr");
+ if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
+ jump stg_threadFinished [];
+ } else {
+ LOAD_THREAD_STATE();
+ ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
+ jump %ENTRY_CODE(Sp(0)) [];
+ }
+}
+
/* -----------------------------------------------------------------------------
Catch frames
-------------------------------------------------------------------------- */
-#define SP_OFF 0
-
/* Catch frames are very similar to update frames, but when entering
* one we just pop the frame off the stack and perform the correct
* kind of return to the activation record underneath us on the stack.
*/
+#define CATCH_FRAME_FIELDS(w_,p_,info_ptr,exceptions_blocked,handler) \
+ w_ info_ptr, \
+ PROF_HDR_FIELDS(w_) \
+ w_ exceptions_blocked, \
+ p_ handler
+
+
INFO_TABLE_RET(stg_catch_frame, CATCH_FRAME,
-#if defined(PROFILING)
- W_ unused1, W_ unused2,
-#endif
- W_ unused3, P_ unused4)
- {
- Sp = Sp + SIZEOF_StgCatchFrame;
- jump %ENTRY_CODE(Sp(SP_OFF));
- }
+ CATCH_FRAME_FIELDS(W_,P_,info_ptr,
+ exceptions_blocked,handler))
+ return (P_ ret)
+{
+ return (ret);
+}
/* -----------------------------------------------------------------------------
* The catch infotable
@@ -356,30 +367,30 @@ INFO_TABLE_RET(stg_catch_frame, CATCH_FRAME,
* -------------------------------------------------------------------------- */
INFO_TABLE(stg_catch,2,0,FUN,"catch","catch")
+ (P_ node)
{
- R2 = StgClosure_payload(R1,1); /* h */
- R1 = StgClosure_payload(R1,0); /* x */
- jump stg_catchzh;
+ jump stg_catchzh(StgClosure_payload(node,0),StgClosure_payload(node,1));
}
-stg_catchzh
+stg_catchzh ( P_ io, /* :: IO a */
+ P_ handler /* :: Exception -> IO a */ )
{
- /* args: R1 = m :: IO a, R2 = handler :: Exception -> IO a */
- STK_CHK_GEN(SIZEOF_StgCatchFrame + WDS(1), R1_PTR & R2_PTR, stg_catchzh);
+ W_ exceptions_blocked;
+
+ STK_CHK_GEN();
- /* Set up the catch frame */
- Sp = Sp - SIZEOF_StgCatchFrame;
- SET_HDR(Sp,stg_catch_frame_info,CCCS);
-
- StgCatchFrame_handler(Sp) = R2;
- StgCatchFrame_exceptions_blocked(Sp) =
+ exceptions_blocked =
TO_W_(StgTSO_flags(CurrentTSO)) & (TSO_BLOCKEX | TSO_INTERRUPTIBLE);
TICK_CATCHF_PUSHED();
/* Apply R1 to the realworld token */
TICK_UNKNOWN_CALL();
TICK_SLOW_CALL_v();
- jump stg_ap_v_fast;
+
+ jump stg_ap_v_fast
+ (CATCH_FRAME_FIELDS(,,stg_catch_frame_info,
+ exceptions_blocked, handler))
+ (io);
}
/* -----------------------------------------------------------------------------
@@ -394,28 +405,33 @@ stg_catchzh
INFO_TABLE(stg_raise,1,0,THUNK_1_0,"raise","raise")
{
- R1 = StgThunk_payload(R1,0);
- jump stg_raisezh;
+ jump stg_raisezh(StgThunk_payload(R1,0));
}
section "data" {
no_break_on_exception: W_[1];
}
-INFO_TABLE_RET(stg_raise_ret, RET_SMALL, P_ arg1)
+INFO_TABLE_RET(stg_raise_ret, RET_SMALL, W_ info_ptr, P_ exception)
+ return (P_ ret)
{
- R1 = Sp(1);
- Sp = Sp + WDS(2);
- W_[no_break_on_exception] = 1;
- jump stg_raisezh;
+ W_[no_break_on_exception] = 1;
+ jump stg_raisezh (exception);
}
-stg_raisezh
+stg_raisezh /* explicit stack */
+/*
+ * args : R1 :: Exception
+ *
+ * Here we assume that the NativeNodeCall convention always puts the
+ * first argument in R1 (which it does). We cannot use high-level cmm
+ * due to all the LOAD_THREAD_STATE()/SAVE_THREAD_STATE() and stack
+ * walking that happens in here.
+ */
{
W_ handler;
W_ frame_type;
W_ exception;
- /* args : R1 :: Exception */
exception = R1;
@@ -427,16 +443,16 @@ stg_raisezh
*/
if (RtsFlags_ProfFlags_showCCSOnException(RtsFlags) != 0::I32) {
SAVE_THREAD_STATE();
- foreign "C" fprintCCS_stderr(CCCS "ptr",
+ ccall fprintCCS_stderr(CCCS "ptr",
exception "ptr",
- CurrentTSO "ptr") [];
+ CurrentTSO "ptr");
LOAD_THREAD_STATE();
}
#endif
retry_pop_stack:
SAVE_THREAD_STATE();
- (frame_type) = foreign "C" raiseExceptionHelper(BaseReg "ptr", CurrentTSO "ptr", exception "ptr") [];
+ (frame_type) = ccall raiseExceptionHelper(BaseReg "ptr", CurrentTSO "ptr", exception "ptr");
LOAD_THREAD_STATE();
if (frame_type == ATOMICALLY_FRAME) {
/* The exception has reached the edge of a memory transaction. Check that
@@ -450,14 +466,14 @@ retry_pop_stack:
W_ trec, outer;
W_ r;
trec = StgTSO_trec(CurrentTSO);
- (r) = foreign "C" stmValidateNestOfTransactions(trec "ptr") [];
+ (r) = ccall stmValidateNestOfTransactions(trec "ptr");
outer = StgTRecHeader_enclosing_trec(trec);
- foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
- foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
+ ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
+ ccall stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr");
if (outer != NO_TREC) {
- foreign "C" stmAbortTransaction(MyCapability() "ptr", outer "ptr") [];
- foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", outer "ptr") [];
+ ccall stmAbortTransaction(MyCapability() "ptr", outer "ptr");
+ ccall stmFreeAbortedTRec(MyCapability() "ptr", outer "ptr");
}
StgTSO_trec(CurrentTSO) = NO_TREC;
@@ -468,10 +484,10 @@ retry_pop_stack:
} else {
// Transaction was not valid: we retry the exception (otherwise continue
// with a further call to raiseExceptionHelper)
- ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
+ ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr");
StgTSO_trec(CurrentTSO) = trec;
R1 = StgAtomicallyFrame_code(Sp);
- jump stg_ap_v_fast;
+ jump stg_ap_v_fast [R1];
}
}
@@ -492,7 +508,7 @@ retry_pop_stack:
// for example. Perhaps the stop_on_exception flag should
// be per-thread.
CInt[rts_stop_on_exception] = 0;
- ("ptr" ioAction) = foreign "C" deRefStablePtr (W_[rts_breakpoint_io_action] "ptr") [];
+ ("ptr" ioAction) = ccall deRefStablePtr (W_[rts_breakpoint_io_action] "ptr");
Sp = Sp - WDS(6);
Sp(5) = exception;
Sp(4) = stg_raise_ret_info;
@@ -500,7 +516,7 @@ retry_pop_stack:
Sp(2) = ghczmprim_GHCziTypes_True_closure; // dummy breakpoint info
Sp(1) = ghczmprim_GHCziTypes_True_closure; // True <=> a breakpoint
R1 = ioAction;
- jump RET_LBL(stg_ap_pppv);
+ jump RET_LBL(stg_ap_pppv) [R1];
}
}
@@ -519,11 +535,12 @@ retry_pop_stack:
StgTSO_what_next(CurrentTSO) = ThreadKilled::I16;
SAVE_THREAD_STATE(); /* inline! */
- jump stg_threadFinished;
+ jump stg_threadFinished [];
}
- /* Ok, Sp points to the enclosing CATCH_FRAME or CATCH_STM_FRAME. Pop everything
- * down to and including this frame, update Su, push R1, and enter the handler.
+ /* Ok, Sp points to the enclosing CATCH_FRAME or CATCH_STM_FRAME.
+ * Pop everything down to and including this frame, update Su,
+ * push R1, and enter the handler.
*/
if (frame_type == CATCH_FRAME) {
handler = StgCatchFrame_handler(Sp);
@@ -572,8 +589,8 @@ retry_pop_stack:
W_ trec, outer;
trec = StgTSO_trec(CurrentTSO);
outer = StgTRecHeader_enclosing_trec(trec);
- foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
- foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
+ ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
+ ccall stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr");
StgTSO_trec(CurrentTSO) = outer;
Sp = Sp + SIZEOF_StgCatchSTMFrame;
}
@@ -587,11 +604,10 @@ retry_pop_stack:
Sp_adj(-1);
TICK_UNKNOWN_CALL();
TICK_SLOW_CALL_pv();
- jump RET_LBL(stg_ap_pv);
+ jump RET_LBL(stg_ap_pv) [R1];
}
-stg_raiseIOzh
+stg_raiseIOzh (P_ exception)
{
- /* Args :: R1 :: Exception */
- jump stg_raisezh;
+ jump stg_raisezh (exception);
}
diff --git a/rts/HeapStackCheck.cmm b/rts/HeapStackCheck.cmm
index 90691fa091..08adf45b02 100644
--- a/rts/HeapStackCheck.cmm
+++ b/rts/HeapStackCheck.cmm
@@ -11,6 +11,7 @@
* ---------------------------------------------------------------------------*/
#include "Cmm.h"
+#include "Updates.h"
#ifdef __PIC__
import pthread_mutex_unlock;
@@ -81,58 +82,66 @@ import LeaveCriticalSection;
* ThreadRunGHC thread.
*/
-#define GC_GENERIC \
- DEBUG_ONLY(foreign "C" heapCheckFail()); \
- if (Hp > HpLim) { \
- Hp = Hp - HpAlloc/*in bytes*/; \
- if (HpLim == 0) { \
- R1 = ThreadYielding; \
- goto sched; \
- } \
- if (HpAlloc <= BLOCK_SIZE \
- && bdescr_link(CurrentNursery) != NULL) { \
- HpAlloc = 0; \
- CLOSE_NURSERY(); \
- CurrentNursery = bdescr_link(CurrentNursery); \
- OPEN_NURSERY(); \
- if (Capability_context_switch(MyCapability()) != 0 :: CInt || \
- Capability_interrupt(MyCapability()) != 0 :: CInt) { \
- R1 = ThreadYielding; \
- goto sched; \
- } else { \
- jump %ENTRY_CODE(Sp(0)); \
- } \
- } else { \
- R1 = HeapOverflow; \
- goto sched; \
- } \
- } else { \
- R1 = StackOverflow; \
- } \
- sched: \
- PRE_RETURN(R1,ThreadRunGHC); \
- jump stg_returnToSched;
+stg_gc_noregs
+{
+ W_ ret;
+
+ DEBUG_ONLY(foreign "C" heapCheckFail());
+ if (Hp > HpLim) {
+ Hp = Hp - HpAlloc/*in bytes*/;
+ if (HpLim == 0) {
+ ret = ThreadYielding;
+ goto sched;
+ }
+ if (HpAlloc <= BLOCK_SIZE
+ && bdescr_link(CurrentNursery) != NULL) {
+ HpAlloc = 0;
+ CLOSE_NURSERY();
+ CurrentNursery = bdescr_link(CurrentNursery);
+ OPEN_NURSERY();
+ if (Capability_context_switch(MyCapability()) != 0 :: CInt ||
+ Capability_interrupt(MyCapability()) != 0 :: CInt) {
+ ret = ThreadYielding;
+ goto sched;
+ } else {
+ jump %ENTRY_CODE(Sp(0)) [];
+ }
+ } else {
+ ret = HeapOverflow;
+ goto sched;
+ }
+ } else {
+ if (CHECK_GC()) {
+ ret = HeapOverflow;
+ } else {
+ ret = StackOverflow;
+ }
+ }
+ sched:
+ PRE_RETURN(ret,ThreadRunGHC);
+ jump stg_returnToSched [R1];
+}
#define HP_GENERIC \
- PRE_RETURN(HeapOverflow, ThreadRunGHC) \
- jump stg_returnToSched;
+ PRE_RETURN(HeapOverflow, ThreadRunGHC) \
+ jump stg_returnToSched [R1];
#define BLOCK_GENERIC \
- PRE_RETURN(ThreadBlocked, ThreadRunGHC) \
- jump stg_returnToSched;
+ PRE_RETURN(ThreadBlocked, ThreadRunGHC) \
+ jump stg_returnToSched [R1];
#define YIELD_GENERIC \
- PRE_RETURN(ThreadYielding, ThreadRunGHC) \
- jump stg_returnToSched;
+ PRE_RETURN(ThreadYielding, ThreadRunGHC) \
+ jump stg_returnToSched [R1];
#define BLOCK_BUT_FIRST(c) \
- PRE_RETURN(ThreadBlocked, ThreadRunGHC) \
- R2 = c; \
- jump stg_returnToSchedButFirst;
+ PRE_RETURN(ThreadBlocked, ThreadRunGHC) \
+ R2 = c; \
+ jump stg_returnToSchedButFirst [R1,R2,R3];
#define YIELD_TO_INTERPRETER \
- PRE_RETURN(ThreadYielding, ThreadInterpret) \
- jump stg_returnToSchedNotPaused;
+ PRE_RETURN(ThreadYielding, ThreadInterpret) \
+ jump stg_returnToSchedNotPaused [R1];
/* -----------------------------------------------------------------------------
Heap checks in thunks/functions.
@@ -144,19 +153,55 @@ import LeaveCriticalSection;
There are canned sequences for 'n' pointer values in registers.
-------------------------------------------------------------------------- */
-INFO_TABLE_RET( stg_enter, RET_SMALL, P_ unused)
+INFO_TABLE_RET ( stg_enter, RET_SMALL, W_ info_ptr, P_ closure )
+ return (/* no return values */)
{
- R1 = Sp(1);
- Sp_adj(2);
- ENTER();
+ ENTER(closure);
}
-__stg_gc_enter_1
+__stg_gc_enter_1 (P_ node)
{
- Sp_adj(-2);
- Sp(1) = R1;
- Sp(0) = stg_enter_info;
- GC_GENERIC
+ jump stg_gc_noregs (stg_enter_info, node) ();
+}
+
+/* -----------------------------------------------------------------------------
+ Canned heap checks for primitives.
+
+ We can't use stg_gc_fun because primitives are not functions, so
+ these fragments let us save some boilerplate heap-check-failure
+ code in a few common cases.
+ -------------------------------------------------------------------------- */
+
+stg_gc_prim ()
+{
+ W_ fun;
+ fun = R9;
+ call stg_gc_noregs ();
+ jump fun();
+}
+
+stg_gc_prim_p (P_ arg)
+{
+ W_ fun;
+ fun = R9;
+ call stg_gc_noregs ();
+ jump fun(arg);
+}
+
+stg_gc_prim_pp (P_ arg1, P_ arg2)
+{
+ W_ fun;
+ fun = R9;
+ call stg_gc_noregs ();
+ jump fun(arg1,arg2);
+}
+
+stg_gc_prim_n (W_ arg)
+{
+ W_ fun;
+ fun = R9;
+ call stg_gc_noregs ();
+ jump fun(arg);
}
/* -----------------------------------------------------------------------------
@@ -169,138 +214,121 @@ __stg_gc_enter_1
-------------------------------------------------------------------------- */
/* The stg_enter_checkbh frame has the same shape as an update frame: */
-#if defined(PROFILING)
-#define UPD_FRAME_PARAMS W_ unused1, W_ unused2, P_ unused3
-#else
-#define UPD_FRAME_PARAMS P_ unused1
-#endif
-INFO_TABLE_RET( stg_enter_checkbh, RET_SMALL, UPD_FRAME_PARAMS)
+INFO_TABLE_RET ( stg_enter_checkbh, RET_SMALL,
+ UPDATE_FRAME_FIELDS(W_,P_,info_ptr,updatee))
+ return (P_ ret)
{
- R1 = StgUpdateFrame_updatee(Sp);
- Sp = Sp + SIZEOF_StgUpdateFrame;
foreign "C" checkBlockingQueues(MyCapability() "ptr",
- CurrentTSO) [R1];
- ENTER();
+ CurrentTSO);
+ return (updatee);
}
/* -----------------------------------------------------------------------------
- Heap checks in Primitive case alternatives
-
- A primitive case alternative is entered with a value either in
- R1, FloatReg1 or D1 depending on the return convention. All the
- cases are covered below.
+ Info tables for returning values of various types. These are used
+ when we want to push a frame on the stack that will return a value
+ to the frame underneath it.
-------------------------------------------------------------------------- */
-/*-- No Registers live ------------------------------------------------------ */
-
-stg_gc_noregs
+INFO_TABLE_RET ( stg_ret_v, RET_SMALL, W_ info_ptr )
+ return (/* no return values */)
{
- GC_GENERIC
+ return ();
}
-/*-- void return ------------------------------------------------------------ */
-
-INFO_TABLE_RET( stg_gc_void, RET_SMALL)
+INFO_TABLE_RET ( stg_ret_p, RET_SMALL, W_ info_ptr, P_ ptr )
+ return (/* no return values */)
{
- Sp_adj(1);
- jump %ENTRY_CODE(Sp(0));
+ return (ptr);
}
-/*-- R1 is boxed/unpointed -------------------------------------------------- */
-
-INFO_TABLE_RET( stg_gc_unpt_r1, RET_SMALL, P_ unused)
+INFO_TABLE_RET ( stg_ret_n, RET_SMALL, W_ info_ptr, W_ nptr )
+ return (/* no return values */)
{
- R1 = Sp(1);
- Sp_adj(2);
- jump %ENTRY_CODE(Sp(0));
+ return (nptr);
}
-stg_gc_unpt_r1
+INFO_TABLE_RET ( stg_ret_f, RET_SMALL, W_ info_ptr, F_ f )
+ return (/* no return values */)
{
- Sp_adj(-2);
- Sp(1) = R1;
- Sp(0) = stg_gc_unpt_r1_info;
- GC_GENERIC
+ return (f);
}
-/*-- R1 is unboxed -------------------------------------------------- */
-
-/* the 1 is a bitmap - i.e. 1 non-pointer word on the stack. */
-INFO_TABLE_RET( stg_gc_unbx_r1, RET_SMALL, W_ unused )
+INFO_TABLE_RET ( stg_ret_d, RET_SMALL, W_ info_ptr, D_ d )
+ return (/* no return values */)
{
- R1 = Sp(1);
- Sp_adj(2);
- jump %ENTRY_CODE(Sp(0));
+ return (d);
}
-stg_gc_unbx_r1
+INFO_TABLE_RET ( stg_ret_l, RET_SMALL, W_ info_ptr, L_ l )
+ return (/* no return values */)
{
- Sp_adj(-2);
- Sp(1) = R1;
- Sp(0) = stg_gc_unbx_r1_info;
- GC_GENERIC
+ return (l);
}
-/*-- F1 contains a float ------------------------------------------------- */
+/* -----------------------------------------------------------------------------
+ Canned heap-check failures for case alts, where we have some values
+ in registers or on the stack according to the NativeReturn
+ convention.
+ -------------------------------------------------------------------------- */
+
-INFO_TABLE_RET( stg_gc_f1, RET_SMALL, F_ unused )
+/*-- void return ------------------------------------------------------------ */
+
+/*-- R1 is a GC pointer, but we don't enter it ----------------------- */
+
+stg_gc_unpt_r1 return (P_ ptr) /* NB. return convention */
{
- F1 = F_[Sp+WDS(1)];
- Sp_adj(2);
- jump %ENTRY_CODE(Sp(0));
+ jump stg_gc_noregs (stg_ret_p_info, ptr) ();
}
-stg_gc_f1
+/*-- R1 is unboxed -------------------------------------------------- */
+
+stg_gc_unbx_r1 return (W_ nptr) /* NB. return convention */
{
- Sp_adj(-2);
- F_[Sp + WDS(1)] = F1;
- Sp(0) = stg_gc_f1_info;
- GC_GENERIC
+ jump stg_gc_noregs (stg_ret_n_info, nptr) ();
}
-/*-- D1 contains a double ------------------------------------------------- */
+/*-- F1 contains a float ------------------------------------------------- */
-INFO_TABLE_RET( stg_gc_d1, RET_SMALL, D_ unused )
+stg_gc_f1 return (F_ f)
{
- D1 = D_[Sp + WDS(1)];
- Sp = Sp + WDS(1) + SIZEOF_StgDouble;
- jump %ENTRY_CODE(Sp(0));
+ jump stg_gc_noregs (stg_ret_f_info, f) ();
}
-stg_gc_d1
+/*-- D1 contains a double ------------------------------------------------- */
+
+stg_gc_d1 return (D_ d)
{
- Sp = Sp - WDS(1) - SIZEOF_StgDouble;
- D_[Sp + WDS(1)] = D1;
- Sp(0) = stg_gc_d1_info;
- GC_GENERIC
+ jump stg_gc_noregs (stg_ret_d_info, d) ();
}
/*-- L1 contains an int64 ------------------------------------------------- */
-INFO_TABLE_RET( stg_gc_l1, RET_SMALL, L_ unused )
+stg_gc_l1 return (L_ l)
{
- L1 = L_[Sp + WDS(1)];
- Sp_adj(1) + SIZEOF_StgWord64;
- jump %ENTRY_CODE(Sp(0));
+ jump stg_gc_noregs (stg_ret_l_info, l) ();
}
-stg_gc_l1
+/*-- Unboxed tuples with multiple pointers -------------------------------- */
+
+stg_gc_pp return (P_ arg1, P_ arg2)
{
- Sp_adj(-1) - SIZEOF_StgWord64;
- L_[Sp + WDS(1)] = L1;
- Sp(0) = stg_gc_l1_info;
- GC_GENERIC
+ call stg_gc_noregs();
+ return (arg1,arg2);
}
-/*-- Unboxed tuple return, one pointer (unregisterised build only) ---------- */
+stg_gc_ppp return (P_ arg1, P_ arg2, P_ arg3)
+{
+ call stg_gc_noregs();
+ return (arg1,arg2,arg3);
+}
-INFO_TABLE_RET( stg_ut_1_0_unreg, RET_SMALL, P_ unused )
+stg_gc_pppp return (P_ arg1, P_ arg2, P_ arg3, P_ arg4)
{
- Sp_adj(1);
- // one ptr is on the stack (Sp(0))
- jump %ENTRY_CODE(Sp(1));
+ call stg_gc_noregs();
+ return (arg1,arg2,arg3,arg4);
}
/* -----------------------------------------------------------------------------
@@ -333,7 +361,7 @@ INFO_TABLE_RET( stg_ut_1_0_unreg, RET_SMALL, P_ unused )
-------------------------------------------------------------------------- */
-__stg_gc_fun
+__stg_gc_fun /* explicit stack */
{
W_ size;
W_ info;
@@ -365,7 +393,7 @@ __stg_gc_fun
Sp(2) = R1;
Sp(1) = size;
Sp(0) = stg_gc_fun_info;
- GC_GENERIC
+ jump stg_gc_noregs [];
#else
W_ type;
type = TO_W_(StgFunInfoExtra_fun_type(info));
@@ -377,14 +405,15 @@ __stg_gc_fun
Sp(1) = size;
Sp(0) = stg_gc_fun_info;
// DEBUG_ONLY(foreign "C" debugBelch("stg_fun_gc_gen(ARG_GEN)"););
- GC_GENERIC
+ jump stg_gc_noregs [];
} else {
- jump W_[stg_stack_save_entries + WDS(type)];
+ jump W_[stg_stack_save_entries + WDS(type)] [*]; // all regs live
// jumps to stg_gc_noregs after saving stuff
}
#endif /* !NO_ARG_REGS */
}
+
/* -----------------------------------------------------------------------------
Generic Apply (return point)
@@ -393,14 +422,15 @@ __stg_gc_fun
appropriately. The stack layout is given above.
-------------------------------------------------------------------------- */
-INFO_TABLE_RET( stg_gc_fun, RET_FUN )
+INFO_TABLE_RET ( stg_gc_fun, RET_FUN )
+ /* explicit stack */
{
R1 = Sp(2);
Sp_adj(3);
#ifdef NO_ARG_REGS
// Minor optimisation: there are no argument registers to load up,
// so we can just jump straight to the function's entry point.
- jump %GET_ENTRY(UNTAG(R1));
+ jump %GET_ENTRY(UNTAG(R1)) [R1];
#else
W_ info;
W_ type;
@@ -408,126 +438,25 @@ INFO_TABLE_RET( stg_gc_fun, RET_FUN )
info = %GET_FUN_INFO(UNTAG(R1));
type = TO_W_(StgFunInfoExtra_fun_type(info));
if (type == ARG_GEN || type == ARG_GEN_BIG) {
- jump StgFunInfoExtra_slow_apply(info);
+ jump StgFunInfoExtra_slow_apply(info) [R1];
} else {
if (type == ARG_BCO) {
// cover this case just to be on the safe side
Sp_adj(-2);
Sp(1) = R1;
Sp(0) = stg_apply_interp_info;
- jump stg_yield_to_interpreter;
+ jump stg_yield_to_interpreter [];
} else {
- jump W_[stg_ap_stack_entries + WDS(type)];
+ jump W_[stg_ap_stack_entries + WDS(type)] [R1];
}
}
#endif
}
/* -----------------------------------------------------------------------------
- Generic Heap Check Code.
-
- Called with Liveness mask in R9, Return address in R10.
- Stack must be consistent (containing all necessary info pointers
- to relevant SRTs).
-
- See StgMacros.h for a description of the RET_DYN stack frame.
-
- We also define an stg_gen_yield here, because it's very similar.
- -------------------------------------------------------------------------- */
-
-// For simplicity, we assume that SIZEOF_DOUBLE == 2*SIZEOF_VOID_P
-// on a 64-bit machine, we'll end up wasting a couple of words, but
-// it's not a big deal.
-
-#define RESTORE_EVERYTHING \
- L1 = L_[Sp + WDS(19)]; \
- D2 = D_[Sp + WDS(17)]; \
- D1 = D_[Sp + WDS(15)]; \
- F4 = F_[Sp + WDS(14)]; \
- F3 = F_[Sp + WDS(13)]; \
- F2 = F_[Sp + WDS(12)]; \
- F1 = F_[Sp + WDS(11)]; \
- R8 = Sp(10); \
- R7 = Sp(9); \
- R6 = Sp(8); \
- R5 = Sp(7); \
- R4 = Sp(6); \
- R3 = Sp(5); \
- R2 = Sp(4); \
- R1 = Sp(3); \
- Sp_adj(21);
-
-#define RET_OFFSET (-19)
-
-#define SAVE_EVERYTHING \
- Sp_adj(-21); \
- L_[Sp + WDS(19)] = L1; \
- D_[Sp + WDS(17)] = D2; \
- D_[Sp + WDS(15)] = D1; \
- F_[Sp + WDS(14)] = F4; \
- F_[Sp + WDS(13)] = F3; \
- F_[Sp + WDS(12)] = F2; \
- F_[Sp + WDS(11)] = F1; \
- Sp(10) = R8; \
- Sp(9) = R7; \
- Sp(8) = R6; \
- Sp(7) = R5; \
- Sp(6) = R4; \
- Sp(5) = R3; \
- Sp(4) = R2; \
- Sp(3) = R1; \
- Sp(2) = R10; /* return address */ \
- Sp(1) = R9; /* liveness mask */ \
- Sp(0) = stg_gc_gen_info;
-
-INFO_TABLE_RET( stg_gc_gen, RET_DYN )
-/* bitmap in the above info table is unused, the real one is on the stack. */
-{
- RESTORE_EVERYTHING;
- jump Sp(RET_OFFSET); /* No %ENTRY_CODE( - this is an actual code ptr */
-}
-
-stg_gc_gen
-{
- // Hack; see Note [mvar-heap-check] in PrimOps.cmm
- if (R10 == stg_putMVarzh || R10 == stg_takeMVarzh) {
- unlockClosure(R1, stg_MVAR_DIRTY_info)
- }
- SAVE_EVERYTHING;
- GC_GENERIC
-}
-
-// A heap check at an unboxed tuple return point. The return address
-// is on the stack, and we can find it by using the offsets given
-// to us in the liveness mask.
-stg_gc_ut
-{
- R10 = %ENTRY_CODE(Sp(RET_DYN_NONPTRS(R9) + RET_DYN_PTRS(R9)));
- SAVE_EVERYTHING;
- GC_GENERIC
-}
-
-/*
- * stg_gen_hp is used by MAYBE_GC, where we can't use GC_GENERIC
- * because we've just failed doYouWantToGC(), not a standard heap
- * check. GC_GENERIC would end up returning StackOverflow.
- */
-stg_gc_gen_hp
-{
- SAVE_EVERYTHING;
- HP_GENERIC
-}
-
-/* -----------------------------------------------------------------------------
Yields
-------------------------------------------------------------------------- */
-stg_gen_yield
-{
- SAVE_EVERYTHING;
- YIELD_GENERIC
-}
-
stg_yield_noregs
{
YIELD_GENERIC;
@@ -546,25 +475,11 @@ stg_yield_to_interpreter
Blocks
-------------------------------------------------------------------------- */
-stg_gen_block
-{
- SAVE_EVERYTHING;
- BLOCK_GENERIC;
-}
-
stg_block_noregs
{
BLOCK_GENERIC;
}
-stg_block_1
-{
- Sp_adj(-2);
- Sp(1) = R1;
- Sp(0) = stg_enter_info;
- BLOCK_GENERIC;
-}
-
/* -----------------------------------------------------------------------------
* takeMVar/putMVar-specific blocks
*
@@ -585,52 +500,48 @@ stg_block_1
*
* -------------------------------------------------------------------------- */
-INFO_TABLE_RET( stg_block_takemvar, RET_SMALL, P_ unused )
+INFO_TABLE_RET ( stg_block_takemvar, RET_SMALL, W_ info_ptr, P_ mvar )
+ return ()
{
- R1 = Sp(1);
- Sp_adj(2);
- jump stg_takeMVarzh;
+ jump stg_takeMVarzh(mvar);
}
// code fragment executed just before we return to the scheduler
stg_block_takemvar_finally
{
unlockClosure(R3, stg_MVAR_DIRTY_info);
- jump StgReturn;
+ jump StgReturn [R1];
}
-stg_block_takemvar
+stg_block_takemvar /* mvar passed in R1 */
{
Sp_adj(-2);
Sp(1) = R1;
Sp(0) = stg_block_takemvar_info;
- R3 = R1;
+ R3 = R1; // mvar communicated to stg_block_takemvar_finally in R3
BLOCK_BUT_FIRST(stg_block_takemvar_finally);
}
-INFO_TABLE_RET( stg_block_putmvar, RET_SMALL, P_ unused1, P_ unused2 )
+INFO_TABLE_RET( stg_block_putmvar, RET_SMALL, W_ info_ptr,
+ P_ mvar, P_ val )
+ return ()
{
- R2 = Sp(2);
- R1 = Sp(1);
- Sp_adj(3);
- jump stg_putMVarzh;
+ jump stg_putMVarzh(mvar, val);
}
// code fragment executed just before we return to the scheduler
stg_block_putmvar_finally
{
unlockClosure(R3, stg_MVAR_DIRTY_info);
- jump StgReturn;
+ jump StgReturn [R1];
}
-stg_block_putmvar
+stg_block_putmvar (P_ mvar, P_ val)
{
- Sp_adj(-3);
- Sp(2) = R2;
- Sp(1) = R1;
- Sp(0) = stg_block_putmvar_info;
- R3 = R1;
- BLOCK_BUT_FIRST(stg_block_putmvar_finally);
+ push (stg_block_putmvar_info, mvar, val) {
+ R3 = R1; // mvar communicated to stg_block_putmvar_finally in R3
+ BLOCK_BUT_FIRST(stg_block_putmvar_finally);
+ }
}
stg_block_blackhole
@@ -641,12 +552,11 @@ stg_block_blackhole
BLOCK_GENERIC;
}
-INFO_TABLE_RET( stg_block_throwto, RET_SMALL, P_ unused, P_ unused )
+INFO_TABLE_RET ( stg_block_throwto, RET_SMALL, W_ info_ptr,
+ P_ tso, P_ exception )
+ return ()
{
- R2 = Sp(2);
- R1 = Sp(1);
- Sp_adj(3);
- jump stg_killThreadzh;
+ jump stg_killThreadzh(tso, exception);
}
stg_block_throwto_finally
@@ -657,30 +567,26 @@ stg_block_throwto_finally
if (StgHeader_info(StgTSO_block_info(CurrentTSO)) == stg_WHITEHOLE_info) {
unlockClosure(StgTSO_block_info(CurrentTSO), stg_MSG_THROWTO_info);
}
- jump StgReturn;
+ jump StgReturn [R1];
}
-stg_block_throwto
+stg_block_throwto (P_ tso, P_ exception)
{
- Sp_adj(-3);
- Sp(2) = R2;
- Sp(1) = R1;
- Sp(0) = stg_block_throwto_info;
- BLOCK_BUT_FIRST(stg_block_throwto_finally);
+ push (stg_block_throwto_info, tso, exception) {
+ BLOCK_BUT_FIRST(stg_block_throwto_finally);
+ }
}
#ifdef mingw32_HOST_OS
-INFO_TABLE_RET( stg_block_async, RET_SMALL, W_ unused )
+INFO_TABLE_RET ( stg_block_async, RET_SMALL, W_ info_ptr, W_ ares )
+ return ()
{
- W_ ares;
W_ len, errC;
- ares = Sp(1);
len = TO_W_(StgAsyncIOResult_len(ares));
errC = TO_W_(StgAsyncIOResult_errCode(ares));
- foreign "C" free(ares "ptr");
- Sp_adj(2);
- RET_NN(len, errC);
+ ccall free(ares "ptr");
+ return (len, errC);
}
stg_block_async
@@ -693,14 +599,11 @@ stg_block_async
/* Used by threadDelay implementation; it would be desirable to get rid of
* this free()'ing void return continuation.
*/
-INFO_TABLE_RET( stg_block_async_void, RET_SMALL, W_ ares )
+INFO_TABLE_RET ( stg_block_async_void, RET_SMALL, W_ info_ptr, W_ ares )
+ return ()
{
- W_ ares;
-
- ares = Sp(1);
- foreign "C" free(ares "ptr");
- Sp_adj(2);
- jump %ENTRY_CODE(Sp(0));
+ ccall free(ares "ptr");
+ return ();
}
stg_block_async_void
@@ -712,14 +615,15 @@ stg_block_async_void
#endif
+
/* -----------------------------------------------------------------------------
STM-specific waiting
-------------------------------------------------------------------------- */
stg_block_stmwait_finally
{
- foreign "C" stmWaitUnlock(MyCapability() "ptr", R3 "ptr");
- jump StgReturn;
+ ccall stmWaitUnlock(MyCapability() "ptr", R3 "ptr");
+ jump StgReturn [R1];
}
stg_block_stmwait
diff --git a/rts/Interpreter.c b/rts/Interpreter.c
index 83973e8c9b..2eb2d0789f 100644
--- a/rts/Interpreter.c
+++ b/rts/Interpreter.c
@@ -503,7 +503,7 @@ do_return:
// | XXXX_info |
// +---------------+
//
- // where XXXX_info is one of the stg_gc_unbx_r1_info family.
+ // where XXXX_info is one of the stg_ret_*_info family.
//
// We're only interested in the case when the real return address
// is a BCO; otherwise we'll return to the scheduler.
@@ -512,12 +512,12 @@ do_return_unboxed:
{
int offset;
- ASSERT( Sp[0] == (W_)&stg_gc_unbx_r1_info
- || Sp[0] == (W_)&stg_gc_unpt_r1_info
- || Sp[0] == (W_)&stg_gc_f1_info
- || Sp[0] == (W_)&stg_gc_d1_info
- || Sp[0] == (W_)&stg_gc_l1_info
- || Sp[0] == (W_)&stg_gc_void_info // VoidRep
+ ASSERT( Sp[0] == (W_)&stg_ret_v_info
+ || Sp[0] == (W_)&stg_ret_p_info
+ || Sp[0] == (W_)&stg_ret_n_info
+ || Sp[0] == (W_)&stg_ret_f_info
+ || Sp[0] == (W_)&stg_ret_d_info
+ || Sp[0] == (W_)&stg_ret_l_info
);
// get the offset of the stg_ctoi_ret_XXX itbl
@@ -1336,27 +1336,27 @@ run_BCO:
case bci_RETURN_P:
Sp--;
- Sp[0] = (W_)&stg_gc_unpt_r1_info;
+ Sp[0] = (W_)&stg_ret_p_info;
goto do_return_unboxed;
case bci_RETURN_N:
Sp--;
- Sp[0] = (W_)&stg_gc_unbx_r1_info;
+ Sp[0] = (W_)&stg_ret_n_info;
goto do_return_unboxed;
case bci_RETURN_F:
Sp--;
- Sp[0] = (W_)&stg_gc_f1_info;
+ Sp[0] = (W_)&stg_ret_f_info;
goto do_return_unboxed;
case bci_RETURN_D:
Sp--;
- Sp[0] = (W_)&stg_gc_d1_info;
+ Sp[0] = (W_)&stg_ret_d_info;
goto do_return_unboxed;
case bci_RETURN_L:
Sp--;
- Sp[0] = (W_)&stg_gc_l1_info;
+ Sp[0] = (W_)&stg_ret_l_info;
goto do_return_unboxed;
case bci_RETURN_V:
Sp--;
- Sp[0] = (W_)&stg_gc_void_info;
+ Sp[0] = (W_)&stg_ret_v_info;
goto do_return_unboxed;
case bci_SWIZZLE: {
@@ -1372,9 +1372,6 @@ run_BCO:
int o_itbl = BCO_GET_LARGE_ARG;
int interruptible = BCO_NEXT;
void(*marshall_fn)(void*) = (void (*)(void*))BCO_LIT(o_itbl);
- int ret_dyn_size =
- RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE
- + sizeofW(StgRetDyn);
/* the stack looks like this:
@@ -1405,6 +1402,7 @@ run_BCO:
nat nargs = cif->nargs;
nat ret_size;
nat i;
+ int j;
StgPtr p;
W_ ret[2]; // max needed
W_ *arguments[stk_offset]; // max needed
@@ -1446,17 +1444,19 @@ run_BCO:
//
// We know how many (non-ptr) words there are before the
// next valid stack frame: it is the stk_offset arg to the
- // CCALL instruction. So we build a RET_DYN stack frame
- // on the stack frame to describe this chunk of stack.
- //
- Sp -= ret_dyn_size;
- ((StgRetDyn *)Sp)->liveness = R1_PTR | N_NONPTRS(stk_offset);
- ((StgRetDyn *)Sp)->info = (StgInfoTable *)&stg_gc_gen_info;
+ // CCALL instruction. So we overwrite this area of the
+ // stack with empty stack frames (stg_ret_v_info);
+ //
+ for (j = 0; j < stk_offset; j++) {
+ Sp[j] = (W_)&stg_ret_v_info; /* an empty stack frame */
+ }
// save obj (pointer to the current BCO), since this
- // might move during the call. We use the R1 slot in the
- // RET_DYN frame for this, hence R1_PTR above.
- ((StgRetDyn *)Sp)->payload[0] = (StgClosure *)obj;
+ // might move during the call. We push an stg_ret_p frame
+ // for this.
+ Sp -= 2;
+ Sp[1] = (W_)obj;
+ Sp[0] = (W_)&stg_ret_p_info;
SAVE_STACK_POINTERS;
tok = suspendThread(&cap->r, interruptible ? rtsTrue : rtsFalse);
@@ -1464,11 +1464,11 @@ run_BCO:
// We already made a copy of the arguments above.
ffi_call(cif, fn, ret, argptrs);
- // And restart the thread again, popping the RET_DYN frame.
+ // And restart the thread again, popping the stg_ret_p frame.
cap = (Capability *)((void *)((unsigned char*)resumeThread(tok) - STG_FIELD_OFFSET(Capability,r)));
LOAD_STACK_POINTERS;
- if (Sp[0] != (W_)&stg_gc_gen_info) {
+ if (Sp[0] != (W_)&stg_ret_p_info) {
// the stack is not how we left it. This probably
// means that an exception got raised on exit from the
// foreign call, so we should just continue with
@@ -1476,16 +1476,16 @@ run_BCO:
RETURN_TO_SCHEDULER_NO_PAUSE(ThreadRunGHC, ThreadYielding);
}
- // Re-load the pointer to the BCO from the RET_DYN frame,
+ // Re-load the pointer to the BCO from the stg_ret_p frame,
// it might have moved during the call. Also reload the
// pointers to the components of the BCO.
- obj = ((StgRetDyn *)Sp)->payload[0];
+ obj = (P_)Sp[1];
bco = (StgBCO*)obj;
instrs = (StgWord16*)(bco->instrs->payload);
literals = (StgWord*)(&bco->literals->payload[0]);
ptrs = (StgPtr*)(&bco->ptrs->payload[0]);
- Sp += ret_dyn_size;
+ Sp += 2; // pop the stg_ret_p frame
// Save the Haskell thread's current value of errno
cap->r.rCurrentTSO->saved_errno = errno;
diff --git a/rts/LdvProfile.c b/rts/LdvProfile.c
index 7bc032e05d..8ccafef9e2 100644
--- a/rts/LdvProfile.c
+++ b/rts/LdvProfile.c
@@ -133,7 +133,6 @@ processHeapClosureForDead( StgClosure *c )
case CATCH_FRAME:
case UNDERFLOW_FRAME:
case STOP_FRAME:
- case RET_DYN:
case RET_BCO:
case RET_SMALL:
case RET_BIG:
diff --git a/rts/Linker.c b/rts/Linker.c
index cf60c528d3..64d60f23d0 100644
--- a/rts/Linker.c
+++ b/rts/Linker.c
@@ -1011,301 +1011,300 @@ typedef struct _RtsSymbolVal {
#endif
-#define RTS_SYMBOLS \
- Maybe_Stable_Names \
- RTS_TICKY_SYMBOLS \
- SymI_HasProto(StgReturn) \
- SymI_HasProto(stg_enter_info) \
- SymI_HasProto(stg_gc_void_info) \
- SymI_HasProto(__stg_gc_enter_1) \
- SymI_HasProto(stg_gc_noregs) \
- SymI_HasProto(stg_gc_unpt_r1_info) \
- SymI_HasProto(stg_gc_unpt_r1) \
- SymI_HasProto(stg_gc_unbx_r1_info) \
- SymI_HasProto(stg_gc_unbx_r1) \
- SymI_HasProto(stg_gc_f1_info) \
- SymI_HasProto(stg_gc_f1) \
- SymI_HasProto(stg_gc_d1_info) \
- SymI_HasProto(stg_gc_d1) \
- SymI_HasProto(stg_gc_l1_info) \
- SymI_HasProto(stg_gc_l1) \
- SymI_HasProto(__stg_gc_fun) \
- SymI_HasProto(stg_gc_fun_info) \
- SymI_HasProto(stg_gc_gen) \
- SymI_HasProto(stg_gc_gen_info) \
- SymI_HasProto(stg_gc_gen_hp) \
- SymI_HasProto(stg_gc_ut) \
- SymI_HasProto(stg_gen_yield) \
- SymI_HasProto(stg_yield_noregs) \
- SymI_HasProto(stg_yield_to_interpreter) \
- SymI_HasProto(stg_gen_block) \
- SymI_HasProto(stg_block_noregs) \
- SymI_HasProto(stg_block_1) \
- SymI_HasProto(stg_block_takemvar) \
- SymI_HasProto(stg_block_putmvar) \
- MAIN_CAP_SYM \
- SymI_HasProto(MallocFailHook) \
- SymI_HasProto(OnExitHook) \
- SymI_HasProto(OutOfHeapHook) \
- SymI_HasProto(StackOverflowHook) \
- SymI_HasProto(addDLL) \
- SymI_HasProto(__int_encodeDouble) \
- SymI_HasProto(__word_encodeDouble) \
- SymI_HasProto(__2Int_encodeDouble) \
- SymI_HasProto(__int_encodeFloat) \
- SymI_HasProto(__word_encodeFloat) \
- SymI_HasProto(stg_atomicallyzh) \
- SymI_HasProto(barf) \
- SymI_HasProto(debugBelch) \
- SymI_HasProto(errorBelch) \
- SymI_HasProto(sysErrorBelch) \
- SymI_HasProto(stg_getMaskingStatezh) \
- SymI_HasProto(stg_maskAsyncExceptionszh) \
- SymI_HasProto(stg_maskUninterruptiblezh) \
- SymI_HasProto(stg_catchzh) \
- SymI_HasProto(stg_catchRetryzh) \
- SymI_HasProto(stg_catchSTMzh) \
- SymI_HasProto(stg_checkzh) \
- SymI_HasProto(closure_flags) \
- SymI_HasProto(cmp_thread) \
- SymI_HasProto(createAdjustor) \
- SymI_HasProto(stg_decodeDoublezu2Intzh) \
- SymI_HasProto(stg_decodeFloatzuIntzh) \
- SymI_HasProto(defaultsHook) \
- SymI_HasProto(stg_delayzh) \
- SymI_HasProto(stg_deRefWeakzh) \
- SymI_HasProto(stg_deRefStablePtrzh) \
- SymI_HasProto(dirty_MUT_VAR) \
- SymI_HasProto(stg_forkzh) \
- SymI_HasProto(stg_forkOnzh) \
- SymI_HasProto(forkProcess) \
- SymI_HasProto(forkOS_createThread) \
- SymI_HasProto(freeHaskellFunctionPtr) \
+#define RTS_SYMBOLS \
+ Maybe_Stable_Names \
+ RTS_TICKY_SYMBOLS \
+ SymI_HasProto(StgReturn) \
+ SymI_HasProto(stg_gc_noregs) \
+ SymI_HasProto(stg_ret_v_info) \
+ SymI_HasProto(stg_ret_p_info) \
+ SymI_HasProto(stg_ret_n_info) \
+ SymI_HasProto(stg_ret_f_info) \
+ SymI_HasProto(stg_ret_d_info) \
+ SymI_HasProto(stg_ret_l_info) \
+ SymI_HasProto(stg_gc_prim_p) \
+ SymI_HasProto(stg_gc_prim_pp) \
+ SymI_HasProto(stg_gc_prim_n) \
+ SymI_HasProto(stg_enter_info) \
+ SymI_HasProto(__stg_gc_enter_1) \
+ SymI_HasProto(stg_gc_unpt_r1) \
+ SymI_HasProto(stg_gc_unbx_r1) \
+ SymI_HasProto(stg_gc_f1) \
+ SymI_HasProto(stg_gc_d1) \
+ SymI_HasProto(stg_gc_l1) \
+ SymI_HasProto(stg_gc_pp) \
+ SymI_HasProto(stg_gc_ppp) \
+ SymI_HasProto(stg_gc_pppp) \
+ SymI_HasProto(__stg_gc_fun) \
+ SymI_HasProto(stg_gc_fun_info) \
+ SymI_HasProto(stg_yield_noregs) \
+ SymI_HasProto(stg_yield_to_interpreter) \
+ SymI_HasProto(stg_block_noregs) \
+ SymI_HasProto(stg_block_takemvar) \
+ SymI_HasProto(stg_block_putmvar) \
+ MAIN_CAP_SYM \
+ SymI_HasProto(MallocFailHook) \
+ SymI_HasProto(OnExitHook) \
+ SymI_HasProto(OutOfHeapHook) \
+ SymI_HasProto(StackOverflowHook) \
+ SymI_HasProto(addDLL) \
+ SymI_HasProto(__int_encodeDouble) \
+ SymI_HasProto(__word_encodeDouble) \
+ SymI_HasProto(__2Int_encodeDouble) \
+ SymI_HasProto(__int_encodeFloat) \
+ SymI_HasProto(__word_encodeFloat) \
+ SymI_HasProto(stg_atomicallyzh) \
+ SymI_HasProto(barf) \
+ SymI_HasProto(debugBelch) \
+ SymI_HasProto(errorBelch) \
+ SymI_HasProto(sysErrorBelch) \
+ SymI_HasProto(stg_getMaskingStatezh) \
+ SymI_HasProto(stg_maskAsyncExceptionszh) \
+ SymI_HasProto(stg_maskUninterruptiblezh) \
+ SymI_HasProto(stg_catchzh) \
+ SymI_HasProto(stg_catchRetryzh) \
+ SymI_HasProto(stg_catchSTMzh) \
+ SymI_HasProto(stg_checkzh) \
+ SymI_HasProto(closure_flags) \
+ SymI_HasProto(cmp_thread) \
+ SymI_HasProto(createAdjustor) \
+ SymI_HasProto(stg_decodeDoublezu2Intzh) \
+ SymI_HasProto(stg_decodeFloatzuIntzh) \
+ SymI_HasProto(defaultsHook) \
+ SymI_HasProto(stg_delayzh) \
+ SymI_HasProto(stg_deRefWeakzh) \
+ SymI_HasProto(stg_deRefStablePtrzh) \
+ SymI_HasProto(dirty_MUT_VAR) \
+ SymI_HasProto(stg_forkzh) \
+ SymI_HasProto(stg_forkOnzh) \
+ SymI_HasProto(forkProcess) \
+ SymI_HasProto(forkOS_createThread) \
+ SymI_HasProto(freeHaskellFunctionPtr) \
SymI_HasProto(getOrSetGHCConcSignalSignalHandlerStore) \
SymI_HasProto(getOrSetGHCConcWindowsPendingDelaysStore) \
SymI_HasProto(getOrSetGHCConcWindowsIOManagerThreadStore) \
SymI_HasProto(getOrSetGHCConcWindowsProddingStore) \
SymI_HasProto(getOrSetSystemEventThreadEventManagerStore) \
SymI_HasProto(getOrSetSystemEventThreadIOManagerThreadStore) \
- SymI_HasProto(getGCStats) \
- SymI_HasProto(getGCStatsEnabled) \
- SymI_HasProto(genSymZh) \
- SymI_HasProto(genericRaise) \
- SymI_HasProto(getProgArgv) \
- SymI_HasProto(getFullProgArgv) \
- SymI_HasProto(getStablePtr) \
- SymI_HasProto(hs_init) \
- SymI_HasProto(hs_exit) \
- SymI_HasProto(hs_set_argv) \
- SymI_HasProto(hs_add_root) \
- SymI_HasProto(hs_perform_gc) \
- SymI_HasProto(hs_free_stable_ptr) \
- SymI_HasProto(hs_free_fun_ptr) \
- SymI_HasProto(hs_hpc_rootModule) \
- SymI_HasProto(hs_hpc_module) \
- SymI_HasProto(initLinker) \
- SymI_HasProto(stg_unpackClosurezh) \
- SymI_HasProto(stg_getApStackValzh) \
- SymI_HasProto(stg_getSparkzh) \
- SymI_HasProto(stg_numSparkszh) \
- SymI_HasProto(stg_isCurrentThreadBoundzh) \
- SymI_HasProto(stg_isEmptyMVarzh) \
- SymI_HasProto(stg_killThreadzh) \
- SymI_HasProto(loadArchive) \
- SymI_HasProto(loadObj) \
- SymI_HasProto(insertStableSymbol) \
- SymI_HasProto(insertSymbol) \
- SymI_HasProto(lookupSymbol) \
- SymI_HasProto(stg_makeStablePtrzh) \
- SymI_HasProto(stg_mkApUpd0zh) \
- SymI_HasProto(stg_myThreadIdzh) \
- SymI_HasProto(stg_labelThreadzh) \
- SymI_HasProto(stg_newArrayzh) \
- SymI_HasProto(stg_newArrayArrayzh) \
- SymI_HasProto(stg_newBCOzh) \
- SymI_HasProto(stg_newByteArrayzh) \
- SymI_HasProto_redirect(newCAF, newDynCAF) \
- SymI_HasProto(stg_newMVarzh) \
- SymI_HasProto(stg_newMutVarzh) \
- SymI_HasProto(stg_newTVarzh) \
- SymI_HasProto(stg_noDuplicatezh) \
- SymI_HasProto(stg_atomicModifyMutVarzh) \
- SymI_HasProto(stg_casMutVarzh) \
- SymI_HasProto(stg_newPinnedByteArrayzh) \
- SymI_HasProto(stg_newAlignedPinnedByteArrayzh) \
- SymI_HasProto(newSpark) \
- SymI_HasProto(performGC) \
- SymI_HasProto(performMajorGC) \
- SymI_HasProto(prog_argc) \
- SymI_HasProto(prog_argv) \
- SymI_HasProto(stg_putMVarzh) \
- SymI_HasProto(stg_raisezh) \
- SymI_HasProto(stg_raiseIOzh) \
- SymI_HasProto(stg_readTVarzh) \
- SymI_HasProto(stg_readTVarIOzh) \
- SymI_HasProto(resumeThread) \
- SymI_HasProto(setNumCapabilities) \
- SymI_HasProto(getNumberOfProcessors) \
- SymI_HasProto(resolveObjs) \
- SymI_HasProto(stg_retryzh) \
- SymI_HasProto(rts_apply) \
- SymI_HasProto(rts_checkSchedStatus) \
- SymI_HasProto(rts_eval) \
- SymI_HasProto(rts_evalIO) \
- SymI_HasProto(rts_evalLazyIO) \
- SymI_HasProto(rts_evalStableIO) \
- SymI_HasProto(rts_eval_) \
- SymI_HasProto(rts_getBool) \
- SymI_HasProto(rts_getChar) \
- SymI_HasProto(rts_getDouble) \
- SymI_HasProto(rts_getFloat) \
- SymI_HasProto(rts_getInt) \
- SymI_HasProto(rts_getInt8) \
- SymI_HasProto(rts_getInt16) \
- SymI_HasProto(rts_getInt32) \
- SymI_HasProto(rts_getInt64) \
- SymI_HasProto(rts_getPtr) \
- SymI_HasProto(rts_getFunPtr) \
- SymI_HasProto(rts_getStablePtr) \
- SymI_HasProto(rts_getThreadId) \
- SymI_HasProto(rts_getWord) \
- SymI_HasProto(rts_getWord8) \
- SymI_HasProto(rts_getWord16) \
- SymI_HasProto(rts_getWord32) \
- SymI_HasProto(rts_getWord64) \
- SymI_HasProto(rts_lock) \
- SymI_HasProto(rts_mkBool) \
- SymI_HasProto(rts_mkChar) \
- SymI_HasProto(rts_mkDouble) \
- SymI_HasProto(rts_mkFloat) \
- SymI_HasProto(rts_mkInt) \
- SymI_HasProto(rts_mkInt8) \
- SymI_HasProto(rts_mkInt16) \
- SymI_HasProto(rts_mkInt32) \
- SymI_HasProto(rts_mkInt64) \
- SymI_HasProto(rts_mkPtr) \
- SymI_HasProto(rts_mkFunPtr) \
- SymI_HasProto(rts_mkStablePtr) \
- SymI_HasProto(rts_mkString) \
- SymI_HasProto(rts_mkWord) \
- SymI_HasProto(rts_mkWord8) \
- SymI_HasProto(rts_mkWord16) \
- SymI_HasProto(rts_mkWord32) \
- SymI_HasProto(rts_mkWord64) \
- SymI_HasProto(rts_unlock) \
- SymI_HasProto(rts_unsafeGetMyCapability) \
- SymI_HasProto(rtsSupportsBoundThreads) \
- SymI_HasProto(rts_isProfiled) \
- SymI_HasProto(setProgArgv) \
- SymI_HasProto(startupHaskell) \
- SymI_HasProto(shutdownHaskell) \
- SymI_HasProto(shutdownHaskellAndExit) \
- SymI_HasProto(stable_ptr_table) \
- SymI_HasProto(stackOverflow) \
- SymI_HasProto(stg_CAF_BLACKHOLE_info) \
- SymI_HasProto(stg_BLACKHOLE_info) \
- SymI_HasProto(__stg_EAGER_BLACKHOLE_info) \
- SymI_HasProto(stg_BLOCKING_QUEUE_CLEAN_info) \
- SymI_HasProto(stg_BLOCKING_QUEUE_DIRTY_info) \
- SymI_HasProto(startTimer) \
- SymI_HasProto(stg_MVAR_CLEAN_info) \
- SymI_HasProto(stg_MVAR_DIRTY_info) \
- SymI_HasProto(stg_IND_STATIC_info) \
- SymI_HasProto(stg_ARR_WORDS_info) \
- SymI_HasProto(stg_MUT_ARR_PTRS_DIRTY_info) \
- SymI_HasProto(stg_MUT_ARR_PTRS_FROZEN_info) \
- SymI_HasProto(stg_MUT_ARR_PTRS_FROZEN0_info) \
- SymI_HasProto(stg_WEAK_info) \
- SymI_HasProto(stg_ap_v_info) \
- SymI_HasProto(stg_ap_f_info) \
- SymI_HasProto(stg_ap_d_info) \
- SymI_HasProto(stg_ap_l_info) \
- SymI_HasProto(stg_ap_n_info) \
- SymI_HasProto(stg_ap_p_info) \
- SymI_HasProto(stg_ap_pv_info) \
- SymI_HasProto(stg_ap_pp_info) \
- SymI_HasProto(stg_ap_ppv_info) \
- SymI_HasProto(stg_ap_ppp_info) \
- SymI_HasProto(stg_ap_pppv_info) \
- SymI_HasProto(stg_ap_pppp_info) \
- SymI_HasProto(stg_ap_ppppp_info) \
- SymI_HasProto(stg_ap_pppppp_info) \
- SymI_HasProto(stg_ap_0_fast) \
- SymI_HasProto(stg_ap_v_fast) \
- SymI_HasProto(stg_ap_f_fast) \
- SymI_HasProto(stg_ap_d_fast) \
- SymI_HasProto(stg_ap_l_fast) \
- SymI_HasProto(stg_ap_n_fast) \
- SymI_HasProto(stg_ap_p_fast) \
- SymI_HasProto(stg_ap_pv_fast) \
- SymI_HasProto(stg_ap_pp_fast) \
- SymI_HasProto(stg_ap_ppv_fast) \
- SymI_HasProto(stg_ap_ppp_fast) \
- SymI_HasProto(stg_ap_pppv_fast) \
- SymI_HasProto(stg_ap_pppp_fast) \
- SymI_HasProto(stg_ap_ppppp_fast) \
- SymI_HasProto(stg_ap_pppppp_fast) \
- SymI_HasProto(stg_ap_1_upd_info) \
- SymI_HasProto(stg_ap_2_upd_info) \
- SymI_HasProto(stg_ap_3_upd_info) \
- SymI_HasProto(stg_ap_4_upd_info) \
- SymI_HasProto(stg_ap_5_upd_info) \
- SymI_HasProto(stg_ap_6_upd_info) \
- SymI_HasProto(stg_ap_7_upd_info) \
- SymI_HasProto(stg_exit) \
- SymI_HasProto(stg_sel_0_upd_info) \
- SymI_HasProto(stg_sel_10_upd_info) \
- SymI_HasProto(stg_sel_11_upd_info) \
- SymI_HasProto(stg_sel_12_upd_info) \
- SymI_HasProto(stg_sel_13_upd_info) \
- SymI_HasProto(stg_sel_14_upd_info) \
- SymI_HasProto(stg_sel_15_upd_info) \
- SymI_HasProto(stg_sel_1_upd_info) \
- SymI_HasProto(stg_sel_2_upd_info) \
- SymI_HasProto(stg_sel_3_upd_info) \
- SymI_HasProto(stg_sel_4_upd_info) \
- SymI_HasProto(stg_sel_5_upd_info) \
- SymI_HasProto(stg_sel_6_upd_info) \
- SymI_HasProto(stg_sel_7_upd_info) \
- SymI_HasProto(stg_sel_8_upd_info) \
- SymI_HasProto(stg_sel_9_upd_info) \
- SymI_HasProto(stg_upd_frame_info) \
- SymI_HasProto(stg_bh_upd_frame_info) \
- SymI_HasProto(suspendThread) \
- SymI_HasProto(stg_takeMVarzh) \
- SymI_HasProto(stg_threadStatuszh) \
- SymI_HasProto(stg_tryPutMVarzh) \
- SymI_HasProto(stg_tryTakeMVarzh) \
- SymI_HasProto(stg_unmaskAsyncExceptionszh) \
- SymI_HasProto(unloadObj) \
- SymI_HasProto(stg_unsafeThawArrayzh) \
- SymI_HasProto(stg_waitReadzh) \
- SymI_HasProto(stg_waitWritezh) \
- SymI_HasProto(stg_writeTVarzh) \
- SymI_HasProto(stg_yieldzh) \
- SymI_NeedsProto(stg_interp_constr_entry) \
- SymI_HasProto(stg_arg_bitmaps) \
- SymI_HasProto(large_alloc_lim) \
- SymI_HasProto(g0) \
- SymI_HasProto(allocate) \
- SymI_HasProto(allocateExec) \
- SymI_HasProto(freeExec) \
- SymI_HasProto(getAllocations) \
- SymI_HasProto(revertCAFs) \
- SymI_HasProto(RtsFlags) \
- SymI_NeedsProto(rts_breakpoint_io_action) \
- SymI_NeedsProto(rts_stop_next_breakpoint) \
- SymI_NeedsProto(rts_stop_on_exception) \
- SymI_HasProto(stopTimer) \
- SymI_HasProto(n_capabilities) \
- SymI_HasProto(stg_traceCcszh) \
- SymI_HasProto(stg_traceEventzh) \
- SymI_HasProto(getMonotonicNSec) \
- SymI_HasProto(lockFile) \
- SymI_HasProto(unlockFile) \
- SymI_HasProto(startProfTimer) \
- SymI_HasProto(stopProfTimer) \
- RTS_USER_SIGNALS_SYMBOLS \
+ SymI_HasProto(getGCStats) \
+ SymI_HasProto(getGCStatsEnabled) \
+ SymI_HasProto(genSymZh) \
+ SymI_HasProto(genericRaise) \
+ SymI_HasProto(getProgArgv) \
+ SymI_HasProto(getFullProgArgv) \
+ SymI_HasProto(getStablePtr) \
+ SymI_HasProto(hs_init) \
+ SymI_HasProto(hs_exit) \
+ SymI_HasProto(hs_set_argv) \
+ SymI_HasProto(hs_add_root) \
+ SymI_HasProto(hs_perform_gc) \
+ SymI_HasProto(hs_free_stable_ptr) \
+ SymI_HasProto(hs_free_fun_ptr) \
+ SymI_HasProto(hs_hpc_rootModule) \
+ SymI_HasProto(hs_hpc_module) \
+ SymI_HasProto(initLinker) \
+ SymI_HasProto(stg_unpackClosurezh) \
+ SymI_HasProto(stg_getApStackValzh) \
+ SymI_HasProto(stg_getSparkzh) \
+ SymI_HasProto(stg_numSparkszh) \
+ SymI_HasProto(stg_isCurrentThreadBoundzh) \
+ SymI_HasProto(stg_isEmptyMVarzh) \
+ SymI_HasProto(stg_killThreadzh) \
+ SymI_HasProto(loadArchive) \
+ SymI_HasProto(loadObj) \
+ SymI_HasProto(insertStableSymbol) \
+ SymI_HasProto(insertSymbol) \
+ SymI_HasProto(lookupSymbol) \
+ SymI_HasProto(stg_makeStablePtrzh) \
+ SymI_HasProto(stg_mkApUpd0zh) \
+ SymI_HasProto(stg_myThreadIdzh) \
+ SymI_HasProto(stg_labelThreadzh) \
+ SymI_HasProto(stg_newArrayzh) \
+ SymI_HasProto(stg_newArrayArrayzh) \
+ SymI_HasProto(stg_newBCOzh) \
+ SymI_HasProto(stg_newByteArrayzh) \
+ SymI_HasProto_redirect(newCAF, newDynCAF) \
+ SymI_HasProto(stg_newMVarzh) \
+ SymI_HasProto(stg_newMutVarzh) \
+ SymI_HasProto(stg_newTVarzh) \
+ SymI_HasProto(stg_noDuplicatezh) \
+ SymI_HasProto(stg_atomicModifyMutVarzh) \
+ SymI_HasProto(stg_casMutVarzh) \
+ SymI_HasProto(stg_newPinnedByteArrayzh) \
+ SymI_HasProto(stg_newAlignedPinnedByteArrayzh) \
+ SymI_HasProto(newSpark) \
+ SymI_HasProto(performGC) \
+ SymI_HasProto(performMajorGC) \
+ SymI_HasProto(prog_argc) \
+ SymI_HasProto(prog_argv) \
+ SymI_HasProto(stg_putMVarzh) \
+ SymI_HasProto(stg_raisezh) \
+ SymI_HasProto(stg_raiseIOzh) \
+ SymI_HasProto(stg_readTVarzh) \
+ SymI_HasProto(stg_readTVarIOzh) \
+ SymI_HasProto(resumeThread) \
+ SymI_HasProto(setNumCapabilities) \
+ SymI_HasProto(getNumberOfProcessors) \
+ SymI_HasProto(resolveObjs) \
+ SymI_HasProto(stg_retryzh) \
+ SymI_HasProto(rts_apply) \
+ SymI_HasProto(rts_checkSchedStatus) \
+ SymI_HasProto(rts_eval) \
+ SymI_HasProto(rts_evalIO) \
+ SymI_HasProto(rts_evalLazyIO) \
+ SymI_HasProto(rts_evalStableIO) \
+ SymI_HasProto(rts_eval_) \
+ SymI_HasProto(rts_getBool) \
+ SymI_HasProto(rts_getChar) \
+ SymI_HasProto(rts_getDouble) \
+ SymI_HasProto(rts_getFloat) \
+ SymI_HasProto(rts_getInt) \
+ SymI_HasProto(rts_getInt8) \
+ SymI_HasProto(rts_getInt16) \
+ SymI_HasProto(rts_getInt32) \
+ SymI_HasProto(rts_getInt64) \
+ SymI_HasProto(rts_getPtr) \
+ SymI_HasProto(rts_getFunPtr) \
+ SymI_HasProto(rts_getStablePtr) \
+ SymI_HasProto(rts_getThreadId) \
+ SymI_HasProto(rts_getWord) \
+ SymI_HasProto(rts_getWord8) \
+ SymI_HasProto(rts_getWord16) \
+ SymI_HasProto(rts_getWord32) \
+ SymI_HasProto(rts_getWord64) \
+ SymI_HasProto(rts_lock) \
+ SymI_HasProto(rts_mkBool) \
+ SymI_HasProto(rts_mkChar) \
+ SymI_HasProto(rts_mkDouble) \
+ SymI_HasProto(rts_mkFloat) \
+ SymI_HasProto(rts_mkInt) \
+ SymI_HasProto(rts_mkInt8) \
+ SymI_HasProto(rts_mkInt16) \
+ SymI_HasProto(rts_mkInt32) \
+ SymI_HasProto(rts_mkInt64) \
+ SymI_HasProto(rts_mkPtr) \
+ SymI_HasProto(rts_mkFunPtr) \
+ SymI_HasProto(rts_mkStablePtr) \
+ SymI_HasProto(rts_mkString) \
+ SymI_HasProto(rts_mkWord) \
+ SymI_HasProto(rts_mkWord8) \
+ SymI_HasProto(rts_mkWord16) \
+ SymI_HasProto(rts_mkWord32) \
+ SymI_HasProto(rts_mkWord64) \
+ SymI_HasProto(rts_unlock) \
+ SymI_HasProto(rts_unsafeGetMyCapability) \
+ SymI_HasProto(rtsSupportsBoundThreads) \
+ SymI_HasProto(rts_isProfiled) \
+ SymI_HasProto(setProgArgv) \
+ SymI_HasProto(startupHaskell) \
+ SymI_HasProto(shutdownHaskell) \
+ SymI_HasProto(shutdownHaskellAndExit) \
+ SymI_HasProto(stable_ptr_table) \
+ SymI_HasProto(stackOverflow) \
+ SymI_HasProto(stg_CAF_BLACKHOLE_info) \
+ SymI_HasProto(stg_BLACKHOLE_info) \
+ SymI_HasProto(__stg_EAGER_BLACKHOLE_info) \
+ SymI_HasProto(stg_BLOCKING_QUEUE_CLEAN_info) \
+ SymI_HasProto(stg_BLOCKING_QUEUE_DIRTY_info) \
+ SymI_HasProto(startTimer) \
+ SymI_HasProto(stg_MVAR_CLEAN_info) \
+ SymI_HasProto(stg_MVAR_DIRTY_info) \
+ SymI_HasProto(stg_IND_STATIC_info) \
+ SymI_HasProto(stg_ARR_WORDS_info) \
+ SymI_HasProto(stg_MUT_ARR_PTRS_DIRTY_info) \
+ SymI_HasProto(stg_MUT_ARR_PTRS_FROZEN_info) \
+ SymI_HasProto(stg_MUT_ARR_PTRS_FROZEN0_info) \
+ SymI_HasProto(stg_WEAK_info) \
+ SymI_HasProto(stg_ap_v_info) \
+ SymI_HasProto(stg_ap_f_info) \
+ SymI_HasProto(stg_ap_d_info) \
+ SymI_HasProto(stg_ap_l_info) \
+ SymI_HasProto(stg_ap_n_info) \
+ SymI_HasProto(stg_ap_p_info) \
+ SymI_HasProto(stg_ap_pv_info) \
+ SymI_HasProto(stg_ap_pp_info) \
+ SymI_HasProto(stg_ap_ppv_info) \
+ SymI_HasProto(stg_ap_ppp_info) \
+ SymI_HasProto(stg_ap_pppv_info) \
+ SymI_HasProto(stg_ap_pppp_info) \
+ SymI_HasProto(stg_ap_ppppp_info) \
+ SymI_HasProto(stg_ap_pppppp_info) \
+ SymI_HasProto(stg_ap_0_fast) \
+ SymI_HasProto(stg_ap_v_fast) \
+ SymI_HasProto(stg_ap_f_fast) \
+ SymI_HasProto(stg_ap_d_fast) \
+ SymI_HasProto(stg_ap_l_fast) \
+ SymI_HasProto(stg_ap_n_fast) \
+ SymI_HasProto(stg_ap_p_fast) \
+ SymI_HasProto(stg_ap_pv_fast) \
+ SymI_HasProto(stg_ap_pp_fast) \
+ SymI_HasProto(stg_ap_ppv_fast) \
+ SymI_HasProto(stg_ap_ppp_fast) \
+ SymI_HasProto(stg_ap_pppv_fast) \
+ SymI_HasProto(stg_ap_pppp_fast) \
+ SymI_HasProto(stg_ap_ppppp_fast) \
+ SymI_HasProto(stg_ap_pppppp_fast) \
+ SymI_HasProto(stg_ap_1_upd_info) \
+ SymI_HasProto(stg_ap_2_upd_info) \
+ SymI_HasProto(stg_ap_3_upd_info) \
+ SymI_HasProto(stg_ap_4_upd_info) \
+ SymI_HasProto(stg_ap_5_upd_info) \
+ SymI_HasProto(stg_ap_6_upd_info) \
+ SymI_HasProto(stg_ap_7_upd_info) \
+ SymI_HasProto(stg_exit) \
+ SymI_HasProto(stg_sel_0_upd_info) \
+ SymI_HasProto(stg_sel_10_upd_info) \
+ SymI_HasProto(stg_sel_11_upd_info) \
+ SymI_HasProto(stg_sel_12_upd_info) \
+ SymI_HasProto(stg_sel_13_upd_info) \
+ SymI_HasProto(stg_sel_14_upd_info) \
+ SymI_HasProto(stg_sel_15_upd_info) \
+ SymI_HasProto(stg_sel_1_upd_info) \
+ SymI_HasProto(stg_sel_2_upd_info) \
+ SymI_HasProto(stg_sel_3_upd_info) \
+ SymI_HasProto(stg_sel_4_upd_info) \
+ SymI_HasProto(stg_sel_5_upd_info) \
+ SymI_HasProto(stg_sel_6_upd_info) \
+ SymI_HasProto(stg_sel_7_upd_info) \
+ SymI_HasProto(stg_sel_8_upd_info) \
+ SymI_HasProto(stg_sel_9_upd_info) \
+ SymI_HasProto(stg_upd_frame_info) \
+ SymI_HasProto(stg_bh_upd_frame_info) \
+ SymI_HasProto(suspendThread) \
+ SymI_HasProto(stg_takeMVarzh) \
+ SymI_HasProto(stg_threadStatuszh) \
+ SymI_HasProto(stg_tryPutMVarzh) \
+ SymI_HasProto(stg_tryTakeMVarzh) \
+ SymI_HasProto(stg_unmaskAsyncExceptionszh) \
+ SymI_HasProto(unloadObj) \
+ SymI_HasProto(stg_unsafeThawArrayzh) \
+ SymI_HasProto(stg_waitReadzh) \
+ SymI_HasProto(stg_waitWritezh) \
+ SymI_HasProto(stg_writeTVarzh) \
+ SymI_HasProto(stg_yieldzh) \
+ SymI_NeedsProto(stg_interp_constr_entry) \
+ SymI_HasProto(stg_arg_bitmaps) \
+ SymI_HasProto(large_alloc_lim) \
+ SymI_HasProto(g0) \
+ SymI_HasProto(allocate) \
+ SymI_HasProto(allocateExec) \
+ SymI_HasProto(freeExec) \
+ SymI_HasProto(getAllocations) \
+ SymI_HasProto(revertCAFs) \
+ SymI_HasProto(RtsFlags) \
+ SymI_NeedsProto(rts_breakpoint_io_action) \
+ SymI_NeedsProto(rts_stop_next_breakpoint) \
+ SymI_NeedsProto(rts_stop_on_exception) \
+ SymI_HasProto(stopTimer) \
+ SymI_HasProto(n_capabilities) \
+ SymI_HasProto(stg_traceCcszh) \
+ SymI_HasProto(stg_traceEventzh) \
+ SymI_HasProto(getMonotonicNSec) \
+ SymI_HasProto(lockFile) \
+ SymI_HasProto(unlockFile) \
+ SymI_HasProto(startProfTimer) \
+ SymI_HasProto(stopProfTimer) \
+ RTS_USER_SIGNALS_SYMBOLS \
RTS_INTCHAR_SYMBOLS
diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index 9cedabdca8..1a531b2149 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -1,6 +1,6 @@
/* -----------------------------------------------------------------------------
*
- * (c) The GHC Team, 1998-2011
+ * (c) The GHC Team, 1998-2012
*
* Out-of-line primitive operations
*
@@ -10,14 +10,9 @@
* this file contains code for most of those with the attribute
* out_of_line=True.
*
- * Entry convention: the entry convention for a primop is that all the
- * args are in Stg registers (R1, R2, etc.). This is to make writing
- * the primops easier. (see compiler/codeGen/CgCallConv.hs).
- *
- * Return convention: results from a primop are generally returned
- * using the ordinary unboxed tuple return convention. The C-- parser
- * implements the RET_xxxx() macros to perform unboxed-tuple returns
- * based on the prevailing return convention.
+ * Entry convention: the entry convention for a primop is the
+ * NativeNodeCall convention, and the return convention is
+ * NativeReturn. (see compiler/cmm/CmmCallConv.hs)
*
* This file is written in a subset of C--, extended with various
* features specific to GHC. It is compiled by GHC directly. For the
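
[Note: a schematic sketch of the new primop shape, for orientation. The
names stg_examplezh and exampleC are invented for illustration and are
not part of this patch; the calling conventions are the ones named in
the comment above.]

    stg_examplezh ( P_ obj, W_ n )            // args arrive via NativeNodeCall
    {
        W_ r;
        (r) = ccall exampleC(obj "ptr", n);   // bind the result of a C call
        return (r, obj);                      // unboxed-pair result via NativeReturn
    }
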
@@ -44,8 +39,6 @@ import sm_mutex;
Basically just new*Array - the others are all inline macros.
- The size arg is always passed in R1, and the result returned in R1.
-
The slow entry point is for returning from a heap check, the saved
size argument must be re-loaded from the stack.
-------------------------------------------------------------------------- */
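
[Note: the heap-check idiom used by the allocating primops below,
isolated. stg_allocPrimzh is an invented name; allocate and
MyCapability are the RTS entry points already used in this file. The
MAYBE_GC_N(f, n) variant seen in some procs re-enters f with the
argument n restored after the GC.]

    stg_allocPrimzh ( W_ n )    // n = number of words to allocate
    {
        W_ p;
        again:
        MAYBE_GC(again);        // if the check fails: GC, then resume at "again"
        ("ptr" p) = ccall allocate(MyCapability() "ptr", n);
        return (p);
    }
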
@@ -54,29 +47,32 @@ import sm_mutex;
* round up to the nearest word for the size of the array.
*/
-stg_newByteArrayzh
+stg_newByteArrayzh ( W_ n )
{
- W_ words, payload_words, n, p;
- MAYBE_GC(NO_PTRS,stg_newByteArrayzh);
- n = R1;
+ W_ words, payload_words;
+ gcptr p;
+
+ MAYBE_GC_N(stg_newByteArrayzh, n);
+
payload_words = ROUNDUP_BYTES_TO_WDS(n);
words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
- ("ptr" p) = foreign "C" allocate(MyCapability() "ptr",words) [];
+ ("ptr" p) = ccall allocate(MyCapability() "ptr",words);
TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
SET_HDR(p, stg_ARR_WORDS_info, CCCS);
StgArrWords_bytes(p) = n;
- RET_P(p);
+ return (p);
}
#define BA_ALIGN 16
#define BA_MASK (BA_ALIGN-1)
-stg_newPinnedByteArrayzh
+stg_newPinnedByteArrayzh ( W_ n )
{
- W_ words, n, bytes, payload_words, p;
+ W_ words, bytes, payload_words;
+ gcptr p;
+
+ MAYBE_GC_N(stg_newPinnedByteArrayzh, n);
- MAYBE_GC(NO_PTRS,stg_newPinnedByteArrayzh);
- n = R1;
bytes = n;
/* payload_words is what we will tell the profiler we had to allocate */
payload_words = ROUNDUP_BYTES_TO_WDS(bytes);
@@ -89,7 +85,7 @@ stg_newPinnedByteArrayzh
/* Now we convert to a number of words: */
words = ROUNDUP_BYTES_TO_WDS(bytes);
- ("ptr" p) = foreign "C" allocatePinned(MyCapability() "ptr", words) [];
+ ("ptr" p) = ccall allocatePinned(MyCapability() "ptr", words);
TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
/* Now we need to move p forward so that the payload is aligned
@@ -98,16 +94,15 @@ stg_newPinnedByteArrayzh
SET_HDR(p, stg_ARR_WORDS_info, CCCS);
StgArrWords_bytes(p) = n;
- RET_P(p);
+ return (p);
}
-stg_newAlignedPinnedByteArrayzh
+stg_newAlignedPinnedByteArrayzh ( W_ n, W_ alignment )
{
- W_ words, n, bytes, payload_words, p, alignment;
+ W_ words, bytes, payload_words;
+ gcptr p;
- MAYBE_GC(NO_PTRS,stg_newAlignedPinnedByteArrayzh);
- n = R1;
- alignment = R2;
+ again: MAYBE_GC(again);
/* we always supply at least word-aligned memory, so there's no
need to allow extra space for alignment if the requirement is less
@@ -128,7 +123,7 @@ stg_newAlignedPinnedByteArrayzh
/* Now we convert to a number of words: */
words = ROUNDUP_BYTES_TO_WDS(bytes);
- ("ptr" p) = foreign "C" allocatePinned(MyCapability() "ptr", words) [];
+ ("ptr" p) = ccall allocatePinned(MyCapability() "ptr", words);
TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
/* Now we need to move p forward so that the payload is aligned
@@ -138,23 +133,22 @@ stg_newAlignedPinnedByteArrayzh
SET_HDR(p, stg_ARR_WORDS_info, CCCS);
StgArrWords_bytes(p) = n;
- RET_P(p);
+ return (p);
}
-stg_newArrayzh
+stg_newArrayzh ( W_ n /* words */, gcptr init )
{
- W_ words, n, init, arr, p, size;
- /* Args: R1 = words, R2 = initialisation value */
+ W_ words, size;
+ gcptr p, arr;
- n = R1;
- MAYBE_GC(R2_PTR,stg_newArrayzh);
+ again: MAYBE_GC(again);
// the mark area contains one byte for each 2^MUT_ARR_PTRS_CARD_BITS words
// in the array, making sure we round up, and then rounding up to a whole
// number of words.
size = n + mutArrPtrsCardWords(n);
words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + size;
- ("ptr" arr) = foreign "C" allocate(MyCapability() "ptr",words) [R2];
+ ("ptr" arr) = ccall allocate(MyCapability() "ptr",words);
TICK_ALLOC_PRIM(SIZEOF_StgMutArrPtrs, WDS(n), 0);
SET_HDR(arr, stg_MUT_ARR_PTRS_DIRTY_info, CCCS);
@@ -162,7 +156,6 @@ stg_newArrayzh
StgMutArrPtrs_size(arr) = size;
    // Initialise all elements of the array with the value in R2
- init = R2;
p = arr + SIZEOF_StgMutArrPtrs;
for:
if (p < arr + WDS(words)) {
@@ -178,10 +171,10 @@ stg_newArrayzh
goto for2;
}
- RET_P(arr);
+ return (arr);
}
-stg_unsafeThawArrayzh
+stg_unsafeThawArrayzh ( gcptr arr )
{
// SUBTLETY TO DO WITH THE OLD GEN MUTABLE LIST
//
@@ -201,31 +194,30 @@ stg_unsafeThawArrayzh
// we put it on the mutable list more than once, but it would get scavenged
// multiple times during GC, which would be unnecessarily slow.
//
- if (StgHeader_info(R1) != stg_MUT_ARR_PTRS_FROZEN0_info) {
- SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);
- recordMutable(R1, R1);
+ if (StgHeader_info(arr) != stg_MUT_ARR_PTRS_FROZEN0_info) {
+ SET_INFO(arr,stg_MUT_ARR_PTRS_DIRTY_info);
+ recordMutable(arr);
// must be done after SET_INFO, because it ASSERTs closure_MUTABLE()
- RET_P(R1);
+ return (arr);
} else {
- SET_INFO(R1,stg_MUT_ARR_PTRS_DIRTY_info);
- RET_P(R1);
+ SET_INFO(arr,stg_MUT_ARR_PTRS_DIRTY_info);
+ return (arr);
}
}
-stg_newArrayArrayzh
+stg_newArrayArrayzh ( W_ n /* words */ )
{
- W_ words, n, arr, p, size;
- /* Args: R1 = words */
+ W_ words, size;
+ gcptr p, arr;
- n = R1;
- MAYBE_GC(NO_PTRS,stg_newArrayArrayzh);
+ MAYBE_GC_N(stg_newArrayArrayzh, n);
// the mark area contains one byte for each 2^MUT_ARR_PTRS_CARD_BITS words
// in the array, making sure we round up, and then rounding up to a whole
// number of words.
size = n + mutArrPtrsCardWords(n);
words = BYTES_TO_WDS(SIZEOF_StgMutArrPtrs) + size;
- ("ptr" arr) = foreign "C" allocate(MyCapability() "ptr",words) [];
+ ("ptr" arr) = ccall allocate(MyCapability() "ptr",words);
TICK_ALLOC_PRIM(SIZEOF_StgMutArrPtrs, WDS(n), 0);
SET_HDR(arr, stg_MUT_ARR_PTRS_DIRTY_info, W_[CCCS]);
@@ -248,7 +240,7 @@ stg_newArrayArrayzh
goto for2;
}
- RET_P(arr);
+ return (arr);
}
@@ -256,46 +248,39 @@ stg_newArrayArrayzh
MutVar primitives
-------------------------------------------------------------------------- */
-stg_newMutVarzh
+stg_newMutVarzh ( gcptr init )
{
W_ mv;
- /* Args: R1 = initialisation value */
- ALLOC_PRIM( SIZEOF_StgMutVar, R1_PTR, stg_newMutVarzh);
+ ALLOC_PRIM (SIZEOF_StgMutVar);
mv = Hp - SIZEOF_StgMutVar + WDS(1);
SET_HDR(mv,stg_MUT_VAR_DIRTY_info,CCCS);
- StgMutVar_var(mv) = R1;
+ StgMutVar_var(mv) = init;
- RET_P(mv);
+ return (mv);
}
-stg_casMutVarzh
+stg_casMutVarzh ( gcptr mv, gcptr old, gcptr new )
/* MutVar# s a -> a -> a -> State# s -> (# State#, Int#, a #) */
{
- W_ mv, old, new, h;
-
- mv = R1;
- old = R2;
- new = R3;
+ gcptr h;
- (h) = foreign "C" cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var,
- old, new) [];
+ (h) = ccall cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var,
+ old, new);
if (h != old) {
- RET_NP(1,h);
+ return (1,h);
} else {
if (GET_INFO(mv) == stg_MUT_VAR_CLEAN_info) {
- foreign "C" dirty_MUT_VAR(BaseReg "ptr", mv "ptr") [];
+ ccall dirty_MUT_VAR(BaseReg "ptr", mv "ptr");
}
- RET_NP(0,h);
+ return (0,h);
}
}
-
-stg_atomicModifyMutVarzh
+stg_atomicModifyMutVarzh ( gcptr mv, gcptr f )
{
- W_ mv, f, z, x, y, r, h;
- /* Args: R1 :: MutVar#, R2 :: a -> (a,b) */
+ W_ z, x, y, r, h;
/* If x is the current contents of the MutVar#, then
We want to make the new contents point to
@@ -331,10 +316,7 @@ stg_atomicModifyMutVarzh
#define SIZE (THUNK_2_SIZE + THUNK_1_SIZE + THUNK_1_SIZE)
- HP_CHK_GEN_TICKY(SIZE, R1_PTR & R2_PTR, stg_atomicModifyMutVarzh);
-
- mv = R1;
- f = R2;
+ HP_CHK_GEN_TICKY(SIZE);
TICK_ALLOC_THUNK_2();
CCCS_ALLOC(THUNK_2_SIZE);
@@ -361,17 +343,17 @@ stg_atomicModifyMutVarzh
x = StgMutVar_var(mv);
StgThunk_payload(z,1) = x;
#ifdef THREADED_RTS
- (h) = foreign "C" cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var, x, y) [];
+ (h) = ccall cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var, x, y);
if (h != x) { goto retry; }
#else
StgMutVar_var(mv) = y;
#endif
if (GET_INFO(mv) == stg_MUT_VAR_CLEAN_info) {
- foreign "C" dirty_MUT_VAR(BaseReg "ptr", mv "ptr") [];
+ ccall dirty_MUT_VAR(BaseReg "ptr", mv "ptr");
}
- RET_P(r);
+ return (r);
}
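
[Note: the compare-and-swap retry loop used above, annotated; names as
in the code. This is the standard lock-free update pattern: read,
compute, CAS, and start over if another thread got there first.]

    retry:
        x = StgMutVar_var(mv);              // read the current contents
        // ... build the new contents y from x ...
        (h) = ccall cas(mv + SIZEOF_StgHeader + OFFSET_StgMutVar_var, x, y);
        if (h != x) { goto retry; }         // lost the race: retry with the new x
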
/* -----------------------------------------------------------------------------
@@ -380,15 +362,13 @@ stg_atomicModifyMutVarzh
STRING(stg_weak_msg,"New weak pointer at %p\n")
-stg_mkWeakzh
+stg_mkWeakzh ( gcptr key,
+ gcptr value,
+ gcptr finalizer /* or stg_NO_FINALIZER_closure */ )
{
- /* R1 = key
- R2 = value
- R3 = finalizer (or stg_NO_FINALIZER_closure)
- */
- W_ w;
+ gcptr w;
- ALLOC_PRIM( SIZEOF_StgWeak, R1_PTR & R2_PTR & R3_PTR, stg_mkWeakzh );
+    ALLOC_PRIM (SIZEOF_StgWeak);
w = Hp - SIZEOF_StgWeak + WDS(1);
SET_HDR(w, stg_WEAK_info, CCCS);
@@ -397,9 +377,9 @@ stg_mkWeakzh
// Should StgWeak_cfinalizer(w) be stg_NO_FINALIZER_closure or
// something else?
- StgWeak_key(w) = R1;
- StgWeak_value(w) = R2;
- StgWeak_finalizer(w) = R3;
+ StgWeak_key(w) = key;
+ StgWeak_value(w) = value;
+ StgWeak_finalizer(w) = finalizer;
StgWeak_cfinalizer(w) = stg_NO_FINALIZER_closure;
ACQUIRE_LOCK(sm_mutex);
@@ -407,49 +387,34 @@ stg_mkWeakzh
W_[weak_ptr_list] = w;
RELEASE_LOCK(sm_mutex);
- IF_DEBUG(weak, foreign "C" debugBelch(stg_weak_msg,w) []);
+ IF_DEBUG(weak, ccall debugBelch(stg_weak_msg,w));
- RET_P(w);
+ return (w);
}
-stg_mkWeakNoFinalizzerzh
+stg_mkWeakNoFinalizzerzh ( gcptr key, gcptr value )
{
- /* R1 = key
- R2 = value
- */
- R3 = stg_NO_FINALIZER_closure;
-
- jump stg_mkWeakzh;
+ jump stg_mkWeakzh (key, value, stg_NO_FINALIZER_closure);
}
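
[Note: this function is a compact before/after picture of tail calls in
the two syntaxes. The old style smuggled the extra argument through a
register and jumped with no argument list; the new style passes the
arguments explicitly and lets the compiler assign registers.]

    // old: the caller must know stg_mkWeakzh reads R1-R3
    R3 = stg_NO_FINALIZER_closure;
    jump stg_mkWeakzh;

    // new: the arguments are part of the call
    jump stg_mkWeakzh (key, value, stg_NO_FINALIZER_closure);
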
-stg_mkWeakForeignEnvzh
+stg_mkWeakForeignEnvzh ( gcptr key,
+ gcptr val,
+ W_ fptr, // finalizer
+ W_ ptr,
+ W_ flag, // has environment (0 or 1)
+ W_ eptr )
{
- /* R1 = key
- R2 = value
- R3 = finalizer
- R4 = pointer
- R5 = has environment (0 or 1)
- R6 = environment
- */
- W_ w, payload_words, words, p;
-
- W_ key, val, fptr, ptr, flag, eptr;
-
- key = R1;
- val = R2;
- fptr = R3;
- ptr = R4;
- flag = R5;
- eptr = R6;
-
- ALLOC_PRIM( SIZEOF_StgWeak, R1_PTR & R2_PTR, stg_mkWeakForeignEnvzh );
+ W_ payload_words, words;
+ gcptr w, p;
+
+ ALLOC_PRIM (SIZEOF_StgWeak);
w = Hp - SIZEOF_StgWeak + WDS(1);
SET_HDR(w, stg_WEAK_info, CCCS);
payload_words = 4;
words = BYTES_TO_WDS(SIZEOF_StgArrWords) + payload_words;
- ("ptr" p) = foreign "C" allocate(MyCapability() "ptr", words) [];
+ ("ptr" p) = ccall allocate(MyCapability() "ptr", words);
TICK_ALLOC_PRIM(SIZEOF_StgArrWords,WDS(payload_words),0);
SET_HDR(p, stg_ARR_WORDS_info, CCCS);
@@ -473,22 +438,18 @@ stg_mkWeakForeignEnvzh
W_[weak_ptr_list] = w;
RELEASE_LOCK(sm_mutex);
- IF_DEBUG(weak, foreign "C" debugBelch(stg_weak_msg,w) []);
+ IF_DEBUG(weak, ccall debugBelch(stg_weak_msg,w));
- RET_P(w);
+ return (w);
}
-stg_finalizzeWeakzh
+stg_finalizzeWeakzh ( gcptr w )
{
- /* R1 = weak ptr
- */
- W_ w, f, arr;
-
- w = R1;
+ gcptr f, arr;
// already dead?
if (GET_INFO(w) == stg_DEAD_WEAK_info) {
- RET_NP(0,stg_NO_FINALIZER_closure);
+ return (0,stg_NO_FINALIZER_closure);
}
// kill it
@@ -516,26 +477,25 @@ stg_finalizzeWeakzh
StgDeadWeak_link(w) = StgWeak_link(w);
if (arr != stg_NO_FINALIZER_closure) {
- foreign "C" runCFinalizer(StgArrWords_payload(arr,0),
+ ccall runCFinalizer(StgArrWords_payload(arr,0),
StgArrWords_payload(arr,1),
StgArrWords_payload(arr,2),
- StgArrWords_payload(arr,3)) [];
+ StgArrWords_payload(arr,3));
}
/* return the finalizer */
if (f == stg_NO_FINALIZER_closure) {
- RET_NP(0,stg_NO_FINALIZER_closure);
+ return (0,stg_NO_FINALIZER_closure);
} else {
- RET_NP(1,f);
+ return (1,f);
}
}
-stg_deRefWeakzh
+stg_deRefWeakzh ( gcptr w )
{
- /* R1 = weak ptr */
- W_ w, code, val;
+ W_ code;
+ gcptr val;
- w = R1;
if (GET_INFO(w) == stg_WEAK_info) {
code = 1;
val = StgWeak_value(w);
@@ -543,171 +503,144 @@ stg_deRefWeakzh
code = 0;
val = w;
}
- RET_NP(code,val);
+ return (code,val);
}
/* -----------------------------------------------------------------------------
Floating point operations.
-------------------------------------------------------------------------- */
-stg_decodeFloatzuIntzh
+stg_decodeFloatzuIntzh ( F_ arg )
{
W_ p;
- F_ arg;
W_ mp_tmp1;
W_ mp_tmp_w;
- STK_CHK_GEN( WDS(2), NO_PTRS, stg_decodeFloatzuIntzh );
+ STK_CHK_GEN_N (WDS(2));
mp_tmp1 = Sp - WDS(1);
mp_tmp_w = Sp - WDS(2);
- /* arguments: F1 = Float# */
- arg = F1;
-
/* Perform the operation */
- foreign "C" __decodeFloat_Int(mp_tmp1 "ptr", mp_tmp_w "ptr", arg) [];
+ ccall __decodeFloat_Int(mp_tmp1 "ptr", mp_tmp_w "ptr", arg);
/* returns: (Int# (mantissa), Int# (exponent)) */
- RET_NN(W_[mp_tmp1], W_[mp_tmp_w]);
+ return (W_[mp_tmp1], W_[mp_tmp_w]);
}
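
[Note: the stack-scratch idiom used by the decode primops, annotated
with the same names as above: STK_CHK_GEN_N reserves words below Sp,
those words act as out-parameters for the C call, and the results are
loaded back and returned as an unboxed tuple.]

    STK_CHK_GEN_N (WDS(2));     // reserve two scratch words below Sp
    mp_tmp1  = Sp - WDS(1);     // out-parameter: mantissa
    mp_tmp_w = Sp - WDS(2);     // out-parameter: exponent
    ccall __decodeFloat_Int(mp_tmp1 "ptr", mp_tmp_w "ptr", arg);
    return (W_[mp_tmp1], W_[mp_tmp_w]);   // (Int# mantissa, Int# exponent)
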
-stg_decodeDoublezu2Intzh
+stg_decodeDoublezu2Intzh ( D_ arg )
{
- D_ arg;
W_ p;
W_ mp_tmp1;
W_ mp_tmp2;
W_ mp_result1;
W_ mp_result2;
- STK_CHK_GEN( WDS(4), NO_PTRS, stg_decodeDoublezu2Intzh );
+ STK_CHK_GEN_N (WDS(4));
mp_tmp1 = Sp - WDS(1);
mp_tmp2 = Sp - WDS(2);
mp_result1 = Sp - WDS(3);
mp_result2 = Sp - WDS(4);
- /* arguments: D1 = Double# */
- arg = D1;
-
/* Perform the operation */
- foreign "C" __decodeDouble_2Int(mp_tmp1 "ptr", mp_tmp2 "ptr",
+ ccall __decodeDouble_2Int(mp_tmp1 "ptr", mp_tmp2 "ptr",
mp_result1 "ptr", mp_result2 "ptr",
- arg) [];
+ arg);
/* returns:
(Int# (mant sign), Word# (mant high), Word# (mant low), Int# (expn)) */
- RET_NNNN(W_[mp_tmp1], W_[mp_tmp2], W_[mp_result1], W_[mp_result2]);
+ return (W_[mp_tmp1], W_[mp_tmp2], W_[mp_result1], W_[mp_result2]);
}
/* -----------------------------------------------------------------------------
* Concurrency primitives
* -------------------------------------------------------------------------- */
-stg_forkzh
+stg_forkzh ( gcptr closure )
{
- /* args: R1 = closure to spark */
-
- MAYBE_GC(R1_PTR, stg_forkzh);
+ MAYBE_GC_P(stg_forkzh, closure);
- W_ closure;
- W_ threadid;
- closure = R1;
+ gcptr threadid;
- ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
+ ("ptr" threadid) = ccall createIOThread( MyCapability() "ptr",
RtsFlags_GcFlags_initialStkSize(RtsFlags),
- closure "ptr") [];
+ closure "ptr");
/* start blocked if the current thread is blocked */
StgTSO_flags(threadid) = %lobits16(
TO_W_(StgTSO_flags(threadid)) |
TO_W_(StgTSO_flags(CurrentTSO)) & (TSO_BLOCKEX | TSO_INTERRUPTIBLE));
- foreign "C" scheduleThread(MyCapability() "ptr", threadid "ptr") [];
+ ccall scheduleThread(MyCapability() "ptr", threadid "ptr");
// context switch soon, but not immediately: we don't want every
// forkIO to force a context-switch.
Capability_context_switch(MyCapability()) = 1 :: CInt;
- RET_P(threadid);
+ return (threadid);
}
-stg_forkOnzh
+stg_forkOnzh ( W_ cpu, gcptr closure )
{
- /* args: R1 = cpu, R2 = closure to spark */
+again: MAYBE_GC(again);
- MAYBE_GC(R2_PTR, stg_forkOnzh);
+ gcptr threadid;
- W_ cpu;
- W_ closure;
- W_ threadid;
- cpu = R1;
- closure = R2;
-
- ("ptr" threadid) = foreign "C" createIOThread( MyCapability() "ptr",
+ ("ptr" threadid) = ccall createIOThread( MyCapability() "ptr",
RtsFlags_GcFlags_initialStkSize(RtsFlags),
- closure "ptr") [];
+ closure "ptr");
/* start blocked if the current thread is blocked */
StgTSO_flags(threadid) = %lobits16(
TO_W_(StgTSO_flags(threadid)) |
TO_W_(StgTSO_flags(CurrentTSO)) & (TSO_BLOCKEX | TSO_INTERRUPTIBLE));
- foreign "C" scheduleThreadOn(MyCapability() "ptr", cpu, threadid "ptr") [];
+ ccall scheduleThreadOn(MyCapability() "ptr", cpu, threadid "ptr");
// context switch soon, but not immediately: we don't want every
// forkIO to force a context-switch.
Capability_context_switch(MyCapability()) = 1 :: CInt;
- RET_P(threadid);
+ return (threadid);
}
-stg_yieldzh
+stg_yieldzh ()
{
// when we yield to the scheduler, we have to tell it to put the
// current thread to the back of the queue by setting the
// context_switch flag. If we don't do this, it will run the same
// thread again.
Capability_context_switch(MyCapability()) = 1 :: CInt;
- jump stg_yield_noregs;
+ jump stg_yield_noregs();
}
-stg_myThreadIdzh
+stg_myThreadIdzh ()
{
- /* no args. */
- RET_P(CurrentTSO);
+ return (CurrentTSO);
}
-stg_labelThreadzh
+stg_labelThreadzh ( gcptr threadid, W_ addr )
{
- /* args:
- R1 = ThreadId#
- R2 = Addr# */
#if defined(DEBUG) || defined(TRACING) || defined(DTRACE)
- foreign "C" labelThread(MyCapability() "ptr", R1 "ptr", R2 "ptr") [];
+ ccall labelThread(MyCapability() "ptr", threadid "ptr", addr "ptr");
#endif
- jump %ENTRY_CODE(Sp(0));
+ return ();
}
-stg_isCurrentThreadBoundzh
+stg_isCurrentThreadBoundzh (/* no args */)
{
- /* no args */
W_ r;
- (r) = foreign "C" isThreadBound(CurrentTSO) [];
- RET_N(r);
+ (r) = ccall isThreadBound(CurrentTSO);
+ return (r);
}
-stg_threadStatuszh
+stg_threadStatuszh ( gcptr tso )
{
- /* args: R1 :: ThreadId# */
- W_ tso;
W_ why_blocked;
W_ what_next;
W_ ret, cap, locked;
- tso = R1;
-
what_next = TO_W_(StgTSO_what_next(tso));
why_blocked = TO_W_(StgTSO_why_blocked(tso));
// Note: these two reads are not atomic, so they might end up
@@ -733,214 +666,250 @@ stg_threadStatuszh
locked = 0;
}
- RET_NNN(ret,cap,locked);
+ return (ret,cap,locked);
}
/* -----------------------------------------------------------------------------
* TVar primitives
* -------------------------------------------------------------------------- */
-#define SP_OFF 0
+// Catch retry frame -----------------------------------------------------------
+
+#define CATCH_RETRY_FRAME_FIELDS(w_,p_,info_ptr, \
+ running_alt_code, \
+ first_code, \
+ alt_code) \
+ w_ info_ptr, \
+ PROF_HDR_FIELDS(w_) \
+ w_ running_alt_code, \
+ p_ first_code, \
+ p_ alt_code
-// Catch retry frame ------------------------------------------------------------
INFO_TABLE_RET(stg_catch_retry_frame, CATCH_RETRY_FRAME,
-#if defined(PROFILING)
- W_ unused1, W_ unused2,
-#endif
- W_ unused3, P_ unused4, P_ unused5)
+ CATCH_RETRY_FRAME_FIELDS(W_,P_,
+ info_ptr,
+ running_alt_code,
+ first_code,
+ alt_code))
+ return (P_ ret)
{
- W_ r, frame, trec, outer;
-
- frame = Sp;
- trec = StgTSO_trec(CurrentTSO);
- outer = StgTRecHeader_enclosing_trec(trec);
- (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
- if (r != 0) {
- /* Succeeded (either first branch or second branch) */
- StgTSO_trec(CurrentTSO) = outer;
- Sp = Sp + SIZEOF_StgCatchRetryFrame;
- jump %ENTRY_CODE(Sp(SP_OFF));
- } else {
- /* Did not commit: re-execute */
- W_ new_trec;
- ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
- StgTSO_trec(CurrentTSO) = new_trec;
- if (StgCatchRetryFrame_running_alt_code(frame) != 0::I32) {
- R1 = StgCatchRetryFrame_alt_code(frame);
- } else {
- R1 = StgCatchRetryFrame_first_code(frame);
- }
- jump stg_ap_v_fast;
- }
-}
+ W_ r;
+ gcptr trec, outer, arg;
+ trec = StgTSO_trec(CurrentTSO);
+ outer = StgTRecHeader_enclosing_trec(trec);
+ (r) = ccall stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr");
+ if (r != 0) {
+ // Succeeded (either first branch or second branch)
+ StgTSO_trec(CurrentTSO) = outer;
+ return (ret);
+ } else {
+ // Did not commit: re-execute
+ P_ new_trec;
+ ("ptr" new_trec) = ccall stmStartTransaction(MyCapability() "ptr",
+ outer "ptr");
+ StgTSO_trec(CurrentTSO) = new_trec;
+ if (running_alt_code != 0) {
+ jump stg_ap_v_fast
+ (CATCH_RETRY_FRAME_FIELDS(,,info_ptr,
+ running_alt_code,
+ first_code,
+ alt_code))
+ (alt_code);
+ } else {
+ jump stg_ap_v_fast
+ (CATCH_RETRY_FRAME_FIELDS(,,info_ptr,
+ running_alt_code,
+ first_code,
+ alt_code))
+ (first_code);
+ }
+ }
+}
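
[Note: the shape of these jumps is worth spelling out. The first
parenthesised group after stg_ap_v_fast is the frame to push (the
FRAME_FIELDS macro expands to the frame's fields), and the second group
is the argument list of the function being entered.]

    jump stg_ap_v_fast
        (CATCH_RETRY_FRAME_FIELDS(,,info_ptr,
                                  running_alt_code,
                                  first_code,
                                  alt_code))   // re-push this frame
        (alt_code);                            // then enter alt_code
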
// Atomically frame ------------------------------------------------------------
+// This must match StgAtomicallyFrame in Closures.h
+#define ATOMICALLY_FRAME_FIELDS(w_,p_,info_ptr,code,next,result) \
+ w_ info_ptr, \
+ PROF_HDR_FIELDS(w_) \
+ p_ code, \
+ p_ next, \
+ p_ result
+
+
INFO_TABLE_RET(stg_atomically_frame, ATOMICALLY_FRAME,
-#if defined(PROFILING)
- W_ unused1, W_ unused2,
-#endif
- P_ code, P_ next_invariant_to_check, P_ result)
+ // layout of the frame, and bind the field names
+ ATOMICALLY_FRAME_FIELDS(W_,P_,
+ info_ptr,
+ code,
+ next_invariant,
+ frame_result))
+ return (P_ result) // value returned to the frame
{
- W_ frame, trec, valid, next_invariant, q, outer;
+ W_ valid;
+ gcptr trec, outer, next_invariant, q;
- frame = Sp;
trec = StgTSO_trec(CurrentTSO);
- result = R1;
outer = StgTRecHeader_enclosing_trec(trec);
if (outer == NO_TREC) {
/* First time back at the atomically frame -- pick up invariants */
- ("ptr" q) = foreign "C" stmGetInvariantsToCheck(MyCapability() "ptr", trec "ptr") [];
- StgAtomicallyFrame_next_invariant_to_check(frame) = q;
- StgAtomicallyFrame_result(frame) = result;
+ ("ptr" next_invariant) =
+ ccall stmGetInvariantsToCheck(MyCapability() "ptr", trec "ptr");
+ frame_result = result;
} else {
/* Second/subsequent time back at the atomically frame -- abort the
* tx that's checking the invariant and move on to the next one */
StgTSO_trec(CurrentTSO) = outer;
- q = StgAtomicallyFrame_next_invariant_to_check(frame);
- StgInvariantCheckQueue_my_execution(q) = trec;
- foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
+ StgInvariantCheckQueue_my_execution(next_invariant) = trec;
+ ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
/* Don't free trec -- it's linked from q and will be stashed in the
* invariant if we eventually commit. */
- q = StgInvariantCheckQueue_next_queue_entry(q);
- StgAtomicallyFrame_next_invariant_to_check(frame) = q;
+ next_invariant =
+ StgInvariantCheckQueue_next_queue_entry(next_invariant);
trec = outer;
}
- q = StgAtomicallyFrame_next_invariant_to_check(frame);
-
- if (q != END_INVARIANT_CHECK_QUEUE) {
+ if (next_invariant != END_INVARIANT_CHECK_QUEUE) {
/* We can't commit yet: another invariant to check */
- ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [];
+ ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr", trec "ptr");
StgTSO_trec(CurrentTSO) = trec;
-
- next_invariant = StgInvariantCheckQueue_invariant(q);
- R1 = StgAtomicInvariant_code(next_invariant);
- jump stg_ap_v_fast;
+ q = StgInvariantCheckQueue_invariant(next_invariant);
+ jump stg_ap_v_fast
+ (ATOMICALLY_FRAME_FIELDS(,,info_ptr,code,next_invariant,frame_result))
+ (StgAtomicInvariant_code(q));
} else {
/* We've got no more invariants to check, try to commit */
- (valid) = foreign "C" stmCommitTransaction(MyCapability() "ptr", trec "ptr") [];
+ (valid) = ccall stmCommitTransaction(MyCapability() "ptr", trec "ptr");
if (valid != 0) {
/* Transaction was valid: commit succeeded */
StgTSO_trec(CurrentTSO) = NO_TREC;
- R1 = StgAtomicallyFrame_result(frame);
- Sp = Sp + SIZEOF_StgAtomicallyFrame;
- jump %ENTRY_CODE(Sp(SP_OFF));
+ return (frame_result);
} else {
/* Transaction was not valid: try again */
- ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
+ ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr");
StgTSO_trec(CurrentTSO) = trec;
- StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;
- R1 = StgAtomicallyFrame_code(frame);
- jump stg_ap_v_fast;
+ next_invariant = END_INVARIANT_CHECK_QUEUE;
+
+ jump stg_ap_v_fast
+ // push the StgAtomicallyFrame again: the code generator is
+ // clever enough to only assign the fields that have changed.
+ (ATOMICALLY_FRAME_FIELDS(,,info_ptr,code,next_invariant,frame_result))
+ (code);
}
}
}
+
INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
-#if defined(PROFILING)
- W_ unused1, W_ unused2,
-#endif
- P_ code, P_ next_invariant_to_check, P_ result)
+ // layout of the frame, and bind the field names
+ ATOMICALLY_FRAME_FIELDS(W_,P_,
+ info_ptr,
+ code,
+ next_invariant,
+ frame_result))
+ return (/* no return values */)
{
- W_ frame, trec, valid;
-
- frame = Sp;
+ W_ trec, valid;
/* The TSO is currently waiting: should we stop waiting? */
- (valid) = foreign "C" stmReWait(MyCapability() "ptr", CurrentTSO "ptr") [];
+ (valid) = ccall stmReWait(MyCapability() "ptr", CurrentTSO "ptr");
if (valid != 0) {
- /* Previous attempt is still valid: no point trying again yet */
- jump stg_block_noregs;
+ /* Previous attempt is still valid: no point trying again yet */
+ jump stg_block_noregs
+ (ATOMICALLY_FRAME_FIELDS(,,info_ptr,
+ code,next_invariant,frame_result))
+ ();
} else {
/* Previous attempt is no longer valid: try again */
- ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr") [];
+ ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr", NO_TREC "ptr");
StgTSO_trec(CurrentTSO) = trec;
- StgHeader_info(frame) = stg_atomically_frame_info;
- R1 = StgAtomicallyFrame_code(frame);
- jump stg_ap_v_fast;
+
+ // change the frame header to stg_atomically_frame_info
+ jump stg_ap_v_fast
+ (ATOMICALLY_FRAME_FIELDS(,,stg_atomically_frame_info,
+ code,next_invariant,frame_result))
+ (code);
}
}
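
[Note: the trick here is that re-pushing the same fields under a
different info pointer rewrites the frame header in place; the waiting
frame becomes an ordinary atomically frame again, with no separate
header write needed.]

    jump stg_ap_v_fast
        (ATOMICALLY_FRAME_FIELDS(,,stg_atomically_frame_info,   // new header
                                 code,next_invariant,frame_result))
        (code);
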
-// STM catch frame --------------------------------------------------------------
-
-#define SP_OFF 0
+// STM catch frame -------------------------------------------------------------
/* Catch frames are very similar to update frames, but when entering
* one we just pop the frame off the stack and perform the correct
* kind of return to the activation record underneath us on the stack.
*/
+#define CATCH_STM_FRAME_FIELDS(w_,p_,info_ptr,code,handler) \
+ w_ info_ptr, \
+ PROF_HDR_FIELDS(w_) \
+ p_ code, \
+ p_ handler
+
INFO_TABLE_RET(stg_catch_stm_frame, CATCH_STM_FRAME,
-#if defined(PROFILING)
- W_ unused1, W_ unused2,
-#endif
- P_ unused3, P_ unused4)
- {
- W_ r, frame, trec, outer;
- frame = Sp;
- trec = StgTSO_trec(CurrentTSO);
- outer = StgTRecHeader_enclosing_trec(trec);
- (r) = foreign "C" stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr") [];
- if (r != 0) {
+ // layout of the frame, and bind the field names
+ CATCH_STM_FRAME_FIELDS(W_,P_,info_ptr,code,handler))
+ return (P_ ret)
+{
+ W_ r, trec, outer;
+
+ trec = StgTSO_trec(CurrentTSO);
+ outer = StgTRecHeader_enclosing_trec(trec);
+ (r) = ccall stmCommitNestedTransaction(MyCapability() "ptr", trec "ptr");
+ if (r != 0) {
/* Commit succeeded */
StgTSO_trec(CurrentTSO) = outer;
- Sp = Sp + SIZEOF_StgCatchSTMFrame;
- jump %ENTRY_CODE(Sp(SP_OFF));
- } else {
+ return (ret);
+ } else {
/* Commit failed */
W_ new_trec;
- ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
+ ("ptr" new_trec) = ccall stmStartTransaction(MyCapability() "ptr", outer "ptr");
StgTSO_trec(CurrentTSO) = new_trec;
- R1 = StgCatchSTMFrame_code(frame);
- jump stg_ap_v_fast;
- }
- }
+
+ jump stg_ap_v_fast
+ (CATCH_STM_FRAME_FIELDS(,,info_ptr,code,handler))
+ (code);
+ }
+}
-// Primop definition ------------------------------------------------------------
+// Primop definition -----------------------------------------------------------
-stg_atomicallyzh
+stg_atomicallyzh (P_ stm)
{
- W_ frame;
W_ old_trec;
W_ new_trec;
-
+ W_ code, next_invariant, frame_result;
+
// stmStartTransaction may allocate
- MAYBE_GC (R1_PTR, stg_atomicallyzh);
+ MAYBE_GC_P(stg_atomicallyzh, stm);
- /* Args: R1 = m :: STM a */
- STK_CHK_GEN(SIZEOF_StgAtomicallyFrame + WDS(1), R1_PTR, stg_atomicallyzh);
+ STK_CHK_GEN();
old_trec = StgTSO_trec(CurrentTSO);
/* Nested transactions are not allowed; raise an exception */
if (old_trec != NO_TREC) {
- R1 = base_ControlziExceptionziBase_nestedAtomically_closure;
- jump stg_raisezh;
+ jump stg_raisezh(base_ControlziExceptionziBase_nestedAtomically_closure);
}
- /* Set up the atomically frame */
- Sp = Sp - SIZEOF_StgAtomicallyFrame;
- frame = Sp;
-
- SET_HDR(frame,stg_atomically_frame_info, CCCS);
- StgAtomicallyFrame_code(frame) = R1;
- StgAtomicallyFrame_result(frame) = NO_TREC;
- StgAtomicallyFrame_next_invariant_to_check(frame) = END_INVARIANT_CHECK_QUEUE;
+ code = stm;
+ next_invariant = END_INVARIANT_CHECK_QUEUE;
+ frame_result = NO_TREC;
    /* Start the memory transaction */
- ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", old_trec "ptr") [R1];
+ ("ptr" new_trec) = ccall stmStartTransaction(MyCapability() "ptr", old_trec "ptr");
StgTSO_trec(CurrentTSO) = new_trec;
- /* Apply R1 to the realworld token */
- jump stg_ap_v_fast;
+ jump stg_ap_v_fast
+ (ATOMICALLY_FRAME_FIELDS(,,stg_atomically_frame_info,
+ code,next_invariant,frame_result))
+ (stm);
}
// A closure representing "atomically x". This is used when a thread
@@ -948,73 +917,57 @@ stg_atomicallyzh
// It is somewhat similar to the stg_raise closure.
//
INFO_TABLE(stg_atomically,1,0,THUNK_1_0,"atomically","atomically")
+ (P_ thunk)
{
- R1 = StgThunk_payload(R1,0);
- jump stg_atomicallyzh;
+ jump stg_atomicallyzh(StgThunk_payload(thunk,0));
}
-stg_catchSTMzh
+stg_catchSTMzh (P_ code /* :: STM a */,
+ P_ handler /* :: Exception -> STM a */)
{
- W_ frame;
-
- /* Args: R1 :: STM a */
- /* Args: R2 :: Exception -> STM a */
- STK_CHK_GEN(SIZEOF_StgCatchSTMFrame + WDS(1), R1_PTR & R2_PTR, stg_catchSTMzh);
-
- /* Set up the catch frame */
- Sp = Sp - SIZEOF_StgCatchSTMFrame;
- frame = Sp;
-
- SET_HDR(frame, stg_catch_stm_frame_info, CCCS);
- StgCatchSTMFrame_handler(frame) = R2;
- StgCatchSTMFrame_code(frame) = R1;
-
- /* Start a nested transaction to run the body of the try block in */
- W_ cur_trec;
- W_ new_trec;
- cur_trec = StgTSO_trec(CurrentTSO);
- ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", cur_trec "ptr");
- StgTSO_trec(CurrentTSO) = new_trec;
-
- /* Apply R1 to the realworld token */
- jump stg_ap_v_fast;
+ STK_CHK_GEN();
+
+ /* Start a nested transaction to run the body of the try block in */
+ W_ cur_trec;
+ W_ new_trec;
+ cur_trec = StgTSO_trec(CurrentTSO);
+ ("ptr" new_trec) = ccall stmStartTransaction(MyCapability() "ptr",
+ cur_trec "ptr");
+ StgTSO_trec(CurrentTSO) = new_trec;
+
+ jump stg_ap_v_fast
+ (CATCH_STM_FRAME_FIELDS(,,stg_catch_stm_frame_info, code, handler))
+ (code);
}
-stg_catchRetryzh
+stg_catchRetryzh (P_ first_code, /* :: STM a */
+ P_ alt_code /* :: STM a */)
{
- W_ frame;
W_ new_trec;
- W_ trec;
// stmStartTransaction may allocate
- MAYBE_GC (R1_PTR & R2_PTR, stg_catchRetryzh);
+ MAYBE_GC_PP (stg_catchRetryzh, first_code, alt_code);
- /* Args: R1 :: STM a */
- /* Args: R2 :: STM a */
- STK_CHK_GEN(SIZEOF_StgCatchRetryFrame + WDS(1), R1_PTR & R2_PTR, stg_catchRetryzh);
+ STK_CHK_GEN();
/* Start a nested transaction within which to run the first code */
- trec = StgTSO_trec(CurrentTSO);
- ("ptr" new_trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", trec "ptr") [R1,R2];
+ ("ptr" new_trec) = ccall stmStartTransaction(MyCapability() "ptr",
+ StgTSO_trec(CurrentTSO) "ptr");
StgTSO_trec(CurrentTSO) = new_trec;
- /* Set up the catch-retry frame */
- Sp = Sp - SIZEOF_StgCatchRetryFrame;
- frame = Sp;
-
- SET_HDR(frame, stg_catch_retry_frame_info, CCCS);
- StgCatchRetryFrame_running_alt_code(frame) = 0 :: CInt; // false;
- StgCatchRetryFrame_first_code(frame) = R1;
- StgCatchRetryFrame_alt_code(frame) = R2;
-
- /* Apply R1 to the realworld token */
- jump stg_ap_v_fast;
+ // push the CATCH_RETRY stack frame, and apply first_code to realWorld#
+ jump stg_ap_v_fast
+ (CATCH_RETRY_FRAME_FIELDS(,, stg_catch_retry_frame_info,
+ 0, /* not running_alt_code */
+ first_code,
+ alt_code))
+ (first_code);
}
-stg_retryzh
+stg_retryzh /* no arg list: explicit stack layout */
{
W_ frame_type;
W_ frame;
@@ -1022,12 +975,14 @@ stg_retryzh
W_ outer;
W_ r;
- MAYBE_GC (NO_PTRS, stg_retryzh); // STM operations may allocate
+ // STM operations may allocate
+ MAYBE_GC_ (stg_retryzh); // NB. not MAYBE_GC(), we cannot make a
+ // function call in an explicit-stack proc
// Find the enclosing ATOMICALLY_FRAME or CATCH_RETRY_FRAME
retry_pop_stack:
SAVE_THREAD_STATE();
- (frame_type) = foreign "C" findRetryFrameHelper(MyCapability(), CurrentTSO "ptr") [];
+ (frame_type) = ccall findRetryFrameHelper(MyCapability(), CurrentTSO "ptr");
LOAD_THREAD_STATE();
frame = Sp;
trec = StgTSO_trec(CurrentTSO);
@@ -1037,15 +992,15 @@ retry_pop_stack:
// The retry reaches a CATCH_RETRY_FRAME before the atomic frame
ASSERT(outer != NO_TREC);
// Abort the transaction attempting the current branch
- foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
- foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
- if (!StgCatchRetryFrame_running_alt_code(frame) != 0::I32) {
+ ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
+ ccall stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr");
+ if (!StgCatchRetryFrame_running_alt_code(frame) != 0) {
// Retry in the first branch: try the alternative
- ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
+ ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr", outer "ptr");
StgTSO_trec(CurrentTSO) = trec;
StgCatchRetryFrame_running_alt_code(frame) = 1 :: CInt; // true;
R1 = StgCatchRetryFrame_alt_code(frame);
- jump stg_ap_v_fast;
+ jump stg_ap_v_fast [R1];
} else {
// Retry in the alternative code: propagate the retry
StgTSO_trec(CurrentTSO) = outer;
@@ -1060,108 +1015,93 @@ retry_pop_stack:
// We called retry while checking invariants, so abort the current
      // invariant check (merging its TVar accesses into the parent's read
// set so we'll wait on them)
- foreign "C" stmAbortTransaction(MyCapability() "ptr", trec "ptr") [];
- foreign "C" stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr") [];
+ ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
+ ccall stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr");
trec = outer;
StgTSO_trec(CurrentTSO) = trec;
outer = StgTRecHeader_enclosing_trec(trec);
}
ASSERT(outer == NO_TREC);
- (r) = foreign "C" stmWait(MyCapability() "ptr", CurrentTSO "ptr", trec "ptr") [];
+ (r) = ccall stmWait(MyCapability() "ptr", CurrentTSO "ptr", trec "ptr");
if (r != 0) {
// Transaction was valid: stmWait put us on the TVars' queues, we now block
StgHeader_info(frame) = stg_atomically_waiting_frame_info;
Sp = frame;
- // Fix up the stack in the unregisterised case: the return convention is different.
R3 = trec; // passing to stmWaitUnblock()
- jump stg_block_stmwait;
+ jump stg_block_stmwait [R3];
} else {
// Transaction was not valid: retry immediately
- ("ptr" trec) = foreign "C" stmStartTransaction(MyCapability() "ptr", outer "ptr") [];
+ ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr", outer "ptr");
StgTSO_trec(CurrentTSO) = trec;
- R1 = StgAtomicallyFrame_code(frame);
Sp = frame;
- jump stg_ap_v_fast;
+ R1 = StgAtomicallyFrame_code(frame);
+ jump stg_ap_v_fast [R1];
}
}
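
[Note: stg_retryzh keeps the old explicit-stack style because it walks
and truncates the stack itself. In such procs the arguments still
travel in the global registers, and every jump must carry the list of
live GlobalRegs, for example:]

    R1 = StgAtomicallyFrame_code(frame);
    jump stg_ap_v_fast [R1];    // [R1] declares R1 live across the jump
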
-
-stg_checkzh
+stg_checkzh (P_ closure /* STM a */)
{
- W_ trec, closure;
-
- /* Args: R1 = invariant closure */
- MAYBE_GC (R1_PTR, stg_checkzh);
+ W_ trec;
- trec = StgTSO_trec(CurrentTSO);
- closure = R1;
- foreign "C" stmAddInvariantToCheck(MyCapability() "ptr",
- trec "ptr",
- closure "ptr") [];
+ MAYBE_GC_P (stg_checkzh, closure);
- jump %ENTRY_CODE(Sp(0));
+ trec = StgTSO_trec(CurrentTSO);
+ ccall stmAddInvariantToCheck(MyCapability() "ptr",
+ trec "ptr",
+ closure "ptr");
+ return ();
}
-stg_newTVarzh
+stg_newTVarzh (P_ init)
{
- W_ tv;
- W_ new_value;
+ W_ tv;
- /* Args: R1 = initialisation value */
-
- MAYBE_GC (R1_PTR, stg_newTVarzh);
- new_value = R1;
- ("ptr" tv) = foreign "C" stmNewTVar(MyCapability() "ptr", new_value "ptr") [];
- RET_P(tv);
+ MAYBE_GC_P (stg_newTVarzh, init);
+ ("ptr" tv) = ccall stmNewTVar(MyCapability() "ptr", init "ptr");
+ return (tv);
}
-stg_readTVarzh
+stg_readTVarzh (P_ tvar)
{
W_ trec;
- W_ tvar;
W_ result;
- /* Args: R1 = TVar closure */
+ // Call to stmReadTVar may allocate
+ MAYBE_GC_P (stg_readTVarzh, tvar);
- MAYBE_GC (R1_PTR, stg_readTVarzh); // Call to stmReadTVar may allocate
trec = StgTSO_trec(CurrentTSO);
- tvar = R1;
- ("ptr" result) = foreign "C" stmReadTVar(MyCapability() "ptr", trec "ptr", tvar "ptr") [];
-
- RET_P(result);
+ ("ptr" result) = ccall stmReadTVar(MyCapability() "ptr", trec "ptr",
+ tvar "ptr");
+ return (result);
}
-stg_readTVarIOzh
+stg_readTVarIOzh ( P_ tvar /* :: TVar a */ )
{
W_ result;
again:
- result = StgTVar_current_value(R1);
+ result = StgTVar_current_value(tvar);
if (%INFO_PTR(result) == stg_TREC_HEADER_info) {
goto again;
}
- RET_P(result);
+ return (result);
}
-stg_writeTVarzh
+stg_writeTVarzh (P_ tvar, /* :: TVar a */
+ P_ new_value /* :: a */)
{
- W_ trec;
- W_ tvar;
- W_ new_value;
-
- /* Args: R1 = TVar closure */
- /* R2 = New value */
+ W_ trec;
- MAYBE_GC (R1_PTR & R2_PTR, stg_writeTVarzh); // Call to stmWriteTVar may allocate
- trec = StgTSO_trec(CurrentTSO);
- tvar = R1;
- new_value = R2;
- foreign "C" stmWriteTVar(MyCapability() "ptr", trec "ptr", tvar "ptr", new_value "ptr") [];
+ // Call to stmWriteTVar may allocate
+ MAYBE_GC_PP (stg_writeTVarzh, tvar, new_value);
- jump %ENTRY_CODE(Sp(0));
+ trec = StgTSO_trec(CurrentTSO);
+ ccall stmWriteTVar(MyCapability() "ptr", trec "ptr", tvar "ptr",
+ new_value "ptr");
+ return ();
}
@@ -1197,23 +1137,20 @@ stg_writeTVarzh
*
* -------------------------------------------------------------------------- */
-stg_isEmptyMVarzh
+stg_isEmptyMVarzh ( P_ mvar /* :: MVar a */ )
{
- /* args: R1 = MVar closure */
-
- if (StgMVar_value(R1) == stg_END_TSO_QUEUE_closure) {
- RET_N(1);
+ if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
+ return (1);
} else {
- RET_N(0);
+ return (0);
}
}
-stg_newMVarzh
+stg_newMVarzh ()
{
- /* args: none */
W_ mvar;
- ALLOC_PRIM ( SIZEOF_StgMVar, NO_PTRS, stg_newMVarzh );
+ ALLOC_PRIM (SIZEOF_StgMVar);
mvar = Hp - SIZEOF_StgMVar + WDS(1);
SET_HDR(mvar,stg_MVAR_DIRTY_info,CCCS);
@@ -1221,7 +1158,7 @@ stg_newMVarzh
StgMVar_head(mvar) = stg_END_TSO_QUEUE_closure;
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
- RET_P(mvar);
+ return (mvar);
}
@@ -1229,7 +1166,7 @@ stg_newMVarzh
W_ sp; \
sp = StgStack_sp(stack); \
W_[sp + WDS(1)] = value; \
- W_[sp + WDS(0)] = stg_gc_unpt_r1_info;
+ W_[sp + WDS(0)] = stg_ret_p_info;
#define PerformPut(stack,lval) \
W_ sp; \
@@ -1237,21 +1174,19 @@ stg_newMVarzh
StgStack_sp(stack) = sp; \
lval = W_[sp - WDS(1)];
-stg_takeMVarzh
-{
- W_ mvar, val, info, tso, q;
- /* args: R1 = MVar closure */
- mvar = R1;
+stg_takeMVarzh ( P_ mvar /* :: MVar a */ )
+{
+ W_ val, info, tso, q;
#if defined(THREADED_RTS)
- ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
+ ("ptr" info) = ccall lockClosure(mvar "ptr");
#else
info = GET_INFO(mvar);
#endif
if (info == stg_MVAR_CLEAN_info) {
- foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr") [];
+ ccall dirty_MVAR(BaseReg "ptr", mvar "ptr");
}
/* If the MVar is empty, put ourselves on its blocking queue,
@@ -1259,16 +1194,13 @@ stg_takeMVarzh
*/
if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
- // Note [mvar-heap-check] We want to do the heap check in the
- // branch here, to avoid the conditional in the common case.
- // However, we've already locked the MVar above, so we better
-    // be careful to unlock it again if the heap check fails.
- // Unfortunately we don't have an easy way to inject any code
- // into the heap check generated by the code generator, so we
- // have to do it in stg_gc_gen (see HeapStackCheck.cmm).
- HP_CHK_GEN_TICKY(SIZEOF_StgMVarTSOQueue, R1_PTR, stg_takeMVarzh);
- TICK_ALLOC_PRIM(SIZEOF_StgMVarTSOQueue, 0, 0);
- CCCS_ALLOC(SIZEOF_StgMVarTSOQueue);
+ // We want to put the heap check down here in the slow path,
+ // but be careful to unlock the closure before returning to
+ // the RTS if the check fails.
+ ALLOC_PRIM_WITH_CUSTOM_FAILURE
+ (SIZEOF_StgMVarTSOQueue,
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+ GC_PRIM_P(stg_takeMVarzh, mvar));
q = Hp - SIZEOF_StgMVarTSOQueue + WDS(1);
@@ -1280,16 +1212,15 @@ stg_takeMVarzh
StgMVar_head(mvar) = q;
} else {
StgMVarTSOQueue_link(StgMVar_tail(mvar)) = q;
- foreign "C" recordClosureMutated(MyCapability() "ptr",
- StgMVar_tail(mvar)) [];
+ ccall recordClosureMutated(MyCapability() "ptr",
+ StgMVar_tail(mvar));
}
StgTSO__link(CurrentTSO) = q;
StgTSO_block_info(CurrentTSO) = mvar;
StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
StgMVar_tail(mvar) = q;
- R1 = mvar;
- jump stg_block_takemvar;
+ jump stg_block_takemvar(mvar);
}
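
[Note: the custom-failure allocation used above, isolated. If the heap
check fails we must not leave the MVar locked, so the failure path
first restores the header and only then hands control to the GC
re-entry stub.]

    ALLOC_PRIM_WITH_CUSTOM_FAILURE
       (SIZEOF_StgMVarTSOQueue,
        unlockClosure(mvar, stg_MVAR_DIRTY_info);   // undo the lock first
        GC_PRIM_P(stg_takeMVarzh, mvar));           // then GC and re-enter
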
/* we got the value... */
@@ -1301,14 +1232,14 @@ loop:
/* No further putMVars, MVar is now empty */
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
unlockClosure(mvar, stg_MVAR_DIRTY_info);
- RET_P(val);
+ return (val);
}
if (StgHeader_info(q) == stg_IND_info ||
StgHeader_info(q) == stg_MSG_NULL_info) {
q = StgInd_indirectee(q);
goto loop;
}
-
+
// There are putMVar(s) waiting... wake up the first thread on the queue
tso = StgMVarTSOQueue_tso(q);
@@ -1330,22 +1261,18 @@ loop:
// no need to mark the TSO dirty, we have only written END_TSO_QUEUE.
- foreign "C" tryWakeupThread(MyCapability() "ptr", tso) [];
+ ccall tryWakeupThread(MyCapability() "ptr", tso);
unlockClosure(mvar, stg_MVAR_DIRTY_info);
- RET_P(val);
+ return (val);
}
-
-stg_tryTakeMVarzh
+stg_tryTakeMVarzh ( P_ mvar /* :: MVar a */ )
{
- W_ mvar, val, info, tso, q;
-
- /* args: R1 = MVar closure */
- mvar = R1;
+ W_ val, info, tso, q;
#if defined(THREADED_RTS)
- ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
+ ("ptr" info) = ccall lockClosure(mvar "ptr");
#else
info = GET_INFO(mvar);
#endif
@@ -1360,11 +1287,11 @@ stg_tryTakeMVarzh
/* HACK: we need a pointer to pass back,
* so we abuse NO_FINALIZER_closure
*/
- RET_NP(0, stg_NO_FINALIZER_closure);
+ return (0, stg_NO_FINALIZER_closure);
}
if (info == stg_MVAR_CLEAN_info) {
- foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr") [];
+ ccall dirty_MVAR(BaseReg "ptr", mvar "ptr");
}
/* we got the value... */
@@ -1376,7 +1303,7 @@ loop:
/* No further putMVars, MVar is now empty */
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
unlockClosure(mvar, stg_MVAR_DIRTY_info);
- RET_NP(1, val);
+ return (1, val);
}
if (StgHeader_info(q) == stg_IND_info ||
StgHeader_info(q) == stg_MSG_NULL_info) {
@@ -1405,37 +1332,36 @@ loop:
// no need to mark the TSO dirty, we have only written END_TSO_QUEUE.
- foreign "C" tryWakeupThread(MyCapability() "ptr", tso) [];
+ ccall tryWakeupThread(MyCapability() "ptr", tso);
unlockClosure(mvar, stg_MVAR_DIRTY_info);
- RET_NP(1,val);
+ return (1,val);
}
-
-stg_putMVarzh
+stg_putMVarzh ( P_ mvar, /* :: MVar a */
+                P_ val   /* :: a */ )
{
- W_ mvar, val, info, tso, q;
-
- /* args: R1 = MVar, R2 = value */
- mvar = R1;
- val = R2;
+ W_ info, tso, q;
#if defined(THREADED_RTS)
- ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
+ ("ptr" info) = ccall lockClosure(mvar "ptr");
#else
info = GET_INFO(mvar);
#endif
if (info == stg_MVAR_CLEAN_info) {
- foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
+ ccall dirty_MVAR(BaseReg "ptr", mvar "ptr");
}
if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
- // see Note [mvar-heap-check] above
- HP_CHK_GEN_TICKY(SIZEOF_StgMVarTSOQueue, R1_PTR & R2_PTR, stg_putMVarzh);
- TICK_ALLOC_PRIM(SIZEOF_StgMVarTSOQueue, 0, 0);
- CCCS_ALLOC(SIZEOF_StgMVarTSOQueue);
+ // We want to put the heap check down here in the slow path,
+ // but be careful to unlock the closure before returning to
+ // the RTS if the check fails.
+ ALLOC_PRIM_WITH_CUSTOM_FAILURE
+ (SIZEOF_StgMVarTSOQueue,
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+ GC_PRIM_P(stg_putMVarzh, mvar));
q = Hp - SIZEOF_StgMVarTSOQueue + WDS(1);
@@ -1447,17 +1373,15 @@ stg_putMVarzh
StgMVar_head(mvar) = q;
} else {
StgMVarTSOQueue_link(StgMVar_tail(mvar)) = q;
- foreign "C" recordClosureMutated(MyCapability() "ptr",
- StgMVar_tail(mvar)) [];
+ ccall recordClosureMutated(MyCapability() "ptr",
+ StgMVar_tail(mvar));
}
StgTSO__link(CurrentTSO) = q;
StgTSO_block_info(CurrentTSO) = mvar;
StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
StgMVar_tail(mvar) = q;
- R1 = mvar;
- R2 = val;
- jump stg_block_putmvar;
+ jump stg_block_putmvar(mvar,val);
}
q = StgMVar_head(mvar);
@@ -1466,7 +1390,7 @@ loop:
/* No further takes, the MVar is now full. */
StgMVar_value(mvar) = val;
unlockClosure(mvar, stg_MVAR_DIRTY_info);
- jump %ENTRY_CODE(Sp(0));
+ return ();
}
if (StgHeader_info(q) == stg_IND_info ||
StgHeader_info(q) == stg_MSG_NULL_info) {
@@ -1494,26 +1418,23 @@ loop:
StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;
if (TO_W_(StgStack_dirty(stack)) == 0) {
- foreign "C" dirty_STACK(MyCapability() "ptr", stack "ptr") [];
+ ccall dirty_STACK(MyCapability() "ptr", stack "ptr");
}
- foreign "C" tryWakeupThread(MyCapability() "ptr", tso) [];
+ ccall tryWakeupThread(MyCapability() "ptr", tso);
unlockClosure(mvar, stg_MVAR_DIRTY_info);
- jump %ENTRY_CODE(Sp(0));
+ return ();
}
-stg_tryPutMVarzh
+stg_tryPutMVarzh ( P_ mvar, /* :: MVar a */
+ P_ val, /* :: a */ )
{
- W_ mvar, val, info, tso, q;
-
- /* args: R1 = MVar, R2 = value */
- mvar = R1;
- val = R2;
+ W_ info, tso, q;
#if defined(THREADED_RTS)
- ("ptr" info) = foreign "C" lockClosure(mvar "ptr") [];
+ ("ptr" info) = ccall lockClosure(mvar "ptr");
#else
info = GET_INFO(mvar);
#endif
@@ -1522,11 +1443,11 @@ stg_tryPutMVarzh
#if defined(THREADED_RTS)
unlockClosure(mvar, info);
#endif
- RET_N(0);
+ return (0);
}
if (info == stg_MVAR_CLEAN_info) {
- foreign "C" dirty_MVAR(BaseReg "ptr", mvar "ptr");
+ ccall dirty_MVAR(BaseReg "ptr", mvar "ptr");
}
q = StgMVar_head(mvar);
@@ -1535,7 +1456,7 @@ loop:
/* No further takes, the MVar is now full. */
StgMVar_value(mvar) = val;
unlockClosure(mvar, stg_MVAR_DIRTY_info);
- RET_N(1);
+ return (1);
}
if (StgHeader_info(q) == stg_IND_info ||
StgHeader_info(q) == stg_MSG_NULL_info) {
@@ -1563,13 +1484,13 @@ loop:
StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;
if (TO_W_(StgStack_dirty(stack)) == 0) {
- foreign "C" dirty_STACK(MyCapability() "ptr", stack "ptr") [];
+ ccall dirty_STACK(MyCapability() "ptr", stack "ptr");
}
- foreign "C" tryWakeupThread(MyCapability() "ptr", tso) [];
+ ccall tryWakeupThread(MyCapability() "ptr", tso);
unlockClosure(mvar, stg_MVAR_DIRTY_info);
- RET_N(1);
+ return (1);
}
@@ -1577,13 +1498,13 @@ loop:
Stable pointer primitives
------------------------------------------------------------------------- */
-stg_makeStableNamezh
+stg_makeStableNamezh ( P_ obj )
{
W_ index, sn_obj;
- ALLOC_PRIM( SIZEOF_StgStableName, R1_PTR, stg_makeStableNamezh );
+ ALLOC_PRIM_P (SIZEOF_StgStableName, stg_makeStableNamezh, obj);
- (index) = foreign "C" lookupStableName(R1 "ptr") [];
+ (index) = ccall lookupStableName(obj "ptr");
/* Is there already a StableName for this heap object?
* stable_ptr_table is a pointer to an array of snEntry structs.
@@ -1597,56 +1518,48 @@ stg_makeStableNamezh
sn_obj = snEntry_sn_obj(W_[stable_ptr_table] + index*SIZEOF_snEntry);
}
- RET_P(sn_obj);
+ return (sn_obj);
}
-
-stg_makeStablePtrzh
+stg_makeStablePtrzh ( P_ obj )
{
- /* Args: R1 = a */
W_ sp;
- MAYBE_GC(R1_PTR, stg_makeStablePtrzh);
- ("ptr" sp) = foreign "C" getStablePtr(R1 "ptr") [];
- RET_N(sp);
+
+ ("ptr" sp) = ccall getStablePtr(obj "ptr");
+ return (sp);
}
-stg_deRefStablePtrzh
+stg_deRefStablePtrzh ( P_ sp )
{
- /* Args: R1 = the stable ptr */
- W_ r, sp;
- sp = R1;
+ W_ r;
r = snEntry_addr(W_[stable_ptr_table] + sp*SIZEOF_snEntry);
- RET_P(r);
+ return (r);
}
/* -----------------------------------------------------------------------------
Bytecode object primitives
------------------------------------------------------------------------- */
-stg_newBCOzh
+stg_newBCOzh ( P_ instrs,
+ P_ literals,
+ P_ ptrs,
+ W_ arity,
+ P_ bitmap_arr )
{
- /* R1 = instrs
- R2 = literals
- R3 = ptrs
- R4 = arity
- R5 = bitmap array
- */
- W_ bco, bitmap_arr, bytes, words;
-
- bitmap_arr = R5;
+ W_ bco, bytes, words;
words = BYTES_TO_WDS(SIZEOF_StgBCO) + BYTE_ARR_WDS(bitmap_arr);
bytes = WDS(words);
- ALLOC_PRIM( bytes, R1_PTR&R2_PTR&R3_PTR&R5_PTR, stg_newBCOzh );
+ ALLOC_PRIM (bytes);
bco = Hp - bytes + WDS(1);
SET_HDR(bco, stg_BCO_info, CCCS);
- StgBCO_instrs(bco) = R1;
- StgBCO_literals(bco) = R2;
- StgBCO_ptrs(bco) = R3;
- StgBCO_arity(bco) = HALF_W_(R4);
+ StgBCO_instrs(bco) = instrs;
+ StgBCO_literals(bco) = literals;
+ StgBCO_ptrs(bco) = ptrs;
+ StgBCO_arity(bco) = HALF_W_(arity);
StgBCO_size(bco) = HALF_W_(words);
// Copy the arity/bitmap info into the BCO
@@ -1659,23 +1572,20 @@ for:
goto for;
}
- RET_P(bco);
+ return (bco);
}
-
-stg_mkApUpd0zh
+stg_mkApUpd0zh ( P_ bco )
{
- // R1 = the BCO# for the AP
- //
W_ ap;
// This function is *only* used to wrap zero-arity BCOs in an
// updatable wrapper (see ByteCodeLink.lhs). An AP thunk is always
// saturated and always points directly to a FUN or BCO.
- ASSERT(%INFO_TYPE(%GET_STD_INFO(R1)) == HALF_W_(BCO) &&
- StgBCO_arity(R1) == HALF_W_(0));
+ ASSERT(%INFO_TYPE(%GET_STD_INFO(bco)) == HALF_W_(BCO) &&
+ StgBCO_arity(bco) == HALF_W_(0));
- HP_CHK_GEN_TICKY(SIZEOF_StgAP, R1_PTR, stg_mkApUpd0zh);
+ HP_CHK_P(SIZEOF_StgAP, stg_mkApUpd0zh, bco);
TICK_ALLOC_UP_THK(0, 0);
CCCS_ALLOC(SIZEOF_StgAP);
@@ -1683,18 +1593,17 @@ stg_mkApUpd0zh
SET_HDR(ap, stg_AP_info, CCCS);
StgAP_n_args(ap) = HALF_W_(0);
- StgAP_fun(ap) = R1;
+ StgAP_fun(ap) = bco;
- RET_P(ap);
+ return (ap);
}
-stg_unpackClosurezh
+stg_unpackClosurezh ( P_ closure )
{
-/* args: R1 = closure to analyze */
// TODO: Consider the absence of ptrs or nonptrs as a special case?
W_ info, ptrs, nptrs, p, ptrs_arr, nptrs_arr;
- info = %GET_STD_INFO(UNTAG(R1));
+ info = %GET_STD_INFO(UNTAG(closure));
// Some closures have non-standard layout, so we omit those here.
W_ type;
@@ -1723,10 +1632,10 @@ out:
ptrs_arr_cards = mutArrPtrsCardWords(ptrs);
ptrs_arr_sz = SIZEOF_StgMutArrPtrs + WDS(ptrs) + WDS(ptrs_arr_cards);
- ALLOC_PRIM (ptrs_arr_sz + nptrs_arr_sz, R1_PTR, stg_unpackClosurezh);
+ ALLOC_PRIM_P (ptrs_arr_sz + nptrs_arr_sz, stg_unpackClosurezh, closure);
W_ clos;
- clos = UNTAG(R1);
+ clos = UNTAG(closure);
ptrs_arr = Hp - nptrs_arr_sz - ptrs_arr_sz + WDS(1);
nptrs_arr = Hp - nptrs_arr_sz + WDS(1);
@@ -1755,7 +1664,7 @@ for2:
p = p + 1;
goto for2;
}
- RET_NPP(info, ptrs_arr, nptrs_arr);
+ return (info, ptrs_arr, nptrs_arr);
}
/* -----------------------------------------------------------------------------
@@ -1770,47 +1679,45 @@ for2:
if (W_[blocked_queue_hd] == END_TSO_QUEUE) { \
W_[blocked_queue_hd] = tso; \
} else { \
- foreign "C" setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl] "ptr", tso) []; \
+ ccall setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl] "ptr", tso); \
} \
W_[blocked_queue_tl] = tso;
-stg_waitReadzh
+stg_waitReadzh ( W_ fd )
{
- /* args: R1 */
#ifdef THREADED_RTS
- foreign "C" barf("waitRead# on threaded RTS") never returns;
+ ccall barf("waitRead# on threaded RTS") never returns;
#else
ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
- StgTSO_block_info(CurrentTSO) = R1;
+ StgTSO_block_info(CurrentTSO) = fd;
// No locking - we're not going to use this interface in the
// threaded RTS anyway.
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
- jump stg_block_noregs;
+ jump stg_block_noregs();
#endif
}
-stg_waitWritezh
+stg_waitWritezh ( W_ fd )
{
- /* args: R1 */
#ifdef THREADED_RTS
- foreign "C" barf("waitWrite# on threaded RTS") never returns;
+ ccall barf("waitWrite# on threaded RTS") never returns;
#else
ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
- StgTSO_block_info(CurrentTSO) = R1;
+ StgTSO_block_info(CurrentTSO) = fd;
// No locking - we're not going to use this interface in the
// threaded RTS anyway.
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
- jump stg_block_noregs;
+ jump stg_block_noregs();
#endif
}
STRING(stg_delayzh_malloc_str, "stg_delayzh")
-stg_delayzh
+stg_delayzh ( W_ us_delay )
{
#ifdef mingw32_HOST_OS
W_ ares;
@@ -1820,19 +1727,18 @@ stg_delayzh
#endif
#ifdef THREADED_RTS
- foreign "C" barf("delay# on threaded RTS") never returns;
+ ccall barf("delay# on threaded RTS") never returns;
#else
- /* args: R1 (microsecond delay amount) */
ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
StgTSO_why_blocked(CurrentTSO) = BlockedOnDelay::I16;
#ifdef mingw32_HOST_OS
/* could probably allocate this on the heap instead */
- ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
+ ("ptr" ares) = ccall stgMallocBytes(SIZEOF_StgAsyncIOResult,
stg_delayzh_malloc_str);
- (reqID) = foreign "C" addDelayRequest(R1);
+ (reqID) = ccall addDelayRequest(us_delay);
StgAsyncIOResult_reqID(ares) = reqID;
StgAsyncIOResult_len(ares) = 0;
StgAsyncIOResult_errCode(ares) = 0;
@@ -1844,12 +1750,12 @@ stg_delayzh
*/
StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
- jump stg_block_async_void;
+ jump stg_block_async_void();
#else
- (target) = foreign "C" getDelayTarget(R1) [R1];
+ (target) = ccall getDelayTarget(us_delay);
StgTSO_block_info(CurrentTSO) = target;
@@ -1867,9 +1773,9 @@ while:
if (prev == NULL) {
W_[sleeping_queue] = CurrentTSO;
} else {
- foreign "C" setTSOLink(MyCapability() "ptr", prev "ptr", CurrentTSO) [];
+ ccall setTSOLink(MyCapability() "ptr", prev "ptr", CurrentTSO);
}
- jump stg_block_noregs;
+ jump stg_block_noregs();
#endif
#endif /* !THREADED_RTS */
}
@@ -1877,86 +1783,80 @@ while:
#ifdef mingw32_HOST_OS
STRING(stg_asyncReadzh_malloc_str, "stg_asyncReadzh")
-stg_asyncReadzh
+stg_asyncReadzh ( W_ fd, W_ is_sock, W_ len, W_ buf )
{
W_ ares;
CInt reqID;
#ifdef THREADED_RTS
- foreign "C" barf("asyncRead# on threaded RTS") never returns;
+ ccall barf("asyncRead# on threaded RTS") never returns;
#else
- /* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
StgTSO_why_blocked(CurrentTSO) = BlockedOnRead::I16;
/* could probably allocate this on the heap instead */
- ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
- stg_asyncReadzh_malloc_str)
- [R1,R2,R3,R4];
- (reqID) = foreign "C" addIORequest(R1, 0/*FALSE*/,R2,R3,R4 "ptr") [];
+ ("ptr" ares) = ccall stgMallocBytes(SIZEOF_StgAsyncIOResult,
+ stg_asyncReadzh_malloc_str);
+ (reqID) = ccall addIORequest(fd, 0/*FALSE*/,is_sock,len,buf "ptr");
StgAsyncIOResult_reqID(ares) = reqID;
StgAsyncIOResult_len(ares) = 0;
StgAsyncIOResult_errCode(ares) = 0;
StgTSO_block_info(CurrentTSO) = ares;
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
- jump stg_block_async;
+ jump stg_block_async();
#endif
}
STRING(stg_asyncWritezh_malloc_str, "stg_asyncWritezh")
-stg_asyncWritezh
+stg_asyncWritezh ( W_ fd, W_ is_sock, W_ len, W_ buf )
{
W_ ares;
CInt reqID;
#ifdef THREADED_RTS
- foreign "C" barf("asyncWrite# on threaded RTS") never returns;
+ ccall barf("asyncWrite# on threaded RTS") never returns;
#else
- /* args: R1 = fd, R2 = isSock, R3 = len, R4 = buf */
ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
StgTSO_why_blocked(CurrentTSO) = BlockedOnWrite::I16;
- ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
- stg_asyncWritezh_malloc_str)
- [R1,R2,R3,R4];
- (reqID) = foreign "C" addIORequest(R1, 1/*TRUE*/,R2,R3,R4 "ptr") [];
+ ("ptr" ares) = ccall stgMallocBytes(SIZEOF_StgAsyncIOResult,
+ stg_asyncWritezh_malloc_str);
+ (reqID) = ccall addIORequest(fd, 1/*TRUE*/,is_sock,len,buf "ptr");
StgAsyncIOResult_reqID(ares) = reqID;
StgAsyncIOResult_len(ares) = 0;
StgAsyncIOResult_errCode(ares) = 0;
StgTSO_block_info(CurrentTSO) = ares;
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
- jump stg_block_async;
+ jump stg_block_async();
#endif
}
STRING(stg_asyncDoProczh_malloc_str, "stg_asyncDoProczh")
-stg_asyncDoProczh
+stg_asyncDoProczh ( W_ proc, W_ param )
{
W_ ares;
CInt reqID;
#ifdef THREADED_RTS
- foreign "C" barf("asyncDoProc# on threaded RTS") never returns;
+ ccall barf("asyncDoProc# on threaded RTS") never returns;
#else
- /* args: R1 = proc, R2 = param */
ASSERT(StgTSO_why_blocked(CurrentTSO) == NotBlocked::I16);
StgTSO_why_blocked(CurrentTSO) = BlockedOnDoProc::I16;
/* could probably allocate this on the heap instead */
- ("ptr" ares) = foreign "C" stgMallocBytes(SIZEOF_StgAsyncIOResult,
- stg_asyncDoProczh_malloc_str)
- [R1,R2];
- (reqID) = foreign "C" addDoProcRequest(R1 "ptr",R2 "ptr") [];
+ ("ptr" ares) = ccall stgMallocBytes(SIZEOF_StgAsyncIOResult,
+ stg_asyncDoProczh_malloc_str);
+ (reqID) = ccall addDoProcRequest(proc "ptr",param "ptr");
StgAsyncIOResult_reqID(ares) = reqID;
StgAsyncIOResult_len(ares) = 0;
StgAsyncIOResult_errCode(ares) = 0;
StgTSO_block_info(CurrentTSO) = ares;
APPEND_TO_BLOCKED_QUEUE(CurrentTSO);
- jump stg_block_async;
+ jump stg_block_async();
#endif
}
#endif
@@ -2012,15 +1912,16 @@ stg_asyncDoProczh
 * only manifests occasionally (once every 10 runs or so).
* -------------------------------------------------------------------------- */
-INFO_TABLE_RET(stg_noDuplicate, RET_SMALL)
+INFO_TABLE_RET(stg_noDuplicate, RET_SMALL, W_ info_ptr)
+ return (/* no return values */)
{
- Sp_adj(1);
- jump stg_noDuplicatezh;
+ jump stg_noDuplicatezh();
}
-stg_noDuplicatezh
+stg_noDuplicatezh /* no arg list: explicit stack layout */
{
- STK_CHK_GEN( WDS(1), NO_PTRS, stg_noDuplicatezh );
+ STK_CHK(WDS(1), stg_noDuplicatezh);
+
// leave noDuplicate frame in case the current
// computation is suspended and restarted (see above).
Sp_adj(-1);
@@ -2028,10 +1929,10 @@ stg_noDuplicatezh
SAVE_THREAD_STATE();
ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
- foreign "C" threadPaused (MyCapability() "ptr", CurrentTSO "ptr") [];
+ ccall threadPaused (MyCapability() "ptr", CurrentTSO "ptr");
if (StgTSO_what_next(CurrentTSO) == ThreadKilled::I16) {
- jump stg_threadFinished;
+ jump stg_threadFinished [];
} else {
LOAD_THREAD_STATE();
ASSERT(StgTSO_what_next(CurrentTSO) == ThreadRunGHC::I16);
@@ -2039,7 +1940,7 @@ stg_noDuplicatezh
if (Sp(0) == stg_noDuplicate_info) {
Sp_adj(1);
}
- jump %ENTRY_CODE(Sp(0));
+ jump %ENTRY_CODE(Sp(0)) [];
}
}
@@ -2047,75 +1948,62 @@ stg_noDuplicatezh
Misc. primitives
-------------------------------------------------------------------------- */
-stg_getApStackValzh
+stg_getApStackValzh ( P_ ap_stack, W_ offset )
{
- W_ ap_stack, offset, val, ok;
-
- /* args: R1 = AP_STACK, R2 = offset */
- ap_stack = R1;
- offset = R2;
-
if (%INFO_PTR(ap_stack) == stg_AP_STACK_info) {
- ok = 1;
- val = StgAP_STACK_payload(ap_stack,offset);
+ return (1,StgAP_STACK_payload(ap_stack,offset));
} else {
- ok = 0;
- val = R1;
+ return (0,ap_stack);
}
- RET_NP(ok,val);
}
// Write the cost centre stack of the first argument to stderr; return
// the second. Possibly only makes sense for already-evaluated
// things?
-stg_traceCcszh
+stg_traceCcszh ( P_ obj, P_ ret )
{
W_ ccs;
#ifdef PROFILING
- ccs = StgHeader_ccs(UNTAG(R1));
- foreign "C" fprintCCS_stderr(ccs "ptr") [R2];
+ ccs = StgHeader_ccs(UNTAG(obj));
+ ccall fprintCCS_stderr(ccs "ptr");
#endif
- R1 = R2;
- ENTER();
+ jump stg_ap_0_fast(ret);
}
-stg_getSparkzh
+stg_getSparkzh ()
{
W_ spark;
#ifndef THREADED_RTS
- RET_NP(0,ghczmprim_GHCziTypes_False_closure);
+ return (0,ghczmprim_GHCziTypes_False_closure);
#else
- (spark) = foreign "C" findSpark(MyCapability());
+ (spark) = ccall findSpark(MyCapability());
if (spark != 0) {
- RET_NP(1,spark);
+ return (1,spark);
} else {
- RET_NP(0,ghczmprim_GHCziTypes_False_closure);
+ return (0,ghczmprim_GHCziTypes_False_closure);
}
#endif
}
-stg_numSparkszh
+stg_numSparkszh ()
{
W_ n;
#ifdef THREADED_RTS
- (n) = foreign "C" dequeElements(Capability_sparks(MyCapability()));
+ (n) = ccall dequeElements(Capability_sparks(MyCapability()));
#else
n = 0;
#endif
- RET_N(n);
+ return (n);
}
-stg_traceEventzh
+stg_traceEventzh ( W_ msg )
{
- W_ msg;
- msg = R1;
-
#if defined(TRACING) || defined(DEBUG)
- foreign "C" traceUserMsg(MyCapability() "ptr", msg "ptr") [];
+ ccall traceUserMsg(MyCapability() "ptr", msg "ptr");
#elif defined(DTRACE)
@@ -2125,7 +2013,7 @@ stg_traceEventzh
// RtsProbes.h, but that header file includes unistd.h, which doesn't
// work in Cmm
#if !defined(solaris2_TARGET_OS)
- (enabled) = foreign "C" __dtrace_isenabled$HaskellEvent$user__msg$v1() [];
+ (enabled) = ccall __dtrace_isenabled$HaskellEvent$user__msg$v1();
#else
// Solaris' DTrace can't handle the
// __dtrace_isenabled$HaskellEvent$user__msg$v1
@@ -2139,9 +2027,10 @@ stg_traceEventzh
enabled = 1;
#endif
if (enabled != 0) {
- foreign "C" dtraceUserMsgWrapper(MyCapability() "ptr", msg "ptr") [];
+ ccall dtraceUserMsgWrapper(MyCapability() "ptr", msg "ptr");
}
#endif
- jump %ENTRY_CODE(Sp(0));
+ return ();
}
+
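The hunks above all follow one conversion pattern: arguments move from
implicit registers (R1, R2, ...) into the procedure header, the
RET_N/RET_NP/RET_NPP macros become multi-value return statements, and
foreign "C" calls with explicit liveness annotations become plain ccalls.
A minimal before/after sketch, using a hypothetical primop:

    stg_examplezh                    // old style
    {
        W_ val;
        val = R1;                    // argument arrives in R1
        RET_NP(1, val);              // return an unboxed word and a pointer
    }

    stg_examplezh ( P_ val )         // new style: argument in the header
    {
        return (1, val);             // multi-value return
    }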
diff --git a/rts/Printer.c b/rts/Printer.c
index fb00401f59..4f9f83db52 100644
--- a/rts/Printer.c
+++ b/rts/Printer.c
@@ -251,7 +251,6 @@ printClosure( StgClosure *obj )
case RET_BCO:
case RET_SMALL:
case RET_BIG:
- case RET_DYN:
case RET_FUN:
*/
@@ -478,38 +477,7 @@ printStackChunk( StgPtr sp, StgPtr spBottom )
printObj((StgClosure*)sp);
continue;
- case RET_DYN:
- {
- StgRetDyn* r;
- StgPtr p;
- StgWord dyn;
- nat size;
-
- r = (StgRetDyn *)sp;
- dyn = r->liveness;
- debugBelch("RET_DYN (%p)\n", r);
-
- p = (P_)(r->payload);
- printSmallBitmap(spBottom, sp,
- RET_DYN_LIVENESS(r->liveness),
- RET_DYN_BITMAP_SIZE);
- p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
-
- for (size = RET_DYN_NONPTRS(dyn); size > 0; size--) {
- debugBelch(" stk[%ld] (%p) = ", (long)(spBottom-p), p);
- debugBelch("Word# %ld\n", (long)*p);
- p++;
- }
-
- for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
- debugBelch(" stk[%ld] (%p) = ", (long)(spBottom-p), p);
- printPtr(p);
- p++;
- }
- continue;
- }
-
- case RET_SMALL:
+ case RET_SMALL:
debugBelch("RET_SMALL (%p)\n", info);
bitmap = info->layout.bitmap;
printSmallBitmap(spBottom, sp+1,
@@ -1112,7 +1080,6 @@ char *closure_type_names[] = {
[RET_BCO] = "RET_BCO",
[RET_SMALL] = "RET_SMALL",
[RET_BIG] = "RET_BIG",
- [RET_DYN] = "RET_DYN",
[RET_FUN] = "RET_FUN",
[UPDATE_FRAME] = "UPDATE_FRAME",
[CATCH_FRAME] = "CATCH_FRAME",
diff --git a/rts/RaiseAsync.c b/rts/RaiseAsync.c
index 47d88068bf..f5669cb8ec 100644
--- a/rts/RaiseAsync.c
+++ b/rts/RaiseAsync.c
@@ -959,7 +959,7 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
// transactions, but I don't fully understand the
// interaction with STM invariants.
stack->sp[1] = (W_)&stg_NO_TREC_closure;
- stack->sp[0] = (W_)&stg_gc_unpt_r1_info;
+ stack->sp[0] = (W_)&stg_ret_p_info;
tso->what_next = ThreadRunGHC;
goto done;
}
diff --git a/rts/RetainerProfile.c b/rts/RetainerProfile.c
index c07dff76e4..5f9164b77b 100644
--- a/rts/RetainerProfile.c
+++ b/rts/RetainerProfile.c
@@ -605,7 +605,6 @@ push( StgClosure *c, retainer c_child_r, StgClosure **first_child )
case CATCH_FRAME:
case UNDERFLOW_FRAME:
case STOP_FRAME:
- case RET_DYN:
case RET_BCO:
case RET_SMALL:
case RET_BIG:
@@ -931,8 +930,7 @@ pop( StgClosure **c, StgClosure **cp, retainer *r )
case IND_STATIC:
case CONSTR_NOCAF_STATIC:
// stack objects
- case RET_DYN:
- case UPDATE_FRAME:
+ case UPDATE_FRAME:
case CATCH_FRAME:
case UNDERFLOW_FRAME:
case STOP_FRAME:
@@ -1087,7 +1085,6 @@ isRetainer( StgClosure *c )
case CATCH_FRAME:
case UNDERFLOW_FRAME:
case STOP_FRAME:
- case RET_DYN:
case RET_BCO:
case RET_SMALL:
case RET_BIG:
@@ -1349,29 +1346,7 @@ retainStack( StgClosure *c, retainer c_child_r,
// and don't forget to follow the SRT
goto follow_srt;
- // Dynamic bitmap: the mask is stored on the stack
- case RET_DYN: {
- StgWord dyn;
- dyn = ((StgRetDyn *)p)->liveness;
-
- // traverse the bitmap first
- bitmap = RET_DYN_LIVENESS(dyn);
- p = (P_)&((StgRetDyn *)p)->payload[0];
- size = RET_DYN_BITMAP_SIZE;
- p = retain_small_bitmap(p, size, bitmap, c, c_child_r);
-
- // skip over the non-ptr words
- p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
-
- // follow the ptr words
- for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
- retainClosure((StgClosure *)*p, c, c_child_r);
- p++;
- }
- continue;
- }
-
- case RET_FUN: {
+ case RET_FUN: {
StgRetFun *ret_fun = (StgRetFun *)p;
StgFunInfoTable *fun_info;
diff --git a/rts/StgMiscClosures.cmm b/rts/StgMiscClosures.cmm
index b99126187a..2985982d64 100644
--- a/rts/StgMiscClosures.cmm
+++ b/rts/StgMiscClosures.cmm
@@ -22,30 +22,36 @@ import LeaveCriticalSection;
Stack underflow
------------------------------------------------------------------------- */
-INFO_TABLE_RET (stg_stack_underflow_frame, UNDERFLOW_FRAME, P_ unused)
+INFO_TABLE_RET (stg_stack_underflow_frame, UNDERFLOW_FRAME,
+ W_ info_ptr, P_ unused)
+ /* no args => explicit stack */
{
W_ new_tso;
W_ ret_off;
+ SAVE_STGREGS
+
SAVE_THREAD_STATE();
("ptr" ret_off) = foreign "C" threadStackUnderflow(MyCapability(),
CurrentTSO);
LOAD_THREAD_STATE();
- jump %ENTRY_CODE(Sp(ret_off));
+ RESTORE_STGREGS
+
+ jump %ENTRY_CODE(Sp(ret_off)) [*]; // NB. all registers live!
}
/* ----------------------------------------------------------------------------
Restore a saved cost centre
------------------------------------------------------------------------- */
-INFO_TABLE_RET (stg_restore_cccs, RET_SMALL, W_ cccs)
+INFO_TABLE_RET (stg_restore_cccs, RET_SMALL, W_ info_ptr, W_ cccs)
{
#if defined(PROFILING)
CCCS = Sp(1);
#endif
Sp_adj(2);
- jump %ENTRY_CODE(Sp(0));
+ jump %ENTRY_CODE(Sp(0)) [*]; // NB. all registers live!
}
/* ----------------------------------------------------------------------------
@@ -53,10 +59,9 @@ INFO_TABLE_RET (stg_restore_cccs, RET_SMALL, W_ cccs)
------------------------------------------------------------------------- */
/* 9 bits of return code for constructors created by the interpreter. */
-stg_interp_constr_entry
+stg_interp_constr_entry (P_ ret)
{
- /* R1 points at the constructor */
- jump %ENTRY_CODE(Sp(0));
+ return (ret);
}
/* Some info tables to be used when compiled code returns a value to
@@ -94,76 +99,83 @@ stg_interp_constr_entry
*/
INFO_TABLE_RET( stg_ctoi_R1p, RET_BCO)
+ /* explicit stack */
{
Sp_adj(-2);
Sp(1) = R1;
Sp(0) = stg_enter_info;
- jump stg_yield_to_interpreter;
+ jump stg_yield_to_interpreter [];
}
/*
* When the returned value is a pointer, but unlifted, in R1 ...
*/
INFO_TABLE_RET( stg_ctoi_R1unpt, RET_BCO )
+ /* explicit stack */
{
Sp_adj(-2);
Sp(1) = R1;
- Sp(0) = stg_gc_unpt_r1_info;
- jump stg_yield_to_interpreter;
+ Sp(0) = stg_ret_p_info;
+ jump stg_yield_to_interpreter [];
}
/*
* When the returned value is a non-pointer in R1 ...
*/
INFO_TABLE_RET( stg_ctoi_R1n, RET_BCO )
+ /* explicit stack */
{
Sp_adj(-2);
Sp(1) = R1;
- Sp(0) = stg_gc_unbx_r1_info;
- jump stg_yield_to_interpreter;
+ Sp(0) = stg_ret_p_info;
+ jump stg_yield_to_interpreter [];
}
/*
* When the returned value is in F1
*/
INFO_TABLE_RET( stg_ctoi_F1, RET_BCO )
+ /* explicit stack */
{
Sp_adj(-2);
F_[Sp + WDS(1)] = F1;
- Sp(0) = stg_gc_f1_info;
- jump stg_yield_to_interpreter;
+ Sp(0) = stg_ret_f_info;
+ jump stg_yield_to_interpreter [];
}
/*
* When the returned value is in D1
*/
INFO_TABLE_RET( stg_ctoi_D1, RET_BCO )
+ /* explicit stack */
{
Sp_adj(-1) - SIZEOF_DOUBLE;
D_[Sp + WDS(1)] = D1;
- Sp(0) = stg_gc_d1_info;
- jump stg_yield_to_interpreter;
+ Sp(0) = stg_ret_d_info;
+ jump stg_yield_to_interpreter [];
}
/*
* When the returned value is in L1
*/
INFO_TABLE_RET( stg_ctoi_L1, RET_BCO )
+ /* explicit stack */
{
Sp_adj(-1) - 8;
L_[Sp + WDS(1)] = L1;
- Sp(0) = stg_gc_l1_info;
- jump stg_yield_to_interpreter;
+ Sp(0) = stg_ret_l_info;
+ jump stg_yield_to_interpreter [];
}
/*
* When the returned value is a void
*/
INFO_TABLE_RET( stg_ctoi_V, RET_BCO )
+ /* explicit stack */
{
Sp_adj(-1);
- Sp(0) = stg_gc_void_info;
- jump stg_yield_to_interpreter;
+ Sp(0) = stg_ret_v_info;
+ jump stg_yield_to_interpreter [];
}
/*
@@ -172,9 +184,10 @@ INFO_TABLE_RET( stg_ctoi_V, RET_BCO )
* stack.
*/
INFO_TABLE_RET( stg_apply_interp, RET_BCO )
+ /* explicit stack */
{
/* Just in case we end up in here... (we shouldn't) */
- jump stg_yield_to_interpreter;
+ jump stg_yield_to_interpreter [];
}
/* ----------------------------------------------------------------------------
@@ -182,12 +195,13 @@ INFO_TABLE_RET( stg_apply_interp, RET_BCO )
------------------------------------------------------------------------- */
INFO_TABLE_FUN( stg_BCO, 4, 0, BCO, "BCO", "BCO", ARG_BCO )
+ /* explicit stack */
{
/* entering a BCO means "apply it", same as a function */
Sp_adj(-2);
Sp(1) = R1;
Sp(0) = stg_apply_interp_info;
- jump stg_yield_to_interpreter;
+ jump stg_yield_to_interpreter [];
}
/* ----------------------------------------------------------------------------
@@ -201,30 +215,48 @@ INFO_TABLE_FUN( stg_BCO, 4, 0, BCO, "BCO", "BCO", ARG_BCO )
------------------------------------------------------------------------- */
INFO_TABLE(stg_IND,1,0,IND,"IND","IND")
+#if 0
+/*
+ This version in high-level cmm generates slightly worse code
+ than the low-level version below it. (ToDo)
+*/
+ (P_ node)
+{
+ TICK_ENT_DYN_IND(); /* tick */
+ node = UNTAG(StgInd_indirectee(node));
+ TICK_ENT_VIA_NODE();
+ jump %GET_ENTRY(node) (node);
+}
+#else
+ /* explicit stack */
{
TICK_ENT_DYN_IND(); /* tick */
R1 = UNTAG(StgInd_indirectee(R1));
TICK_ENT_VIA_NODE();
- jump %GET_ENTRY(R1);
+ jump %GET_ENTRY(R1) [R1];
}
+#endif
INFO_TABLE(stg_IND_direct,1,0,IND,"IND","IND")
+ (P_ node)
{
TICK_ENT_DYN_IND(); /* tick */
- R1 = StgInd_indirectee(R1);
+ node = StgInd_indirectee(node);
TICK_ENT_VIA_NODE();
- jump %ENTRY_CODE(Sp(0));
+ jump %ENTRY_CODE(Sp(0)) (node);
}
INFO_TABLE(stg_IND_STATIC,1,0,IND_STATIC,"IND_STATIC","IND_STATIC")
+ /* explicit stack */
{
TICK_ENT_STATIC_IND(); /* tick */
R1 = UNTAG(StgInd_indirectee(R1));
TICK_ENT_VIA_NODE();
- jump %GET_ENTRY(R1);
+ jump %GET_ENTRY(R1) [R1];
}
INFO_TABLE(stg_IND_PERM,1,0,IND_PERM,"IND_PERM","IND_PERM")
+ /* explicit stack */
{
/* Don't add INDs to granularity cost */
@@ -259,7 +291,7 @@ INFO_TABLE(stg_IND_PERM,1,0,IND_PERM,"IND_PERM","IND_PERM")
TICK_ENT_VIA_NODE();
#endif
- jump %GET_ENTRY(R1);
+ jump %GET_ENTRY(R1) [R1];
}
/* ----------------------------------------------------------------------------
@@ -272,16 +304,17 @@ INFO_TABLE(stg_IND_PERM,1,0,IND_PERM,"IND_PERM","IND_PERM")
------------------------------------------------------------------------- */
INFO_TABLE(stg_BLACKHOLE,1,0,BLACKHOLE,"BLACKHOLE","BLACKHOLE")
+ (P_ node)
{
- W_ r, p, info, bq, msg, owner, bd;
+ W_ r, info, owner, bd;
+ P_ p, bq, msg;
TICK_ENT_DYN_IND(); /* tick */
retry:
- p = StgInd_indirectee(R1);
+ p = StgInd_indirectee(node);
if (GETTAG(p) != 0) {
- R1 = p;
- jump %ENTRY_CODE(Sp(0));
+ return (p);
}
info = StgHeader_info(p);
@@ -296,33 +329,33 @@ retry:
info == stg_BLOCKING_QUEUE_CLEAN_info ||
info == stg_BLOCKING_QUEUE_DIRTY_info)
{
- ("ptr" msg) = foreign "C" allocate(MyCapability() "ptr",
- BYTES_TO_WDS(SIZEOF_MessageBlackHole)) [R1];
+ ("ptr" msg) = ccall allocate(MyCapability() "ptr",
+ BYTES_TO_WDS(SIZEOF_MessageBlackHole));
SET_HDR(msg, stg_MSG_BLACKHOLE_info, CCS_SYSTEM);
MessageBlackHole_tso(msg) = CurrentTSO;
- MessageBlackHole_bh(msg) = R1;
+ MessageBlackHole_bh(msg) = node;
- (r) = foreign "C" messageBlackHole(MyCapability() "ptr", msg "ptr") [R1];
+ (r) = ccall messageBlackHole(MyCapability() "ptr", msg "ptr");
if (r == 0) {
goto retry;
} else {
StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
StgTSO_block_info(CurrentTSO) = msg;
- jump stg_block_blackhole;
+ jump stg_block_blackhole(node);
}
}
else
{
- R1 = p;
- ENTER();
+ ENTER(p);
}
}
INFO_TABLE(__stg_EAGER_BLACKHOLE,1,0,BLACKHOLE,"BLACKHOLE","BLACKHOLE")
+ (P_ node)
{
- jump ENTRY_LBL(stg_BLACKHOLE);
+ jump ENTRY_LBL(stg_BLACKHOLE) (node);
}
// CAF_BLACKHOLE is allocated when entering a CAF. The reason it is
@@ -332,8 +365,9 @@ INFO_TABLE(__stg_EAGER_BLACKHOLE,1,0,BLACKHOLE,"BLACKHOLE","BLACKHOLE")
// evaluation by another thread (a BLACKHOLE). See threadPaused().
//
INFO_TABLE(stg_CAF_BLACKHOLE,1,0,BLACKHOLE,"BLACKHOLE","BLACKHOLE")
+ (P_ node)
{
- jump ENTRY_LBL(stg_BLACKHOLE);
+ jump ENTRY_LBL(stg_BLACKHOLE) (node);
}
INFO_TABLE(stg_BLOCKING_QUEUE_CLEAN,4,0,BLOCKING_QUEUE,"BLOCKING_QUEUE","BLOCKING_QUEUE")
@@ -349,6 +383,7 @@ INFO_TABLE(stg_BLOCKING_QUEUE_DIRTY,4,0,BLOCKING_QUEUE,"BLOCKING_QUEUE","BLOCKIN
------------------------------------------------------------------------- */
INFO_TABLE(stg_WHITEHOLE, 0,0, WHITEHOLE, "WHITEHOLE", "WHITEHOLE")
+ (P_ node)
{
#if defined(THREADED_RTS)
W_ info, i;
@@ -356,18 +391,18 @@ INFO_TABLE(stg_WHITEHOLE, 0,0, WHITEHOLE, "WHITEHOLE", "WHITEHOLE")
i = 0;
loop:
// spin until the WHITEHOLE is updated
- info = StgHeader_info(R1);
+ info = StgHeader_info(node);
if (info == stg_WHITEHOLE_info) {
i = i + 1;
if (i == SPIN_COUNT) {
i = 0;
- foreign "C" yieldThread() [R1];
+ ccall yieldThread();
}
goto loop;
}
- jump %ENTRY_CODE(info);
+ jump %ENTRY_CODE(info) (node);
#else
- foreign "C" barf("WHITEHOLE object entered!") never returns;
+ ccall barf("WHITEHOLE object entered!") never returns;
#endif
}
@@ -556,8 +591,9 @@ INFO_TABLE(stg_MUT_VAR_DIRTY, 1, 0, MUT_VAR_DIRTY, "MUT_VAR_DIRTY", "MUT_VAR_DIR
* ------------------------------------------------------------------------- */
INFO_TABLE( stg_dummy_ret, 0, 0, CONSTR_NOCAF_STATIC, "DUMMY_RET", "DUMMY_RET")
+ ()
{
- jump %ENTRY_CODE(Sp(0));
+ return ();
}
CLOSURE(stg_dummy_ret_closure,stg_dummy_ret);
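Two jump forms appear throughout this file. Low-level code (marked
"explicit stack") must annotate every jump with the GlobalRegs that are
live across it, while high-level code passes arguments and lets the Cmm
pipeline compute liveness. A hedged sketch of both, with hypothetical
procedure names:

    example_low   /* explicit stack */
    {
        jump %ENTRY_CODE(Sp(0)) [R1];   // R1 is live; [] would mean none
    }

    example_high ( P_ node )
    {
        jump stg_ap_0_fast (node);      // argument-passing jump
    }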
diff --git a/rts/StgStartup.cmm b/rts/StgStartup.cmm
index 4aace82deb..6793913464 100644
--- a/rts/StgStartup.cmm
+++ b/rts/StgStartup.cmm
@@ -35,11 +35,9 @@
-------------------------------------------------------------------------- */
INFO_TABLE_RET(stg_stop_thread, STOP_FRAME,
-#if defined(PROFILING)
- W_ unused,
- W_ unused
-#endif
-)
+ W_ info_ptr,
+ PROF_HDR_FIELDS(W_))
+/* no return list: explicit stack layout */
{
/*
The final exit.
@@ -75,7 +73,7 @@ INFO_TABLE_RET(stg_stop_thread, STOP_FRAME,
StgRegTable_rRet(BaseReg) = ThreadFinished;
R1 = BaseReg;
- jump StgReturn;
+ jump StgReturn [R1];
}
/* -----------------------------------------------------------------------------
@@ -87,46 +85,57 @@ INFO_TABLE_RET(stg_stop_thread, STOP_FRAME,
the thread's state away nicely.
-------------------------------------------------------------------------- */
-stg_returnToStackTop
+stg_returnToStackTop /* no args: explicit stack layout */
{
LOAD_THREAD_STATE();
CHECK_SENSIBLE_REGS();
- jump %ENTRY_CODE(Sp(0));
+ jump %ENTRY_CODE(Sp(0)) [];
}
-stg_returnToSched
+stg_returnToSched /* no args: explicit stack layout */
{
+ W_ r1;
+ r1 = R1; // foreign calls may clobber R1
SAVE_THREAD_STATE();
foreign "C" threadPaused(MyCapability() "ptr", CurrentTSO);
- jump StgReturn;
+ R1 = r1;
+ jump StgReturn [R1];
}
// A variant of stg_returnToSched that doesn't call threadPaused() on the
// current thread. This is used for switching from compiled execution to the
// interpreter, where calling threadPaused() on every switch would be too
// expensive.
-stg_returnToSchedNotPaused
+stg_returnToSchedNotPaused /* no args: explicit stack layout */
{
SAVE_THREAD_STATE();
- jump StgReturn;
+ jump StgReturn [R1];
}
// A variant of stg_returnToSched, but instead of returning directly to the
// scheduler, we jump to the code fragment pointed to by R2. This lets us
// perform some final actions after making the thread safe, such as unlocking
// the MVar on which we are about to block in SMP mode.
-stg_returnToSchedButFirst
+stg_returnToSchedButFirst /* no args: explicit stack layout */
{
+ W_ r1, r2, r3;
+ r1 = R1;
+ r2 = R2;
+ r3 = R3;
SAVE_THREAD_STATE();
+ // foreign calls may clobber R1/R2/..., so we save them above
foreign "C" threadPaused(MyCapability() "ptr", CurrentTSO);
- jump R2;
+ R1 = r1;
+ R2 = r2;
+ R3 = r3;
+ jump R2 [R1,R3];
}
-stg_threadFinished
+stg_threadFinished /* no args: explicit stack layout */
{
StgRegTable_rRet(BaseReg) = ThreadFinished;
R1 = BaseReg;
- jump StgReturn;
+ jump StgReturn [R1];
}
/* -----------------------------------------------------------------------------
@@ -143,31 +152,30 @@ stg_threadFinished
------------------------------------------------------------------------- */
-INFO_TABLE_RET(stg_forceIO, RET_SMALL)
-
+INFO_TABLE_RET(stg_forceIO, RET_SMALL, P_ info_ptr)
+ return (P_ ret)
{
- Sp_adj(1);
- ENTER();
+ ENTER(ret);
}
/* -----------------------------------------------------------------------------
Special STG entry points for module registration.
-------------------------------------------------------------------------- */
-stg_init_finish
+stg_init_finish /* no args: explicit stack layout */
{
- jump StgReturn;
+ jump StgReturn [];
}
/* On entry to stg_init:
* init_stack[0] = &stg_init_ret;
* init_stack[1] = __stginit_Something;
*/
-stg_init
+stg_init /* no args: explicit stack layout */
{
W_ next;
Sp = W_[BaseReg + OFFSET_StgRegTable_rSp];
next = W_[Sp];
Sp_adj(1);
- jump next;
+ jump next [];
}
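The save/restore dance above exists because a foreign "C" call may
clobber the STG registers. The idiom, sketched with a hypothetical
procedure:

    example_return   /* no args: explicit stack layout */
    {
        W_ r1;
        r1 = R1;                        // save before the C call
        SAVE_THREAD_STATE();
        foreign "C" threadPaused(MyCapability() "ptr", CurrentTSO);
        R1 = r1;                        // restore afterwards
        jump StgReturn [R1];            // and declare R1 live
    }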
diff --git a/rts/StgStdThunks.cmm b/rts/StgStdThunks.cmm
index 171ab52b96..0b69a9a279 100644
--- a/rts/StgStdThunks.cmm
+++ b/rts/StgStdThunks.cmm
@@ -11,6 +11,7 @@
* ---------------------------------------------------------------------------*/
#include "Cmm.h"
+#include "Updates.h"
/* -----------------------------------------------------------------------------
The code for a thunk that simply extracts a field from a
@@ -26,17 +27,15 @@
matching.
-------------------------------------------------------------------------- */
-#define WITHUPD_FRAME_SIZE (SIZEOF_StgUpdateFrame + SIZEOF_StgHeader)
-#define NOUPD_FRAME_SIZE (SIZEOF_StgHeader)
-
#ifdef PROFILING
-#define SAVE_CCCS(fs) StgHeader_ccs(Sp-fs) = CCCS
-#define GET_SAVED_CCCS CCCS = StgHeader_ccs(Sp)
-#define RET_PARAMS W_ unused1, W_ unused2
+#define RET_FIELDS(w_,info_ptr,ccs) \
+ w_ info_ptr, \
+ w_ ccs
+#define GET_SAVED_CCCS CCCS = ccs
#else
-#define SAVE_CCCS(fs) /* empty */
+#define RET_FIELDS(w_,info_ptr,ccs) \
+ w_ info_ptr
#define GET_SAVED_CCCS /* empty */
-#define RET_PARAMS
#endif
/*
@@ -56,42 +55,34 @@
// When profiling, we cannot shortcut by checking the tag,
// because LDV profiling relies on entering closures to mark them as
// "used".
-#define SEL_ENTER(offset) \
- R1 = UNTAG(R1); \
- jump %GET_ENTRY(R1);
+#define NEED_EVAL(__x__) 1
#else
-#define SEL_ENTER(offset) \
- if (GETTAG(R1) != 0) { \
- jump RET_LBL(stg_sel_ret_##offset##_upd); \
- } \
- jump %GET_ENTRY(R1);
+#define NEED_EVAL(__x__) GETTAG(__x__) == 0
#endif
#define SELECTOR_CODE_UPD(offset) \
- INFO_TABLE_RET(stg_sel_ret_##offset##_upd, RET_SMALL, RET_PARAMS) \
- { \
- R1 = StgClosure_payload(UNTAG(R1),offset); \
- GET_SAVED_CCCS; \
- Sp = Sp + SIZEOF_StgHeader; \
- ENTER(); \
- } \
- \
INFO_TABLE_SELECTOR(stg_sel_##offset##_upd, offset, THUNK_SELECTOR, "stg_sel_upd", "stg_sel_upd") \
- { \
- TICK_ENT_DYN_THK(); \
- STK_CHK_NP(WITHUPD_FRAME_SIZE); \
- UPD_BH_UPDATABLE(); \
- LDV_ENTER(R1); \
- PUSH_UPD_FRAME(Sp - SIZEOF_StgUpdateFrame, R1); \
- ENTER_CCS_THUNK(R1); \
- SAVE_CCCS(WITHUPD_FRAME_SIZE); \
- W_[Sp-WITHUPD_FRAME_SIZE] = stg_sel_ret_##offset##_upd_info; \
- Sp = Sp - WITHUPD_FRAME_SIZE; \
- R1 = StgThunk_payload(R1,0); \
- SEL_ENTER(offset); \
+ (P_ node) \
+ { \
+ P_ selectee, field; \
+ TICK_ENT_DYN_THK(); \
+ STK_CHK_NP(node); \
+ UPD_BH_UPDATABLE(node); \
+ LDV_ENTER(node); \
+ ENTER_CCS_THUNK(node); \
+ selectee = StgThunk_payload(node,0); \
+ push (UPDATE_FRAME_FIELDS(,,stg_upd_frame_info,node)) { \
+ if (NEED_EVAL(selectee)) { \
+ (P_ constr) = call %GET_ENTRY(selectee) (selectee); \
+ selectee = constr; \
+ } \
+ field = StgClosure_payload(UNTAG(selectee),offset); \
+ jump stg_ap_0_fast(field); \
+ } \
}
- /* NOTE: no need to ENTER() here, we know the closure cannot evaluate to a function,
- because we're going to do a field selection on the result. */
+ /* NOTE: no need to ENTER() here, we know the closure cannot
+ evaluate to a function, because we're going to do a field
+ selection on the result. */
SELECTOR_CODE_UPD(0)
SELECTOR_CODE_UPD(1)
@@ -110,33 +101,27 @@ SELECTOR_CODE_UPD(13)
SELECTOR_CODE_UPD(14)
SELECTOR_CODE_UPD(15)
-#define SELECTOR_CODE_NOUPD(offset) \
- INFO_TABLE_RET(stg_sel_ret_##offset##_noupd, RET_SMALL, RET_PARAMS) \
- { \
- R1 = StgClosure_payload(UNTAG(R1),offset); \
- GET_SAVED_CCCS; \
- Sp = Sp + SIZEOF_StgHeader; \
- ENTER(); \
- } \
- \
- INFO_TABLE_SELECTOR(stg_sel_##offset##_noupd, offset, THUNK_SELECTOR, "stg_sel_noupd", "stg_sel_noupd")\
- { \
- TICK_ENT_DYN_THK(); \
- STK_CHK_NP(NOUPD_FRAME_SIZE); \
- UPD_BH_SINGLE_ENTRY(); \
- LDV_ENTER(R1); \
- TICK_UPDF_OMITTED(); \
- ENTER_CCS_THUNK(R1); \
- SAVE_CCCS(NOUPD_FRAME_SIZE); \
- W_[Sp-NOUPD_FRAME_SIZE] = stg_sel_ret_##offset##_noupd_info; \
- Sp = Sp - NOUPD_FRAME_SIZE; \
- R1 = StgThunk_payload(R1,0); \
- if (GETTAG(R1) != 0) { \
- jump RET_LBL(stg_sel_ret_##offset##_noupd); \
+
+#define SELECTOR_CODE_NOUPD(offset) \
+ INFO_TABLE_SELECTOR(stg_sel_##offset##_noupd, offset, THUNK_SELECTOR, "stg_sel_noupd", "stg_sel_noupd") \
+ (P_ node) \
+ { \
+ P_ selectee, field; \
+ TICK_ENT_DYN_THK(); \
+ STK_CHK_NP(node); \
+ UPD_BH_UPDATABLE(node); \
+ LDV_ENTER(node); \
+ ENTER_CCS_THUNK(node); \
+ selectee = StgThunk_payload(node,0); \
+ if (NEED_EVAL(selectee)) { \
+ (P_ constr) = call %GET_ENTRY(selectee) (selectee); \
+ selectee = constr; \
} \
- jump %GET_ENTRY(R1); \
+ field = StgClosure_payload(UNTAG(selectee),offset); \
+ jump stg_ap_0_fast(field); \
}
+
SELECTOR_CODE_NOUPD(0)
SELECTOR_CODE_NOUPD(1)
SELECTOR_CODE_NOUPD(2)
@@ -173,131 +158,120 @@ SELECTOR_CODE_NOUPD(15)
*/
INFO_TABLE(stg_ap_1_upd,1,0,THUNK_1_0,"stg_ap_1_upd_info","stg_ap_1_upd_info")
+ (P_ node)
{
- TICK_ENT_DYN_THK();
- STK_CHK_NP(SIZEOF_StgUpdateFrame+WDS(1));
- UPD_BH_UPDATABLE();
- LDV_ENTER(R1);
- ENTER_CCS_THUNK(R1);
- PUSH_UPD_FRAME(Sp-SIZEOF_StgUpdateFrame,R1);
- R1 = StgThunk_payload(R1,0);
- Sp = Sp - SIZEOF_StgUpdateFrame;
- jump stg_ap_0_fast;
+ TICK_ENT_DYN_THK();
+ STK_CHK_NP(node);
+ UPD_BH_UPDATABLE(node);
+ LDV_ENTER(node);
+ ENTER_CCS_THUNK(node);
+ push (UPDATE_FRAME_FIELDS(,,stg_upd_frame_info, node)) {
+ jump stg_ap_0_fast
+ (StgThunk_payload(node,0));
+ }
}
INFO_TABLE(stg_ap_2_upd,2,0,THUNK_2_0,"stg_ap_2_upd_info","stg_ap_2_upd_info")
+ (P_ node)
{
- TICK_ENT_DYN_THK();
- STK_CHK_NP(SIZEOF_StgUpdateFrame+WDS(2));
- UPD_BH_UPDATABLE();
- LDV_ENTER(R1);
- ENTER_CCS_THUNK(R1);
- PUSH_UPD_FRAME(Sp-SIZEOF_StgUpdateFrame,R1);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(1)] = StgThunk_payload(R1,1);
- R1 = StgThunk_payload(R1,0);
- Sp = Sp - SIZEOF_StgUpdateFrame - WDS(1);
- Sp_adj(-1); // for stg_ap_*_ret
- TICK_UNKNOWN_CALL();
- TICK_SLOW_CALL_p();
- jump RET_LBL(stg_ap_p);
+ TICK_ENT_DYN_THK();
+ STK_CHK_NP(node);
+ UPD_BH_UPDATABLE(node);
+ LDV_ENTER(node);
+ ENTER_CCS_THUNK(node);
+ push (UPDATE_FRAME_FIELDS(,,stg_upd_frame_info, node)) {
+ jump stg_ap_p_fast
+ (StgThunk_payload(node,0),
+ StgThunk_payload(node,1));
+ }
}
INFO_TABLE(stg_ap_3_upd,3,0,THUNK,"stg_ap_3_upd_info","stg_ap_3_upd_info")
+ (P_ node)
{
- TICK_ENT_DYN_THK();
- STK_CHK_NP(SIZEOF_StgUpdateFrame+WDS(3));
- UPD_BH_UPDATABLE();
- LDV_ENTER(R1);
- ENTER_CCS_THUNK(R1);
- PUSH_UPD_FRAME(Sp-SIZEOF_StgUpdateFrame,R1);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(1)] = StgThunk_payload(R1,2);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(2)] = StgThunk_payload(R1,1);
- R1 = StgThunk_payload(R1,0);
- Sp = Sp - SIZEOF_StgUpdateFrame - WDS(2);
- Sp_adj(-1); // for stg_ap_*_ret
- TICK_UNKNOWN_CALL();
- TICK_SLOW_CALL_pp();
- jump RET_LBL(stg_ap_pp);
+ TICK_ENT_DYN_THK();
+ STK_CHK_NP(node);
+ UPD_BH_UPDATABLE(node);
+ LDV_ENTER(node);
+ ENTER_CCS_THUNK(node);
+ push (UPDATE_FRAME_FIELDS(,,stg_upd_frame_info, node)) {
+ jump stg_ap_pp_fast
+ (StgThunk_payload(node,0),
+ StgThunk_payload(node,1),
+ StgThunk_payload(node,2));
+ }
}
INFO_TABLE(stg_ap_4_upd,4,0,THUNK,"stg_ap_4_upd_info","stg_ap_4_upd_info")
+ (P_ node)
{
- TICK_ENT_DYN_THK();
- STK_CHK_NP(SIZEOF_StgUpdateFrame+WDS(4));
- UPD_BH_UPDATABLE();
- LDV_ENTER(R1);
- ENTER_CCS_THUNK(R1);
- PUSH_UPD_FRAME(Sp-SIZEOF_StgUpdateFrame,R1);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(1)] = StgThunk_payload(R1,3);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(2)] = StgThunk_payload(R1,2);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(3)] = StgThunk_payload(R1,1);
- R1 = StgThunk_payload(R1,0);
- Sp = Sp - SIZEOF_StgUpdateFrame - WDS(3);
- Sp_adj(-1); // for stg_ap_*_ret
- TICK_UNKNOWN_CALL();
- TICK_SLOW_CALL_ppp();
- jump RET_LBL(stg_ap_ppp);
+ TICK_ENT_DYN_THK();
+ STK_CHK_NP(node);
+ UPD_BH_UPDATABLE(node);
+ LDV_ENTER(node);
+ ENTER_CCS_THUNK(node);
+ push (UPDATE_FRAME_FIELDS(,,stg_upd_frame_info, node)) {
+ jump stg_ap_ppp_fast
+ (StgThunk_payload(node,0),
+ StgThunk_payload(node,1),
+ StgThunk_payload(node,2),
+ StgThunk_payload(node,3));
+ }
}
INFO_TABLE(stg_ap_5_upd,5,0,THUNK,"stg_ap_5_upd_info","stg_ap_5_upd_info")
+ (P_ node)
{
- TICK_ENT_DYN_THK();
- STK_CHK_NP(SIZEOF_StgUpdateFrame+WDS(5));
- UPD_BH_UPDATABLE();
- LDV_ENTER(R1);
- ENTER_CCS_THUNK(R1);
- PUSH_UPD_FRAME(Sp-SIZEOF_StgUpdateFrame,R1);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(1)] = StgThunk_payload(R1,4);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(2)] = StgThunk_payload(R1,3);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(3)] = StgThunk_payload(R1,2);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(4)] = StgThunk_payload(R1,1);
- R1 = StgThunk_payload(R1,0);
- Sp = Sp - SIZEOF_StgUpdateFrame - WDS(4);
- Sp_adj(-1); // for stg_ap_*_ret
- TICK_UNKNOWN_CALL();
- TICK_SLOW_CALL_pppp();
- jump RET_LBL(stg_ap_pppp);
+ TICK_ENT_DYN_THK();
+ STK_CHK_NP(node);
+ UPD_BH_UPDATABLE(node);
+ LDV_ENTER(node);
+ ENTER_CCS_THUNK(node);
+ push (UPDATE_FRAME_FIELDS(,,stg_upd_frame_info, node)) {
+ jump stg_ap_pppp_fast
+ (StgThunk_payload(node,0),
+ StgThunk_payload(node,1),
+ StgThunk_payload(node,2),
+ StgThunk_payload(node,3),
+ StgThunk_payload(node,4));
+ }
}
INFO_TABLE(stg_ap_6_upd,6,0,THUNK,"stg_ap_6_upd_info","stg_ap_6_upd_info")
+ (P_ node)
{
- TICK_ENT_DYN_THK();
- STK_CHK_NP(SIZEOF_StgUpdateFrame+WDS(6));
- UPD_BH_UPDATABLE();
- LDV_ENTER(R1);
- ENTER_CCS_THUNK(R1);
- PUSH_UPD_FRAME(Sp-SIZEOF_StgUpdateFrame,R1);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(1)] = StgThunk_payload(R1,5);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(2)] = StgThunk_payload(R1,4);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(3)] = StgThunk_payload(R1,3);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(4)] = StgThunk_payload(R1,2);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(5)] = StgThunk_payload(R1,1);
- R1 = StgThunk_payload(R1,0);
- Sp = Sp - SIZEOF_StgUpdateFrame - WDS(5);
- Sp_adj(-1); // for stg_ap_*_ret
- TICK_UNKNOWN_CALL();
- TICK_SLOW_CALL_ppppp();
- jump RET_LBL(stg_ap_ppppp);
+ TICK_ENT_DYN_THK();
+ STK_CHK_NP(node);
+ UPD_BH_UPDATABLE(node);
+ LDV_ENTER(node);
+ ENTER_CCS_THUNK(node);
+ push (UPDATE_FRAME_FIELDS(,,stg_upd_frame_info, node)) {
+ jump stg_ap_ppppp_fast
+ (StgThunk_payload(node,0),
+ StgThunk_payload(node,1),
+ StgThunk_payload(node,2),
+ StgThunk_payload(node,3),
+ StgThunk_payload(node,4),
+ StgThunk_payload(node,5));
+ }
}
INFO_TABLE(stg_ap_7_upd,7,0,THUNK,"stg_ap_7_upd_info","stg_ap_7_upd_info")
+ (P_ node)
{
- TICK_ENT_DYN_THK();
- STK_CHK_NP(SIZEOF_StgUpdateFrame+WDS(7));
- UPD_BH_UPDATABLE();
- LDV_ENTER(R1);
- ENTER_CCS_THUNK(R1);
- PUSH_UPD_FRAME(Sp-SIZEOF_StgUpdateFrame,R1);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(1)] = StgThunk_payload(R1,6);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(2)] = StgThunk_payload(R1,5);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(3)] = StgThunk_payload(R1,4);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(4)] = StgThunk_payload(R1,3);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(5)] = StgThunk_payload(R1,2);
- W_[Sp-SIZEOF_StgUpdateFrame-WDS(6)] = StgThunk_payload(R1,1);
- R1 = StgThunk_payload(R1,0);
- Sp = Sp - SIZEOF_StgUpdateFrame - WDS(6);
- Sp_adj(-1); // for stg_ap_*_ret
- TICK_UNKNOWN_CALL();
- TICK_SLOW_CALL_pppppp();
- jump RET_LBL(stg_ap_pppppp);
+ TICK_ENT_DYN_THK();
+ STK_CHK_NP(node);
+ UPD_BH_UPDATABLE(node);
+ LDV_ENTER(node);
+ ENTER_CCS_THUNK(node);
+ push (UPDATE_FRAME_FIELDS(,,stg_upd_frame_info, node)) {
+ jump stg_ap_pppppp_fast
+ (StgThunk_payload(node,0),
+ StgThunk_payload(node,1),
+ StgThunk_payload(node,2),
+ StgThunk_payload(node,3),
+ StgThunk_payload(node,4),
+ StgThunk_payload(node,5),
+ StgThunk_payload(node,6));
+ }
}
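All seven thunk entry codes above now share one shape: the new
push (frame) { body } construct lays out an update frame and scopes the
code that runs beneath it, replacing the old PUSH_UPD_FRAME plus manual
Sp arithmetic. A minimal sketch for a hypothetical one-field thunk:

    INFO_TABLE(example_ap_1_upd,1,0,THUNK_1_0,"example","example")
        (P_ node)
    {
        push (UPDATE_FRAME_FIELDS(,,stg_upd_frame_info, node)) {
            jump stg_ap_0_fast (StgThunk_payload(node,0));
        }
    }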
diff --git a/rts/Updates.cmm b/rts/Updates.cmm
index 44fbc0e194..2bc21ec332 100644
--- a/rts/Updates.cmm
+++ b/rts/Updates.cmm
@@ -16,85 +16,72 @@
#include "Updates.h"
-#if defined(PROFILING)
-#define UPD_FRAME_PARAMS W_ unused1, W_ unused2, P_ unused3
-#else
-#define UPD_FRAME_PARAMS P_ unused1
-#endif
-
-/* The update fragment has been tuned so as to generate good
- code with gcc, which accounts for some of the strangeness in the
- way it is written.
-
- In particular, the JMP_(ret) bit is passed down and pinned on the
- end of each branch (there end up being two major branches in the
- code), since we don't mind duplicating this jump.
-*/
-
-/* on entry to the update code
- (1) R1 points to the closure being returned
- (2) Sp points to the update frame
-*/
-
-INFO_TABLE_RET( stg_upd_frame, UPDATE_FRAME, UPD_FRAME_PARAMS)
+/*
+ * The update code is PERFORMANCE CRITICAL, if you make any changes
+ * here make sure you eyeball the assembly and check that the fast
+ * path (update in generation 0) is optimal.
+ *
+ * The return(ret) bit is passed down and pinned on the end of each
+ * branch (there end up being two major branches in the code), since
+ * we don't mind duplicating this jump.
+ */
+INFO_TABLE_RET ( stg_upd_frame, UPDATE_FRAME,
+ UPDATE_FRAME_FIELDS(W_,P_,info_ptr,updatee) )
+ return (P_ ret) /* the closure being returned */
{
- W_ updatee;
-
- updatee = StgUpdateFrame_updatee(Sp);
-
- /* remove the update frame from the stack */
- Sp = Sp + SIZEOF_StgUpdateFrame;
-
/* ToDo: it might be a PAP, so we should check... */
TICK_UPD_CON_IN_NEW(sizeW_fromITBL(%GET_STD_INFO(updatee)));
-
- updateWithIndirection(updatee,
- R1,
- jump %ENTRY_CODE(Sp(0)) [R1]);
-}
+ updateWithIndirection(updatee, ret, return (ret));
+}
-INFO_TABLE_RET( stg_marked_upd_frame, UPDATE_FRAME, UPD_FRAME_PARAMS)
+/*
+ * An update frame where the updatee has been replaced by a BLACKHOLE
+ * closure by threadPaused. We may have threads to wake up, and we
+ * also have to check whether the blackhole has been updated by
+ * another thread in the meantime.
+ */
+INFO_TABLE_RET ( stg_marked_upd_frame, UPDATE_FRAME,
+ UPDATE_FRAME_FIELDS(W_,P_,info_ptr,updatee) )
+ return (P_ ret) /* the closure being returned */
{
- W_ updatee, v, i, tso, link;
+ W_ v, i, tso, link;
// we know the closure is a BLACKHOLE
- updatee = StgUpdateFrame_updatee(Sp);
v = StgInd_indirectee(updatee);
- // remove the update frame from the stack
- Sp = Sp + SIZEOF_StgUpdateFrame;
-
if (GETTAG(v) != 0) {
// updated by someone else: discard our value and use the
// other one to increase sharing, but check the blocking
// queues to see if any threads were waiting on this BLACKHOLE.
- R1 = v;
- foreign "C" checkBlockingQueues(MyCapability() "ptr",
- CurrentTSO "ptr") [R1];
- jump %ENTRY_CODE(Sp(0)) [R1];
+ ccall checkBlockingQueues(MyCapability() "ptr", CurrentTSO "ptr");
+ return (v);
}
// common case: it is still our BLACKHOLE
if (v == CurrentTSO) {
- updateWithIndirection(updatee,
- R1,
- jump %ENTRY_CODE(Sp(0)) [R1]);
+ updateWithIndirection(updatee, ret, return (ret));
}
// The other cases are all handled by the generic code
- foreign "C" updateThunk (MyCapability() "ptr", CurrentTSO "ptr",
- updatee "ptr", R1 "ptr") [R1];
+ ccall updateThunk (MyCapability() "ptr", CurrentTSO "ptr",
+ updatee "ptr", ret "ptr");
- jump %ENTRY_CODE(Sp(0)) [R1];
+ return (ret);
}
-// Special update frame code for CAFs and eager-blackholed thunks: it
-// knows how to update blackholes, but is distinct from
-// stg_marked_upd_frame so that lazy blackholing won't treat it as the
-// high watermark.
-INFO_TABLE_RET (stg_bh_upd_frame, UPDATE_FRAME, UPD_FRAME_PARAMS)
+/*
+ * Special update frame code for CAFs and eager-blackholed thunks: it
+ * knows how to update blackholes, but is distinct from
+ * stg_marked_upd_frame so that lazy blackholing won't treat it as the
+ * high watermark.
+ */
+INFO_TABLE_RET ( stg_bh_upd_frame, UPDATE_FRAME,
+ UPDATE_FRAME_FIELDS(W_,P_,info_ptr,updatee) )
+ return (P_ ret) /* the closure being returned */
{
- jump RET_LBL(stg_marked_upd_frame) [R1];
+ // This all compiles away to a single jump instruction (sigh)
+ jump RET_LBL(stg_marked_upd_frame)
+ ( UPDATE_FRAME_FIELDS(,,info_ptr,updatee) )
+ (ret);
}
-
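Update frames illustrate the new return-frame shape: the frame's fields
are bound in the INFO_TABLE_RET header, and the closure being returned
arrives as a declared parameter rather than in R1. A hedged sketch with
a hypothetical frame:

    INFO_TABLE_RET ( example_frame, RET_SMALL, W_ info_ptr )
        return (P_ ret)
    {
        return (ret);    // pop this frame and keep returning
    }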
diff --git a/rts/Updates.h b/rts/Updates.h
index 954f02afe1..0205e6e763 100644
--- a/rts/Updates.h
+++ b/rts/Updates.h
@@ -24,29 +24,34 @@
* field. So, we call LDV_RECORD_CREATE().
*/
-/* We have two versions of this macro (sadly), one for use in C-- code,
+/*
+ * We have two versions of this macro (sadly), one for use in C-- code,
* and the other for C.
*
* The and_then argument is a performance hack so that we can paste in
* the continuation code directly. It helps shave a couple of
* instructions off the common case in the update code, which is
* worthwhile (the update code is often part of the inner loop).
- * (except that gcc now appears to common up this code again and
- * invert the optimisation. Grrrr --SDM).
*/
#ifdef CMINUSMINUS
-#define updateWithIndirection(p1, p2, and_then) \
+#define UPDATE_FRAME_FIELDS(w_,p_,info_ptr,updatee) \
+ w_ info_ptr, \
+ PROF_HDR_FIELDS(w_) \
+ p_ updatee
+
+
+#define updateWithIndirection(p1, p2, and_then) \
W_ bd; \
\
OVERWRITING_CLOSURE(p1); \
StgInd_indirectee(p1) = p2; \
- prim %write_barrier() []; \
+ prim %write_barrier(); \
SET_INFO(p1, stg_BLACKHOLE_info); \
LDV_RECORD_CREATE(p1); \
bd = Bdescr(p1); \
if (bdescr_gen_no(bd) != 0 :: bits16) { \
- recordMutableCap(p1, TO_W_(bdescr_gen_no(bd)), R1); \
+ recordMutableCap(p1, TO_W_(bdescr_gen_no(bd))); \
TICK_UPD_OLD_IND(); \
and_then; \
} else { \
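A usage note on the and_then parameter: the caller's continuation is
pasted verbatim into both branches of updateWithIndirection, so the
common young-generation path ends in a single direct return. From
stg_upd_frame above, roughly:

    updateWithIndirection(updatee, ret, return (ret));
    // expands to: blackhole updatee, point it at ret, then either
    //   old generation:   remember updatee in the mut list; return (ret);
    //   young generation: return (ret);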
diff --git a/rts/sm/Compact.c b/rts/sm/Compact.c
index c97e168433..34111f9206 100644
--- a/rts/sm/Compact.c
+++ b/rts/sm/Compact.c
@@ -301,37 +301,7 @@ thread_stack(StgPtr p, StgPtr stack_end)
switch (info->i.type) {
- // Dynamic bitmap: the mask is stored on the stack
- case RET_DYN:
- {
- StgWord dyn;
- dyn = ((StgRetDyn *)p)->liveness;
-
- // traverse the bitmap first
- bitmap = RET_DYN_LIVENESS(dyn);
- p = (P_)&((StgRetDyn *)p)->payload[0];
- size = RET_DYN_BITMAP_SIZE;
- while (size > 0) {
- if ((bitmap & 1) == 0) {
- thread((StgClosure **)p);
- }
- p++;
- bitmap = bitmap >> 1;
- size--;
- }
-
- // skip over the non-ptr words
- p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
-
- // follow the ptr words
- for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
- thread((StgClosure **)p);
- p++;
- }
- continue;
- }
-
- // small bitmap (<= 32 entries, or 64 on a 64-bit machine)
+ // small bitmap (<= 32 entries, or 64 on a 64-bit machine)
case CATCH_RETRY_FRAME:
case CATCH_STM_FRAME:
case ATOMICALLY_FRAME:
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 8be393b4bc..0ac9e2623a 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -670,7 +670,6 @@ loop:
case RET_BCO:
case RET_SMALL:
case RET_BIG:
- case RET_DYN:
case UPDATE_FRAME:
case UNDERFLOW_FRAME:
case STOP_FRAME:
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index 5c7fb8aa76..6237662720 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -105,32 +105,6 @@ checkStackFrame( StgPtr c )
/* All activation records have 'bitmap' style layout info. */
switch (info->i.type) {
- case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
- {
- StgWord dyn;
- StgPtr p;
- StgRetDyn* r;
-
- r = (StgRetDyn *)c;
- dyn = r->liveness;
-
- p = (P_)(r->payload);
- checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
- p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
-
- // skip over the non-pointers
- p += RET_DYN_NONPTRS(dyn);
-
- // follow the ptr words
- for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
- checkClosureShallow((StgClosure *)*p);
- p++;
- }
-
- return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
- RET_DYN_NONPTR_REGS_SIZE +
- RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
- }
case UPDATE_FRAME:
ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
@@ -381,7 +355,6 @@ checkClosure( StgClosure* p )
case RET_BCO:
case RET_SMALL:
case RET_BIG:
- case RET_DYN:
case UPDATE_FRAME:
case UNDERFLOW_FRAME:
case STOP_FRAME:
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index cbdf01b720..668b95da6b 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -1685,32 +1685,6 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
goto follow_srt;
}
- // Dynamic bitmap: the mask is stored on the stack, and
- // there are a number of non-pointers followed by a number
- // of pointers above the bitmapped area. (see StgMacros.h,
- // HEAP_CHK_GEN).
- case RET_DYN:
- {
- StgWord dyn;
- dyn = ((StgRetDyn *)p)->liveness;
-
- // traverse the bitmap first
- bitmap = RET_DYN_LIVENESS(dyn);
- p = (P_)&((StgRetDyn *)p)->payload[0];
- size = RET_DYN_BITMAP_SIZE;
- p = scavenge_small_bitmap(p, size, bitmap);
-
- // skip over the non-ptr words
- p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
-
- // follow the ptr words
- for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
- evacuate((StgClosure **)p);
- p++;
- }
- continue;
- }
-
case RET_FUN:
{
StgRetFun *ret_fun = (StgRetFun *)p;