author     Simon Marlow <marlowsd@gmail.com>   2012-10-03 09:30:56 +0100
committer  Simon Marlow <marlowsd@gmail.com>   2012-10-08 09:04:40 +0100
commit     a7c0387d20c1c9994d1100b14fbb8fb4e28a259e (patch)
tree       b95d0a512f951a4a463f1aa5178b0cd5c4fdb410 /rts/sm
parent     aed37acd4d157791381800d5de960a2461bcbef3 (diff)
Produce new-style Cmm from the Cmm parser
The main change here is that the Cmm parser now allows high-level cmm
code with argument-passing and function calls.  For example:

  foo ( gcptr a, bits32 b )
  {
    if (b > 0) {
       // we can make tail calls passing arguments:
       jump stg_ap_0_fast(a);
    }
    return (x,y);
  }

More details on the new cmm syntax are in Note [Syntax of .cmm files]
in CmmParse.y.

The old syntax is still more-or-less supported for those occasional
code fragments that really need to explicitly manipulate the stack.
However there are a couple of differences: it is now obligatory to
give a list of live GlobalRegs on every jump, e.g.

  jump %ENTRY_CODE(Sp(0)) [R1];

Again, more details in Note [Syntax of .cmm files].

I have rewritten most of the .cmm files in the RTS into the new
syntax, except for AutoApply.cmm which is generated by the genapply
program: this file could be generated in the new syntax instead and
would probably be better off for it, but I ran out of enthusiasm.

Some other changes in this batch:

- The PrimOp calling convention is gone; primops now use the ordinary
  NativeNodeCall convention.  This means that primops and "foreign
  import prim" code must be written in high-level cmm, but they can
  now take more than 10 arguments (see the sketch below).

- CmmSink now does constant-folding (should fix #7219).

- .cmm files now go through the cmmPipeline, and as a result we
  generate better code in many cases.  All the object files generated
  for the RTS .cmm files are now smaller.  Performance should be
  better too, but I haven't measured it yet.

- RET_DYN frames are removed from the RTS, so lots of code goes away.

- We now have some more canned GC points to cover unboxed tuples with
  2-4 pointers, which will reduce code size a little.
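As a rough illustration of that last point about primops (not taken from
this patch), a primop written in the new high-level cmm style might look
like the sketch below; the name stg_examplePrimzh, its argument names,
and its body are hypothetical:

  stg_examplePrimzh ( gcptr a, W_ n )    /* hypothetical primop */
  {
      W_ r;
      r = n + 1;
      // arguments and results use the ordinary NativeNodeCall
      // convention; no explicit stack manipulation is needed here.
      return (a, r);
  }

Because the code generator now handles argument and result passing for
such definitions, primops written this way are no longer limited to 10
arguments as they were under the old PrimOp convention.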
Diffstat (limited to 'rts/sm')
-rw-r--r--  rts/sm/Compact.c  32
-rw-r--r--  rts/sm/Evac.c      1
-rw-r--r--  rts/sm/Sanity.c   27
-rw-r--r--  rts/sm/Scav.c     26
4 files changed, 1 insertion, 85 deletions
diff --git a/rts/sm/Compact.c b/rts/sm/Compact.c
index c97e168433..34111f9206 100644
--- a/rts/sm/Compact.c
+++ b/rts/sm/Compact.c
@@ -301,37 +301,7 @@ thread_stack(StgPtr p, StgPtr stack_end)
switch (info->i.type) {
- // Dynamic bitmap: the mask is stored on the stack
- case RET_DYN:
- {
- StgWord dyn;
- dyn = ((StgRetDyn *)p)->liveness;
-
- // traverse the bitmap first
- bitmap = RET_DYN_LIVENESS(dyn);
- p = (P_)&((StgRetDyn *)p)->payload[0];
- size = RET_DYN_BITMAP_SIZE;
- while (size > 0) {
- if ((bitmap & 1) == 0) {
- thread((StgClosure **)p);
- }
- p++;
- bitmap = bitmap >> 1;
- size--;
- }
-
- // skip over the non-ptr words
- p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
-
- // follow the ptr words
- for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
- thread((StgClosure **)p);
- p++;
- }
- continue;
- }
-
- // small bitmap (<= 32 entries, or 64 on a 64-bit machine)
+ // small bitmap (<= 32 entries, or 64 on a 64-bit machine)
case CATCH_RETRY_FRAME:
case CATCH_STM_FRAME:
case ATOMICALLY_FRAME:
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 8be393b4bc..0ac9e2623a 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -670,7 +670,6 @@ loop:
case RET_BCO:
case RET_SMALL:
case RET_BIG:
- case RET_DYN:
case UPDATE_FRAME:
case UNDERFLOW_FRAME:
case STOP_FRAME:
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index 5c7fb8aa76..6237662720 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -105,32 +105,6 @@ checkStackFrame( StgPtr c )
/* All activation records have 'bitmap' style layout info. */
switch (info->i.type) {
- case RET_DYN: /* Dynamic bitmap: the mask is stored on the stack */
- {
- StgWord dyn;
- StgPtr p;
- StgRetDyn* r;
-
- r = (StgRetDyn *)c;
- dyn = r->liveness;
-
- p = (P_)(r->payload);
- checkSmallBitmap(p,RET_DYN_LIVENESS(r->liveness),RET_DYN_BITMAP_SIZE);
- p += RET_DYN_BITMAP_SIZE + RET_DYN_NONPTR_REGS_SIZE;
-
- // skip over the non-pointers
- p += RET_DYN_NONPTRS(dyn);
-
- // follow the ptr words
- for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
- checkClosureShallow((StgClosure *)*p);
- p++;
- }
-
- return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
- RET_DYN_NONPTR_REGS_SIZE +
- RET_DYN_NONPTRS(dyn) + RET_DYN_PTRS(dyn);
- }
case UPDATE_FRAME:
ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgUpdateFrame*)c)->updatee));
@@ -381,7 +355,6 @@ checkClosure( StgClosure* p )
case RET_BCO:
case RET_SMALL:
case RET_BIG:
- case RET_DYN:
case UPDATE_FRAME:
case UNDERFLOW_FRAME:
case STOP_FRAME:
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index cbdf01b720..668b95da6b 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -1685,32 +1685,6 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
goto follow_srt;
}
- // Dynamic bitmap: the mask is stored on the stack, and
- // there are a number of non-pointers followed by a number
- // of pointers above the bitmapped area. (see StgMacros.h,
- // HEAP_CHK_GEN).
- case RET_DYN:
- {
- StgWord dyn;
- dyn = ((StgRetDyn *)p)->liveness;
-
- // traverse the bitmap first
- bitmap = RET_DYN_LIVENESS(dyn);
- p = (P_)&((StgRetDyn *)p)->payload[0];
- size = RET_DYN_BITMAP_SIZE;
- p = scavenge_small_bitmap(p, size, bitmap);
-
- // skip over the non-ptr words
- p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
-
- // follow the ptr words
- for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
- evacuate((StgClosure **)p);
- p++;
- }
- continue;
- }
-
case RET_FUN:
{
StgRetFun *ret_fun = (StgRetFun *)p;