author    Ben Gamari <ben@smart-cactus.org>  2022-12-19 19:00:05 -0500
committer Ben Gamari <ben@smart-cactus.org>  2022-12-20 20:05:36 -0500
commit    ea5254d1989743e4ccf8003bd9f0b911d11223da (patch)
tree      759429531b6eeea6f862999843ba16fd3e300491
parent    d32e90bc55f1cc1ff817ae3de9f69e179d92dd94 (diff)
MMTK port
-rw-r--r--  .gitignore | 4
-rw-r--r--  .gitmodules | 3
-rw-r--r--  compiler/GHC/Cmm/CLabel.hs | 3
-rw-r--r--  compiler/GHC/Cmm/Graph.hs | 16
-rw-r--r--  compiler/GHC/Cmm/LayoutStack.hs | 29
-rw-r--r--  compiler/GHC/Driver/Session.hs | 2
-rw-r--r--  compiler/GHC/Platform/Ways.hs | 36
-rw-r--r--  compiler/GHC/StgToCmm/Foreign.hs | 253
-rw-r--r--  compiler/GHC/StgToCmm/Heap.hs | 6
-rw-r--r--  compiler/GHC/StgToCmm/Monad.hs | 6
-rw-r--r--  compiler/GHC/Utils/ExceptionWithPlatform.hs | 50
-rw-r--r--  compiler/ghc.cabal.in | 1
-rw-r--r--  fibo.hs | 27
-rw-r--r--  ghc/Main.hs | 3
-rw-r--r--  hadrian/src/Hadrian/BuildPath.hs | 3
-rw-r--r--  hadrian/src/Settings/Builders/Ghc.hs | 4
-rw-r--r--  hadrian/src/Settings/Default.hs | 12
-rw-r--r--  hadrian/src/Settings/Packages.hs | 1
-rw-r--r--  hadrian/src/UserSettings.hs | 6
-rw-r--r--  hadrian/src/Way.hs | 12
-rw-r--r--  hadrian/src/Way/Type.hs | 3
-rw-r--r--  rts/Arena.c | 48
-rw-r--r--  rts/Capability.c | 22
-rw-r--r--  rts/Capability.h | 16
-rw-r--r--  rts/Heap.c | 48
-rw-r--r--  rts/HeapStackCheck.cmm | 20
-rw-r--r--  rts/MmtkGhc.c | 14
-rw-r--r--  rts/RtsAPI.c | 9
-rw-r--r--  rts/RtsFlags.c | 8
-rw-r--r--  rts/RtsStartup.c | 20
-rw-r--r--  rts/RtsUtils.c | 11
-rw-r--r--  rts/Schedule.c | 70
-rw-r--r--  rts/StablePtr.c | 5
-rw-r--r--  rts/Task.c | 43
-rw-r--r--  rts/Task.h | 5
-rw-r--r--  rts/Threads.c | 47
-rw-r--r--  rts/Threads.h | 9
-rw-r--r--  rts/Updates.h | 47
-rw-r--r--  rts/include/Cmm.h | 7
-rw-r--r--  rts/include/mmtk.h | 142
-rw-r--r--  rts/include/rts/Flags.h | 2
-rw-r--r--  rts/include/rts/StablePtr.h | 1
-rw-r--r--  rts/include/rts/storage/ClosureMacros.h | 4
-rw-r--r--  rts/include/rts/storage/Closures.h | 3
-rw-r--r--  rts/include/rts/storage/InfoTables.h | 8
-rw-r--r--  rts/include/rts/storage/TSO.h | 5
-rw-r--r--  rts/include/stg/Regs.h | 1
-rw-r--r--  rts/linker/M32Alloc.c | 2
-rw-r--r--  rts/mmtk/.gitignore | 17
-rw-r--r--  rts/mmtk/README.md | 83
-rw-r--r--  rts/mmtk/docs/GSoC_report.md | 138
-rw-r--r--  rts/mmtk/docs/introduce_nogc.pdf | bin 0 -> 1122027 bytes
-rw-r--r--  rts/mmtk/flake.lock | 60
-rw-r--r--  rts/mmtk/flake.nix | 32
-rw-r--r--  rts/mmtk/ghc/mmtk.h | 142
-rw-r--r--  rts/mmtk/ghc/mmtk_upcalls.h | 9
m---------  rts/mmtk/mmtk-core | 0
-rw-r--r--  rts/mmtk/mmtk/Cargo.toml | 33
-rw-r--r--  rts/mmtk/mmtk/build.rs | 31
-rw-r--r--  rts/mmtk/mmtk/rust-toolchain | 1
-rw-r--r--  rts/mmtk/mmtk/src/active_plan.rs | 132
-rw-r--r--  rts/mmtk/mmtk/src/api.rs | 236
-rw-r--r--  rts/mmtk/mmtk/src/collection.rs | 68
-rw-r--r--  rts/mmtk/mmtk/src/edges.rs | 146
-rw-r--r--  rts/mmtk/mmtk/src/ghc.rs | 127
-rw-r--r--  rts/mmtk/mmtk/src/lib.rs | 63
-rw-r--r--  rts/mmtk/mmtk/src/mmtk-heap-closure-test/.gitignore | 4
-rw-r--r--  rts/mmtk/mmtk/src/mmtk-heap-closure-test/Main.hs | 70
-rw-r--r--  rts/mmtk/mmtk/src/mmtk-heap-closure-test/Makefile | 15
-rw-r--r--  rts/mmtk/mmtk/src/mmtk-heap-closure-test/c_printClosure.c | 12
-rw-r--r--  rts/mmtk/mmtk/src/mmtk-heap-closure-test/cabal.project | 19
-rw-r--r--  rts/mmtk/mmtk/src/mmtk-heap-closure-test/cmm_printClosure.cmm | 6
-rw-r--r--  rts/mmtk/mmtk/src/mmtk-heap-closure-test/mmtk-heap-closure-test.cabal | 27
-rw-r--r--  rts/mmtk/mmtk/src/object_model.rs | 81
-rw-r--r--  rts/mmtk/mmtk/src/object_scanning.rs | 325
-rw-r--r--  rts/mmtk/mmtk/src/reference_glue.rs | 20
-rw-r--r--  rts/mmtk/mmtk/src/scanning.rs | 191
-rw-r--r--  rts/mmtk/mmtk/src/stg_closures.rs | 958
-rw-r--r--  rts/mmtk/mmtk/src/stg_info_table.rs | 396
-rw-r--r--  rts/mmtk/mmtk/src/test.rs | 168
-rw-r--r--  rts/mmtk/mmtk/src/tests/allocate_with_disable_collection.rs | 22
-rw-r--r--  rts/mmtk/mmtk/src/tests/allocate_with_initialize_collection.rs | 18
-rw-r--r--  rts/mmtk/mmtk/src/tests/allocate_with_re_enable_collection.rs | 26
-rw-r--r--  rts/mmtk/mmtk/src/tests/allocate_without_initialize_collection.rs | 17
-rw-r--r--  rts/mmtk/mmtk/src/tests/conservatism.rs | 154
-rw-r--r--  rts/mmtk/mmtk/src/tests/fixtures/mod.rs | 68
-rw-r--r--  rts/mmtk/mmtk/src/tests/handle_mmap_conflict.rs | 24
-rw-r--r--  rts/mmtk/mmtk/src/tests/handle_mmap_oom.rs | 23
-rw-r--r--  rts/mmtk/mmtk/src/tests/is_in_mmtk_spaces.rs | 69
-rw-r--r--  rts/mmtk/mmtk/src/tests/issue139.rs | 17
-rw-r--r--  rts/mmtk/mmtk/src/tests/malloc.rs | 36
-rw-r--r--  rts/mmtk/mmtk/src/tests/mod.rs | 18
-rw-r--r--  rts/mmtk/mmtk/src/types.rs | 217
-rw-r--r--  rts/mmtk/mmtk/src/util.rs | 23
-rw-r--r--  rts/mmtk/mmtk/wrapper.h | 3
-rw-r--r--  rts/mmtk/notes.hs | 22
-rw-r--r--  rts/rts.cabal.in | 7
-rw-r--r--  rts/sm/CheckGc.cpp | 976
-rw-r--r--  rts/sm/Compact.c | 3
-rw-r--r--  rts/sm/Evac.c | 15
-rw-r--r--  rts/sm/GC.c | 46
-rw-r--r--  rts/sm/HeapUtils.h | 2
-rw-r--r--  rts/sm/MarkWeak.c | 10
-rw-r--r--  rts/sm/NonMoving.c | 1
-rw-r--r--  rts/sm/Sanity.c | 4
-rw-r--r--  rts/sm/Scav.c | 11
-rw-r--r--  rts/sm/Storage.c | 50
-rw-r--r--  rts/sm/Storage.h | 1
-rw-r--r--  testsuite/config/ghc | 3
-rw-r--r--  utils/deriveConstants/Main.hs | 9
110 files changed, 6378 insertions, 224 deletions
diff --git a/.gitignore b/.gitignore
index f9367b9735..b1e3a3d2a4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -248,3 +248,7 @@ ghc.nix/
# clangd
.clangd
dist-newstyle/
+
+
+# fibonacci test
+fibo
\ No newline at end of file
diff --git a/.gitmodules b/.gitmodules
index c44e7335e5..3295395280 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -110,3 +110,6 @@
[submodule "libraries/exceptions"]
path = libraries/exceptions
url = https://gitlab.haskell.org/ghc/packages/exceptions.git
+[submodule "rts/mmtk/mmtk-core"]
+ path = rts/mmtk/mmtk-core
+ url = git@github.com:mmtk/mmtk-core.git
diff --git a/compiler/GHC/Cmm/CLabel.hs b/compiler/GHC/Cmm/CLabel.hs
index c492e8e7c1..1e6499f284 100644
--- a/compiler/GHC/Cmm/CLabel.hs
+++ b/compiler/GHC/Cmm/CLabel.hs
@@ -67,6 +67,7 @@ module GHC.Cmm.CLabel (
mkOutOfBoundsAccessLabel,
mkArrWords_infoLabel,
mkSRTInfoLabel,
+ mkIsMMTk_infoLabel,
mkTopTickyCtrLabel,
mkCAFBlackHoleInfoTableLabel,
@@ -649,6 +650,7 @@ mkDirty_MUT_VAR_Label,
mkCAFBlackHoleInfoTableLabel,
mkSMAP_FROZEN_CLEAN_infoLabel, mkSMAP_FROZEN_DIRTY_infoLabel,
mkSMAP_DIRTY_infoLabel, mkBadAlignmentLabel,
+ mkIsMMTk_infoLabel,
mkOutOfBoundsAccessLabel, mkMUT_VAR_CLEAN_infoLabel :: CLabel
mkDirty_MUT_VAR_Label = mkForeignLabel (fsLit "dirty_MUT_VAR") Nothing ForeignLabelInExternalPackage IsFunction
mkNonmovingWriteBarrierEnabledLabel
@@ -669,6 +671,7 @@ mkSMAP_DIRTY_infoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsL
mkBadAlignmentLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_badAlignment") CmmEntry
mkOutOfBoundsAccessLabel = mkForeignLabel (fsLit "rtsOutOfBoundsAccess") Nothing ForeignLabelInExternalPackage IsFunction
mkMUT_VAR_CLEAN_infoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "stg_MUT_VAR_CLEAN") CmmInfo
+mkIsMMTk_infoLabel = CmmLabel rtsUnitId (NeedExternDecl False) (fsLit "is_MMTk") CmmData
mkSRTInfoLabel :: Int -> CLabel
mkSRTInfoLabel n = CmmLabel rtsUnitId (NeedExternDecl False) lbl CmmInfo
diff --git a/compiler/GHC/Cmm/Graph.hs b/compiler/GHC/Cmm/Graph.hs
index d59658e2af..3516ba0f78 100644
--- a/compiler/GHC/Cmm/Graph.hs
+++ b/compiler/GHC/Cmm/Graph.hs
@@ -5,6 +5,8 @@ module GHC.Cmm.Graph
, (<*>), catAGraphs
, mkLabel, mkMiddle, mkLast, outOfLine
, lgraphOfAGraph, labelAGraph
+ , blockToAGraphOO
+ , blockToAGraph
, stackStubExpr
, mkNop, mkAssign, mkStore
@@ -178,6 +180,20 @@ lgraphOfAGraph g = do
labelAGraph :: BlockId -> CmmAGraphScoped -> CmmGraph
labelAGraph lbl ag = flattenCmmAGraph lbl ag
+blockToAGraphOO :: Block CmmNode O O -> CmmAGraph
+blockToAGraphOO blk =
+ catAGraphs (map mkMiddle $ blockToList blk)
+
+blockToAGraph :: Block CmmNode C C -> CmmAGraph
+blockToAGraph blk =
+ agraph
+ where
+ (CmmEntry blkid tscp, middle, x) = blockSplit blk
+ agraph =
+ mkLabel blkid tscp <*>
+ blockToAGraphOO middle <*>
+ mkLast x
+
---------- No-ops
mkNop :: CmmAGraph
mkNop = nilOL
diff --git a/compiler/GHC/Cmm/LayoutStack.hs b/compiler/GHC/Cmm/LayoutStack.hs
index 0d759f5559..76872b7483 100644
--- a/compiler/GHC/Cmm/LayoutStack.hs
+++ b/compiler/GHC/Cmm/LayoutStack.hs
@@ -254,7 +254,7 @@ cmmLayoutStack cfg procpoints entry_args
blocks_with_reloads <-
insertReloadsAsNeeded platform procpoints final_stackmaps entry new_blocks
- new_blocks' <- mapM (lowerSafeForeignCall profile) blocks_with_reloads
+ new_blocks' <- concat <$> mapM (lowerSafeForeignCall profile) blocks_with_reloads
return (ofBlockList entry new_blocks', final_stackmaps)
-- -----------------------------------------------------------------------------
@@ -1134,9 +1134,9 @@ expecting them (see Note [safe foreign call convention]). Note also
that the safe foreign call is replaced by an unsafe one in the Cmm graph.
-}
-lowerSafeForeignCall :: Profile -> CmmBlock -> UniqSM CmmBlock
+lowerSafeForeignCall :: Profile -> CmmBlock -> UniqSM [CmmBlock]
lowerSafeForeignCall profile block
- | (entry@(CmmEntry _ tscp), middle, CmmForeignCall { .. }) <- blockSplit block
+ | (CmmEntry lbl tscp, middle, CmmForeignCall { .. }) <- blockSplit block
= do
let platform = profilePlatform profile
-- Both 'id' and 'new_base' are KindNonPtr because they're
@@ -1144,8 +1144,8 @@ lowerSafeForeignCall profile block
id <- newTemp (bWord platform)
new_base <- newTemp (cmmRegType platform baseReg)
let (caller_save, caller_load) = callerSaveVolatileRegs platform
- save_state_code <- saveThreadState profile
- load_state_code <- loadThreadState profile
+ save_state_code <- saveThreadState profile tscp
+ load_state_code <- loadThreadState profile tscp
let suspend = save_state_code <*>
caller_save <*>
mkMiddle (callSuspendThread platform id intrbl)
@@ -1175,19 +1175,16 @@ lowerSafeForeignCall profile block
, cml_ret_args = ret_args
, cml_ret_off = ret_off }
- graph' <- lgraphOfAGraph ( suspend <*>
- midCall <*>
- resume <*>
- copyout <*>
- mkLast jump, tscp)
-
- case toBlockList graph' of
- [one] -> let (_, middle', last) = blockSplit one
- in return (blockJoin entry (middle `blockAppend` middle') last)
- _ -> panic "lowerSafeForeignCall0"
+ let graph' = labelAGraph lbl (blockToAGraphOO middle <*>
+ suspend <*>
+ midCall <*>
+ resume <*>
+ copyout <*>
+ mkLast jump, tscp)
+ return $ toBlockList graph'
-- Block doesn't end in a safe foreign call:
- | otherwise = return block
+ | otherwise = return [block]
callSuspendThread :: Platform -> LocalReg -> Bool -> CmmNode O O
diff --git a/compiler/GHC/Driver/Session.hs b/compiler/GHC/Driver/Session.hs
index 831267e2bf..729294b578 100644
--- a/compiler/GHC/Driver/Session.hs
+++ b/compiler/GHC/Driver/Session.hs
@@ -2080,7 +2080,7 @@ dynamic_flags_deps = [
return d)
, make_ord_flag defGhcFlag "debug" (NoArg (addWayDynP WayDebug))
, make_ord_flag defGhcFlag "threaded" (NoArg (addWayDynP WayThreaded))
-
+ , make_ord_flag defGhcFlag "mmtk" (NoArg (addWayDynP WayMMTK))
, make_ord_flag defGhcFlag "ticky"
(NoArg (setGeneralFlag Opt_Ticky >> addWayDynP WayDebug))
diff --git a/compiler/GHC/Platform/Ways.hs b/compiler/GHC/Platform/Ways.hs
index e66b8a496d..818409f805 100644
--- a/compiler/GHC/Platform/Ways.hs
+++ b/compiler/GHC/Platform/Ways.hs
@@ -48,6 +48,7 @@ module GHC.Platform.Ways
, hostIsThreaded
, hostIsDebugged
, hostIsTracing
+ , hostIsMMTK
)
where
@@ -69,6 +70,7 @@ data Way
| WayDebug -- ^ Debugging, enable trace messages and extra checks
| WayProf -- ^ Profiling, enable cost-centre stacks and profiling reports
| WayDyn -- ^ Dynamic linking
+ | WayMMTK -- ^ (RTS) enable the MMTK storage manager, disabling GHC's own storage manager and its GC
deriving (Eq, Ord, Show, Read)
type Ways = Set Way
@@ -117,6 +119,8 @@ wayTag WayThreaded = "thr"
wayTag WayDebug = "debug"
wayTag WayDyn = "dyn"
wayTag WayProf = "p"
+-- wayTag WayTracing = "l" -- "l" for "logging"
+wayTag WayMMTK = "mmtk"
-- | Return true for ways that only impact the RTS, not the generated code
wayRTSOnly :: Way -> Bool
@@ -125,6 +129,8 @@ wayRTSOnly WayDyn = False
wayRTSOnly WayProf = False
wayRTSOnly WayThreaded = True
wayRTSOnly WayDebug = True
+-- wayRTSOnly WayTracing = True
+wayRTSOnly WayMMTK = True -- might affect generated code as well
-- | Filter ways that have an impact on compilation
fullWays :: Ways -> Ways
@@ -140,6 +146,8 @@ wayDesc WayThreaded = "Threaded"
wayDesc WayDebug = "Debug"
wayDesc WayDyn = "Dynamic"
wayDesc WayProf = "Profiling"
+-- wayDesc WayTracing = "Tracing"
+wayDesc WayMMTK = "MMTK"
-- | Turn these flags on when enabling this way
wayGeneralFlags :: Platform -> Way -> [GeneralFlag]
@@ -155,6 +163,8 @@ wayGeneralFlags _ WayDyn = [Opt_PIC, Opt_ExternalDynamicRefs]
-- PIC objects can be linked into a .so, we have to compile even
-- modules of the main program with -fPIC when using -dynamic.
wayGeneralFlags _ WayProf = []
+-- wayGeneralFlags _ WayTracing = []
+wayGeneralFlags _ WayMMTK = []
-- | Turn these flags off when enabling this way
wayUnsetGeneralFlags :: Platform -> Way -> [GeneralFlag]
@@ -165,6 +175,8 @@ wayUnsetGeneralFlags _ WayDyn = [Opt_SplitSections]
-- There's no point splitting when we're going to be dynamically linking.
-- Plus it breaks compilation on OSX x86.
wayUnsetGeneralFlags _ WayProf = []
+-- wayUnsetGeneralFlags _ WayTracing = []
+wayUnsetGeneralFlags _ WayMMTK = []
-- | Pass these options to the C compiler when enabling this way
wayOptc :: Platform -> Way -> [String]
@@ -176,6 +188,8 @@ wayOptc platform WayThreaded = case platformOS platform of
wayOptc _ WayDebug = []
wayOptc _ WayDyn = []
wayOptc _ WayProf = ["-DPROFILING"]
+-- wayOptc _ WayTracing = ["-DTRACING"]
+wayOptc _ WayMMTK = ["-DMMTK_GHC"]
-- | Pass these options to linker when enabling this way
wayOptl :: Platform -> Way -> [String]
@@ -191,6 +205,8 @@ wayOptl platform WayThreaded =
wayOptl _ WayDebug = []
wayOptl _ WayDyn = []
wayOptl _ WayProf = []
+-- wayOptl _ WayTracing = []
+wayOptl _ WayMMTK = []
-- | Pass these options to the preprocessor when enabling this way
wayOptP :: Platform -> Way -> [String]
@@ -199,6 +215,8 @@ wayOptP _ WayThreaded = []
wayOptP _ WayDebug = []
wayOptP _ WayDyn = []
wayOptP _ WayProf = ["-DPROFILING"]
+-- wayOptP _ WayTracing = ["-DTRACING"]
+wayOptP _ WayMMTK = ["-DMMTK_GHC"]
-- | Consult the RTS to find whether it has been built with profiling enabled.
@@ -217,6 +235,21 @@ hostIsDynamic = rtsIsDynamic_ /= 0
foreign import ccall unsafe "rts_isDynamic" rtsIsDynamic_ :: Int
-- we need this until the bootstrap GHC is always recent enough
+#if MIN_VERSION_GLASGOW_HASKELL(9,3,0,0)
+
+-- | Consult the RTS to find whether it is in MMTK mode.
+hostIsMMTK :: Bool
+hostIsMMTK = rts_isMMTK_ /= 0
+
+foreign import ccall unsafe "rts_isMMTK" rts_isMMTK_ :: Int
+
+#else
+
+hostIsMMTK :: Bool
+hostIsMMTK = False
+
+#endif
+
#if MIN_VERSION_GLASGOW_HASKELL(9,1,0,0)
-- | Consult the RTS to find whether it is threaded.
@@ -237,7 +270,6 @@ hostIsTracing = rtsIsTracing_ /= 0
foreign import ccall unsafe "rts_isTracing" rtsIsTracing_ :: Int
-
#else
hostIsThreaded :: Bool
@@ -259,6 +291,8 @@ hostWays = Set.unions
, if hostIsProfiled then Set.singleton WayProf else Set.empty
, if hostIsThreaded then Set.singleton WayThreaded else Set.empty
, if hostIsDebugged then Set.singleton WayDebug else Set.empty
+ -- , if hostIsTracing then Set.singleton WayTracing else Set.empty
+ , if hostIsMMTK then Set.singleton WayMMTK else Set.empty
]
-- | Host "full" ways (i.e. ways that have an impact on the compilation,
diff --git a/compiler/GHC/StgToCmm/Foreign.hs b/compiler/GHC/StgToCmm/Foreign.hs
index e71c418530..a4d6511f5b 100644
--- a/compiler/GHC/StgToCmm/Foreign.hs
+++ b/compiler/GHC/StgToCmm/Foreign.hs
@@ -194,9 +194,9 @@ continuation, resulting in just one proc point instead of two. Yay!
-}
-emitCCall :: [(CmmFormal,ForeignHint)]
- -> CmmExpr
- -> [(CmmActual,ForeignHint)]
+emitCCall :: [(CmmFormal,ForeignHint)] -- results
+ -> CmmExpr -- the function to call
+ -> [(CmmActual,ForeignHint)] -- argument expressions
-> FCode ()
emitCCall hinted_results fn hinted_args
= void $ emitForeignCall PlayRisky results target args
@@ -288,15 +288,16 @@ maybe_assign_temp e = do
emitSaveThreadState :: FCode ()
emitSaveThreadState = do
profile <- getProfile
- code <- saveThreadState profile
+ tscp <- getTickScope
+ code <- saveThreadState profile tscp
emit code
-- | Produce code to save the current thread state to @CurrentTSO@
-saveThreadState :: MonadUnique m => Profile -> m CmmAGraph
-saveThreadState profile = do
+saveThreadState :: MonadUnique m => Profile -> CmmTickScope -> m CmmAGraph
+saveThreadState profile tscp = do
let platform = profilePlatform profile
tso <- newTemp (gcWord platform)
- close_nursery <- closeNursery profile tso
+ close_nursery <- closeNursery profile tscp tso
pure $ catAGraphs
[ -- tso = CurrentTSO;
mkAssign (CmmLocal tso) currentTSOExpr
@@ -405,7 +406,8 @@ emitCloseNursery = do
profile <- getProfile
let platform = profilePlatform profile
tso <- newTemp (bWord platform)
- code <- closeNursery profile tso
+ tscp <- getTickScope
+ code <- closeNursery profile tscp tso
emit $ mkAssign (CmmLocal tso) currentTSOExpr <*> code
{- |
@@ -429,45 +431,68 @@ Closing the nursery corresponds to the following code:
cn->free = Hp + WDS(1);
@
-}
-closeNursery :: MonadUnique m => Profile -> LocalReg -> m CmmAGraph
-closeNursery profile tso = do
- let tsoreg = CmmLocal tso
- platform = profilePlatform profile
- cnreg <- CmmLocal <$> newTemp (bWord platform)
- pure $ catAGraphs [
- mkAssign cnreg currentNurseryExpr,
-
- -- CurrentNursery->free = Hp+1;
- mkStore (nursery_bdescr_free platform cnreg) (cmmOffsetW platform hpExpr 1),
-
- let alloc =
- CmmMachOp (mo_wordSub platform)
- [ cmmOffsetW platform hpExpr 1
- , cmmLoadBWord platform (nursery_bdescr_start platform cnreg)
- ]
-
- alloc_limit = cmmOffset platform (CmmReg tsoreg) (tso_alloc_limit profile)
- in
+closeNursery :: MonadUnique m => Profile -> CmmTickScope -> LocalReg -> m CmmAGraph
+closeNursery profile tscp tso = do
+ ghcsm <- ghcsm_path
+ mmtk <- mmtk_path
+ mkCmmIfThenElseUniq tscp is_MMTk mmtk ghcsm (Just False)
+ where
+ tsoreg = CmmLocal tso
+ platform = profilePlatform profile
+ isMMTk = cmmLoadBWord platform (CmmLit $ CmmLabel mkIsMMTk_infoLabel)
+ zero = zeroExpr platform
+ is_MMTk = cmmNeWord platform isMMTk zero
+
+ ghcsm_path = do
+ cnreg <- CmmLocal <$> newTemp (bWord platform)
+ pure $ catAGraphs [
+ mkAssign cnreg currentNurseryExpr,
+
+ -- CurrentNursery->free = Hp + WORD_SIZE;
+ mkStore (nursery_bdescr_free platform cnreg) (cmmOffsetW platform hpExpr 1),
+
+ let alloc =
+ CmmMachOp (mo_wordSub platform)
+ [ cmmOffsetW platform hpExpr 1
+ , cmmLoadBWord platform (nursery_bdescr_start platform cnreg)
+ ]
+
+ alloc_limit = cmmOffset platform (CmmReg tsoreg) (tso_alloc_limit profile)
+ in
+
+ -- tso->alloc_limit += alloc
+ mkStore alloc_limit (CmmMachOp (MO_Sub W64)
+ [ CmmLoad alloc_limit b64 NaturallyAligned
+ , CmmMachOp (mo_WordTo64 platform) [alloc] ])
+ ]
+
+ mmtk_path = do
+ bump_alloc_reg <- CmmLocal <$> newTemp (bWord platform)
+ pure $ catAGraphs [
+ mkAssign bump_alloc_reg (mmtkBumpAllocator platform),
+
+ -- P_[BumpAlloc->cursor] = Hp + WORD_SIZE
+ let cursor_ptr = bumpAllocator_cursor platform bump_alloc_reg
+ in mkStore cursor_ptr (cmmOffsetW platform (CmmReg hpReg) 1)
+
+ -- TODO: update alloc
+ ]
- -- tso->alloc_limit += alloc
- mkStore alloc_limit (CmmMachOp (MO_Sub W64)
- [ CmmLoad alloc_limit b64 NaturallyAligned
- , CmmMachOp (mo_WordTo64 platform) [alloc] ])
- ]
emitLoadThreadState :: FCode ()
emitLoadThreadState = do
profile <- getProfile
- code <- loadThreadState profile
+ tscp <- getTickScope
+ code <- loadThreadState profile tscp
emit code
-- | Produce code to load the current thread state from @CurrentTSO@
-loadThreadState :: MonadUnique m => Profile -> m CmmAGraph
-loadThreadState profile = do
+loadThreadState :: MonadUnique m => Profile -> CmmTickScope -> m CmmAGraph
+loadThreadState profile tscp = do
let platform = profilePlatform profile
tso <- newTemp (gcWord platform)
stack <- newTemp (gcWord platform)
- open_nursery <- openNursery profile tso
+ open_nursery <- openNursery profile tscp tso
pure $ catAGraphs [
-- tso = CurrentTSO;
mkAssign (CmmLocal tso) currentTSOExpr,
@@ -478,11 +503,11 @@ loadThreadState profile = do
-- SpLim = stack->stack + RESERVED_STACK_WORDS;
mkAssign spLimReg (cmmOffsetW platform (cmmOffset platform (CmmReg (CmmLocal stack)) (stack_STACK profile))
(pc_RESERVED_STACK_WORDS (platformConstants platform))),
+ open_nursery,
-- HpAlloc = 0;
-- HpAlloc is assumed to be set to non-zero only by a failed
-- a heap check, see HeapStackCheck.cmm:GC_GENERIC
mkAssign hpAllocReg (zeroExpr platform),
- open_nursery,
-- and load the current cost centre stack from the TSO when profiling:
if profileIsProfiling profile
then let ccs_ptr = cmmOffset platform (CmmReg (CmmLocal tso)) (tso_CCCS profile)
@@ -496,7 +521,8 @@ emitOpenNursery = do
profile <- getProfile
let platform = profilePlatform profile
tso <- newTemp (bWord platform)
- code <- openNursery profile tso
+ tscp <- getTickScope
+ code <- openNursery profile tscp tso
emit $ mkAssign (CmmLocal tso) currentTSOExpr <*> code
{- |
@@ -527,57 +553,106 @@ Opening the nursery corresponds to the following code:
HpLim = bdstart + CurrentNursery->blocks*BLOCK_SIZE_W - 1;
@
-}
-openNursery :: MonadUnique m => Profile -> LocalReg -> m CmmAGraph
-openNursery profile tso = do
- let tsoreg = CmmLocal tso
- platform = profilePlatform profile
- cnreg <- CmmLocal <$> newTemp (bWord platform)
- bdfreereg <- CmmLocal <$> newTemp (bWord platform)
- bdstartreg <- CmmLocal <$> newTemp (bWord platform)
-
- -- These assignments are carefully ordered to reduce register
- -- pressure and generate not completely awful code on x86. To see
- -- what code we generate, look at the assembly for
- -- stg_returnToStackTop in rts/StgStartup.cmm.
- pure $ catAGraphs [
- mkAssign cnreg currentNurseryExpr,
- mkAssign bdfreereg (cmmLoadBWord platform (nursery_bdescr_free platform cnreg)),
-
- -- Hp = CurrentNursery->free - 1;
- mkAssign hpReg (cmmOffsetW platform (CmmReg bdfreereg) (-1)),
-
- mkAssign bdstartreg (cmmLoadBWord platform (nursery_bdescr_start platform cnreg)),
-
- -- HpLim = CurrentNursery->start +
- -- CurrentNursery->blocks*BLOCK_SIZE_W - 1;
- mkAssign hpLimReg
- (cmmOffsetExpr platform
- (CmmReg bdstartreg)
- (cmmOffset platform
- (CmmMachOp (mo_wordMul platform)
- [ CmmMachOp (MO_SS_Conv W32 (wordWidth platform))
- [CmmLoad (nursery_bdescr_blocks platform cnreg) b32 NaturallyAligned]
- , mkIntExpr platform (pc_BLOCK_SIZE (platformConstants platform))
- ])
- (-1)
- )
- ),
-
- -- alloc = bd->free - bd->start
- let alloc =
- CmmMachOp (mo_wordSub platform) [CmmReg bdfreereg, CmmReg bdstartreg]
-
- alloc_limit = cmmOffset platform (CmmReg tsoreg) (tso_alloc_limit profile)
- in
-
- -- tso->alloc_limit += alloc
- mkStore alloc_limit (CmmMachOp (MO_Add W64)
- [ CmmLoad alloc_limit b64 NaturallyAligned
- , CmmMachOp (mo_WordTo64 platform) [alloc] ])
-
- ]
+openNursery :: MonadUnique m => Profile -> CmmTickScope -> LocalReg -> m CmmAGraph
+openNursery profile tscp tso = do
+ ghcsm <- ghcsm_path
+ mmtk <- mmtk_path
+ mkCmmIfThenElseUniq tscp is_MMTk mmtk ghcsm (Just False)
+ where
+ tsoreg = CmmLocal tso
+ platform = profilePlatform profile
+ isMMTk = cmmLoadBWord platform (CmmLit $ CmmLabel mkIsMMTk_infoLabel)
+ zero = zeroExpr platform
+ is_MMTk = cmmNeWord platform isMMTk zero
+
+ ghcsm_path = do
+ cnreg <- CmmLocal <$> newTemp (bWord platform)
+ bdfreereg <- CmmLocal <$> newTemp (bWord platform)
+ bdstartreg <- CmmLocal <$> newTemp (bWord platform)
+
+ -- These assignments are carefully ordered to reduce register
+ -- pressure and generate not completely awful code on x86. To see
+ -- what code we generate, look at the assembly for
+ -- stg_returnToStackTop in rts/StgStartup.cmm.
+ pure $ catAGraphs [
+ mkAssign cnreg currentNurseryExpr,
+ mkAssign bdfreereg (cmmLoadBWord platform (nursery_bdescr_free platform cnreg)),
+
+ -- Hp = CurrentNursery->free - WORD_SIZE;
+ mkAssign hpReg (cmmOffsetW platform (CmmReg bdfreereg) (-1)),
+
+ mkAssign bdstartreg (cmmLoadBWord platform (nursery_bdescr_start platform cnreg)),
+
+ -- HpLim = CurrentNursery->start +
+ -- CurrentNursery->blocks*BLOCK_SIZE_W - 1;
+ mkAssign hpLimReg
+ (cmmOffsetExpr platform
+ (CmmReg bdstartreg)
+ (cmmOffset platform
+ (CmmMachOp (mo_wordMul platform)
+ [ CmmMachOp (MO_SS_Conv W32 (wordWidth platform))
+ [CmmLoad (nursery_bdescr_blocks platform cnreg) b32 NaturallyAligned]
+ , mkIntExpr platform (pc_BLOCK_SIZE (platformConstants platform))
+ ])
+ (-1)
+ )
+ ),
+
+ -- alloc = bd->free - bd->start
+ let alloc =
+ CmmMachOp (mo_wordSub platform) [CmmReg bdfreereg, CmmReg bdstartreg]
+
+ alloc_limit = cmmOffset platform (CmmReg tsoreg) (tso_alloc_limit profile)
+ in
+
+ -- tso->alloc_limit += alloc
+ mkStore alloc_limit (CmmMachOp (MO_Add W64)
+ [ CmmLoad alloc_limit b64 NaturallyAligned
+ , CmmMachOp (mo_WordTo64 platform) [alloc] ])
+ ]
+
+ -- Note [MMTK off-by-one]
+ -- ~~~~~~~~~~~~~~~~~~~~~~
+ -- The STG machine's Hp register always points to the first byte of the
+ -- *last allocated* word. By contrast, MMTK's BumpAllocator::cursor field
+ -- points to the first byte of the first unallocated word. We must take
+ -- care to account for this offset when opening/closing the nursery.
+ --
+ -- In the case of the limit semantics, STG's HpLim register points to the
+ -- last byte *inside* of the nursery, which is the same as MMTk's
+ -- semantics.
+ mmtk_path = do
+ bump_alloc_reg <- CmmLocal <$> newTemp (bWord platform)
+ cursor_reg <- CmmLocal <$> newTemp (bWord platform)
+ limit_reg <- CmmLocal <$> newTemp (bWord platform)
+ pure $ catAGraphs [
+ mkAssign bump_alloc_reg (mmtkBumpAllocator platform),
+ mkAssign cursor_reg (cmmLoadBWord platform (bumpAllocator_cursor platform bump_alloc_reg)),
+ mkAssign limit_reg (cmmLoadBWord platform (bumpAllocator_limit platform bump_alloc_reg)),
+
+ -- Hp = BumpAlloc->cursor - WORD_SIZE
+ mkAssign hpReg $ cmmOffsetW platform (CmmReg cursor_reg) (-1),
+
+ -- HpLim = BumpAlloc->limit
+ mkAssign hpLimReg (cmmOffsetW platform (CmmReg limit_reg) (-1))
+
+ -- TODO: alloc = bd->free - bd->start
+ ]
+
+mmtkBumpAllocator :: Platform -> CmmExpr
+mmtkBumpAllocator platform =
+ bump_alloc_addr
+ where
+ pc = platformConstants platform
+ capability_addr = cmmOffset platform baseExpr (negate $ pc_OFFSET_Capability_r pc) -- :: Ptr Capability
+ task_addr = cmmLoadBWord platform (cmmOffset platform capability_addr (pc_OFFSET_Capability_running_task pc)) -- :: Ptr Task
+ mutator_addr = cmmLoadBWord platform (cmmOffset platform task_addr (pc_OFFSET_Task_mmutator pc)) -- :: Ptr MMTK_Mutator
+ -- TODO: Account for potential offset; we currently assume that `mutator.allocators.bump_allocator[0]` is
+ -- at offset 0 of `mutator`
+ bump_alloc_addr = mutator_addr -- :: Ptr BumpAllocator
nursery_bdescr_free, nursery_bdescr_start, nursery_bdescr_blocks
+ , bumpAllocator_cursor, bumpAllocator_limit
:: Platform -> CmmReg -> CmmExpr
nursery_bdescr_free platform cn =
cmmOffset platform (CmmReg cn) (pc_OFFSET_bdescr_free (platformConstants platform))
@@ -585,6 +660,10 @@ nursery_bdescr_start platform cn =
cmmOffset platform (CmmReg cn) (pc_OFFSET_bdescr_start (platformConstants platform))
nursery_bdescr_blocks platform cn =
cmmOffset platform (CmmReg cn) (pc_OFFSET_bdescr_blocks (platformConstants platform))
+bumpAllocator_cursor platform cn =
+ cmmOffset platform (CmmReg cn) (pc_OFFSET_BumpAllocator_cursor (platformConstants platform))
+bumpAllocator_limit platform cn =
+ cmmOffset platform (CmmReg cn) (pc_OFFSET_BumpAllocator_limit (platformConstants platform))
tso_stackobj, tso_CCCS, tso_alloc_limit, stack_STACK, stack_SP :: Profile -> ByteOff
tso_stackobj profile = closureField profile (pc_OFFSET_StgTSO_stackobj (profileConstants profile))
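To make Note [MMTK off-by-one] concrete, the following is a minimal, self-contained C sketch of what the generated open/close-nursery code computes in the MMTk case (illustrative only, not part of the patch; BumpAllocator mirrors the cursor/limit fields referenced above, and WORD stands for sizeof(StgWord)):

    #include <stdint.h>
    #include <stdio.h>

    #define WORD sizeof(uintptr_t)

    /* the two MMTk fields the generated code touches */
    typedef struct {
        uintptr_t cursor;  /* first byte of the first unallocated word */
        uintptr_t limit;   /* allocation limit of the region */
    } BumpAllocator;

    /* openNursery, mmtk_path: Hp points at the last allocated word, one
     * word before the cursor; HpLim is likewise offset by one word, as
     * in the Cmm generated above. */
    static void open_nursery(const BumpAllocator *ba,
                             uintptr_t *Hp, uintptr_t *HpLim)
    {
        *Hp    = ba->cursor - WORD;
        *HpLim = ba->limit  - WORD;
    }

    /* closeNursery, mmtk_path: publish Hp back as the cursor */
    static void close_nursery(BumpAllocator *ba, uintptr_t Hp)
    {
        ba->cursor = Hp + WORD;
    }

    int main(void)
    {
        BumpAllocator ba = { .cursor = 0x1000, .limit = 0x2000 };
        uintptr_t Hp, HpLim;
        open_nursery(&ba, &Hp, &HpLim);
        Hp += 4 * WORD;                 /* the mutator allocates 4 words */
        close_nursery(&ba, Hp);
        printf("cursor after 4 words: %#lx\n", (unsigned long)ba.cursor);
        return 0;
    }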
diff --git a/compiler/GHC/StgToCmm/Heap.hs b/compiler/GHC/StgToCmm/Heap.hs
index 55132860d6..74a9b1e552 100644
--- a/compiler/GHC/StgToCmm/Heap.hs
+++ b/compiler/GHC/StgToCmm/Heap.hs
@@ -106,6 +106,7 @@ allocDynClosureCmm mb_id info_tbl lf_info use_cc _blame_cc amodes_w_offsets = do
-- | Low-level heap object allocation.
+-- Combines GHCSM heap allocation with a call to MMTk allocation.
allocHeapClosure
:: SMRep -- ^ representation of the object
-> CmmExpr -- ^ info pointer
@@ -124,9 +125,9 @@ allocHeapClosure rep info_ptr use_cc payload = do
-- Remember, virtHp points to last allocated word,
-- ie 1 *before* the info-ptr word of new object.
- base <- getHpRelOffset info_offset
+ base <- getHpRelOffset info_offset -- actual allocation
emitComment $ mkFastString "allocHeapClosure"
- emitSetDynHdr base info_ptr use_cc
+ emitSetDynHdr base info_ptr use_cc -- info pointer header and profiling header
-- Fill in the fields
hpStore base payload
@@ -137,7 +138,6 @@ allocHeapClosure rep info_ptr use_cc payload = do
return base
-
emitSetDynHdr :: CmmExpr -> CmmExpr -> CmmExpr -> FCode ()
emitSetDynHdr base info_ptr ccs
= do profile <- getProfile
diff --git a/compiler/GHC/StgToCmm/Monad.hs b/compiler/GHC/StgToCmm/Monad.hs
index 9f9d292937..5288abd6fd 100644
--- a/compiler/GHC/StgToCmm/Monad.hs
+++ b/compiler/GHC/StgToCmm/Monad.hs
@@ -32,6 +32,7 @@ module GHC.StgToCmm.Monad (
mkCmmIfThenElse, mkCmmIfThen, mkCmmIfGoto,
mkCmmIfThenElse', mkCmmIfThen', mkCmmIfGoto',
+ mkCmmIfThenElseUniq,
mkCall, mkCmmCall,
@@ -814,6 +815,11 @@ mkCmmIfThenElse' :: CmmExpr -> CmmAGraph -> CmmAGraph
-> Maybe Bool -> FCode CmmAGraph
mkCmmIfThenElse' e tbranch fbranch likely = do
tscp <- getTickScope
+ mkCmmIfThenElseUniq tscp e tbranch fbranch likely
+
+mkCmmIfThenElseUniq :: MonadUnique m => CmmTickScope -> CmmExpr -> CmmAGraph -> CmmAGraph
+ -> Maybe Bool -> m CmmAGraph
+mkCmmIfThenElseUniq tscp e tbranch fbranch likely = do
endif <- newBlockId
tid <- newBlockId
fid <- newBlockId
diff --git a/compiler/GHC/Utils/ExceptionWithPlatform.hs b/compiler/GHC/Utils/ExceptionWithPlatform.hs
new file mode 100644
index 0000000000..823c5fd5f1
--- /dev/null
+++ b/compiler/GHC/Utils/ExceptionWithPlatform.hs
@@ -0,0 +1,50 @@
+{-# LANGUAGE GADTs #-}
+
+module GHC.Utils.ExceptionWithPlatform
+ ( -- * Exceptions requiring 'Platform'
+ ExceptionWithPlatform(..)
+ , handleWithPlatform
+ , rethrowWithPlatform
+ , throwWithPlatform
+ ) where
+
+import GHC.Prelude
+import GHC.Platform
+
+import Control.Monad.Catch
+import Control.Exception (throw)
+import Data.Typeable ( cast, typeRep )
+
+-- | An exception which requires access to a 'Platform' to produce.
+data ExceptionWithPlatform where
+ ExceptionWithPlatform :: forall a. (Exception a)
+ => (Platform -> a) -> ExceptionWithPlatform
+
+instance Show ExceptionWithPlatform where
+ show (ExceptionWithPlatform (_ :: Platform -> a)) =
+ "ExceptionWithPlatform @("++show ty++") _"
+ where
+ ty = typeRep ([] :: [a])
+
+instance Exception ExceptionWithPlatform
+
+handleWithPlatform
+ :: forall m a r. (Exception a, MonadCatch m)
+ => Platform
+ -> (a -> m r)
+ -> m r
+ -> m r
+handleWithPlatform platform handler action =
+ catchJust select action handler
+ where
+ select :: ExceptionWithPlatform -> Maybe a
+ select (ExceptionWithPlatform e) = cast e
+
+rethrowWithPlatform
+ :: (MonadCatch m) => Platform -> m r -> m r
+rethrowWithPlatform platform action =
+ catch action (\(ExceptionWithPlatform f) -> throwM (f platform))
+
+throwWithPlatform :: (Exception a) => (Platform -> a) -> r
+throwWithPlatform f = throw (ExceptionWithPlatform f)
+
diff --git a/compiler/ghc.cabal.in b/compiler/ghc.cabal.in
index 4b66f8dc33..0d2f198dbc 100644
--- a/compiler/ghc.cabal.in
+++ b/compiler/ghc.cabal.in
@@ -841,6 +841,7 @@ Library
GHC.Utils.Constants
GHC.Utils.Error
GHC.Utils.Exception
+ GHC.Utils.ExceptionWithPlatform
GHC.Utils.Fingerprint
GHC.Utils.FV
GHC.Utils.GlobalVars
diff --git a/fibo.hs b/fibo.hs
new file mode 100644
index 0000000000..9f7f87baac
--- /dev/null
+++ b/fibo.hs
@@ -0,0 +1,27 @@
+{-# LANGUAGE BangPatterns #-}
+
+import System.Environment
+main = do
+ [x] <- getArgs
+ let n :: Int
+ n = read x
+ let fibo = fibo2
+ print $ fibo n
+ print $ fibo (2*n)
+
+fibo_list :: Int -> Int
+fibo_list n = fibs !! n
+
+fibs :: [Int]
+fibs = 1 : 1 : go 1 1
+ where
+ go n0 n1 =
+ let n2 = n0+n1
+ in n2 : go n1 n2
+
+fibo2 :: Int -> Integer
+fibo2 = go 1 1
+ where
+ go !x0 !x1 1 = x1
+ go !x0 !x1 n = let x2 = x0 + x1
+ in go x1 x2 (n-1)
\ No newline at end of file
diff --git a/ghc/Main.hs b/ghc/Main.hs
index ae862a7014..dd6bc67179 100644
--- a/ghc/Main.hs
+++ b/ghc/Main.hs
@@ -60,6 +60,7 @@ import GHC.Types.Unique.Supply
import GHC.Types.PkgQual
import GHC.Utils.Error
+import GHC.Utils.ExceptionWithPlatform
import GHC.Utils.Misc
import GHC.Utils.Panic
import GHC.Utils.Outputable as Outputable
@@ -272,7 +273,7 @@ main' postLoadMode units dflags0 args flagWarnings = do
---------------- Do the business -----------
handleSourceError (\e -> do
GHC.printException e
- liftIO $ exitWith (ExitFailure 1)) $ do
+ liftIO $ exitWith (ExitFailure 1)) $ rethrowWithPlatform (targetPlatform dflags6) $ do
case postLoadMode of
ShowInterface f -> liftIO $ showIface logger
(hsc_dflags hsc_env)
diff --git a/hadrian/src/Hadrian/BuildPath.hs b/hadrian/src/Hadrian/BuildPath.hs
index 8d2806b587..5d21eb953b 100644
--- a/hadrian/src/Hadrian/BuildPath.hs
+++ b/hadrian/src/Hadrian/BuildPath.hs
@@ -108,7 +108,8 @@ parseWayUnit = Parsec.choice
, Parsec.string "yn" *> pure Dynamic ])
, Parsec.char 'p' *> pure Profiling
, Parsec.char 'l' *> pure Logging
- ] Parsec.<?> "way unit (thr, debug, dyn, p, l)"
+ , Parsec.string "mmtk" *> pure MMTK
+ ] Parsec.<?> "way unit (thr, debug, dyn, p, l, mmtk)"
-- | Parse a @"pkgname-pkgversion"@ string into the package name and the
-- integers that make up the package version.
diff --git a/hadrian/src/Settings/Builders/Ghc.hs b/hadrian/src/Settings/Builders/Ghc.hs
index 88bd26bd62..120d7e322e 100644
--- a/hadrian/src/Settings/Builders/Ghc.hs
+++ b/hadrian/src/Settings/Builders/Ghc.hs
@@ -240,6 +240,10 @@ wayGhcArgs = do
, (Threaded `wayUnit` way) ? arg "-optc-DTHREADED_RTS"
, (Debug `wayUnit` way) ? arg "-optc-DDEBUG"
, (Profiling `wayUnit` way) ? arg "-prof"
+ , (MMTK `wayUnit` way) ? arg "-DMMTK_GHC"
+ , (MMTK `wayUnit` way) ? arg "-optc-DMMTK_GHC"
+ , (MMTK `wayUnit` way) ? arg "-optP-DMMTK_GHC"
+ -- , supportsEventlog way ? arg "-eventlog"
, (way == debug || way == debugDynamic) ?
pure ["-ticky", "-DTICKY_TICKY"] ]
diff --git a/hadrian/src/Settings/Default.hs b/hadrian/src/Settings/Default.hs
index 312747141b..cc3cf56ce1 100644
--- a/hadrian/src/Settings/Default.hs
+++ b/hadrian/src/Settings/Default.hs
@@ -180,10 +180,12 @@ defaultRtsWays = Set.fromList <$>
, notStage0 ? pure
[ profiling, debugProfiling
, debug
+ , threadedMmtk, threadedDebugMmtk
]
, notStage0 ? targetSupportsThreadedRts ? pure [threaded, threadedProfiling, threadedDebugProfiling, threadedDebug]
, notStage0 ? platformSupportsSharedLibs ? pure
[ dynamic, debugDynamic
+ , threadedMmtkDynamic, threadedDebugMmtkDynamic
]
, notStage0 ? platformSupportsSharedLibs ? targetSupportsThreadedRts ? pure [ threadedDynamic, threadedDebugDynamic ]
]
@@ -291,5 +293,11 @@ defaultBuilderArgs = mconcat
-- | All 'Package'-dependent command line arguments.
defaultPackageArgs :: Args
-defaultPackageArgs = mconcat [ packageArgs
- , builder Ghc ? ghcWarningsArgs ]
+defaultPackageArgs = mconcat
+ [ packageArgs
+ , do ways <- getWay -- FIXME
+ (MMTK `wayUnit` ways) ? builder (Ghc LinkHs) ? mconcat
+ [ arg "-Lrts/mmtk/mmtk/target/debug"
+ , arg "-lmmtk_ghc"
+ ]
+ , builder Ghc ? ghcWarningsArgs ]
diff --git a/hadrian/src/Settings/Packages.hs b/hadrian/src/Settings/Packages.hs
index 128262a0ab..6e9db0d744 100644
--- a/hadrian/src/Settings/Packages.hs
+++ b/hadrian/src/Settings/Packages.hs
@@ -301,6 +301,7 @@ rtsPackageArgs = package rts ? do
, "-optc-DTICKY_TICKY"]
, Profiling `wayUnit` way ? arg "-DPROFILING"
, Threaded `wayUnit` way ? arg "-DTHREADED_RTS"
+ , MMTK `wayUnit` way ? arg "-DMMTK_GHC"
, notM targetSupportsSMP ? pure [ "-DNOSMP"
, "-optc-DNOSMP" ]
]
diff --git a/hadrian/src/UserSettings.hs b/hadrian/src/UserSettings.hs
index 5920b57f24..2139cb0984 100644
--- a/hadrian/src/UserSettings.hs
+++ b/hadrian/src/UserSettings.hs
@@ -33,7 +33,11 @@ userFlavours = [userFlavour] -- Add more build flavours if need be.
-- | This is an example user-defined build flavour. Feel free to modify it and
-- use by passing @--flavour=user@ from the command line.
userFlavour :: Flavour
-userFlavour = defaultFlavour { name = "user" } -- Modify other settings here.
+userFlavour = defaultFlavour { name = "user"
+ , args = x <> args defaultFlavour
+ }
+ where
+ x = stage1 ? builder (Ghc LinkHs) ? arg "rts/libmmtk_ghc.a"
-- | Add user-defined packages. Note, this only lets Hadrian know about the
-- existence of a new package; to actually build it you need to create a new
diff --git a/hadrian/src/Way.hs b/hadrian/src/Way.hs
index 044d781661..4c282906e2 100644
--- a/hadrian/src/Way.hs
+++ b/hadrian/src/Way.hs
@@ -2,10 +2,10 @@ module Way (
WayUnit (..), Way, wayUnit, addWayUnit, removeWayUnit, wayFromUnits, allWays,
vanilla, profiling, dynamic, profilingDynamic, threaded, debug,
- threadedDebug, threadedProfiling, threadedDynamic,
+ threadedDebug, threadedProfiling, threadedDynamic, threadedMmtk,
threadedDebugProfiling, threadedDebugDynamic, threadedProfilingDynamic,
debugProfiling, debugDynamic,
-
+ threadedMmtkDynamic, threadedDebugMmtkDynamic, threadedDebugMmtk,
wayPrefix, waySuffix, hisuf, osuf, hcsuf, obootsuf, hibootsuf, ssuf
) where
@@ -39,7 +39,8 @@ debug = wayFromUnits [Debug]
-- | Various combinations of RTS only ways.
threadedDebug, threadedProfiling, threadedDynamic,
threadedDebugProfiling, threadedDebugDynamic, threadedProfilingDynamic,
- debugProfiling, debugDynamic :: Way
+ debugProfiling, debugDynamic,
+ threadedMmtk, threadedMmtkDynamic, threadedDebugMmtk :: Way
threadedDebug = wayFromUnits [Threaded, Debug]
threadedProfiling = wayFromUnits [Threaded, Profiling]
threadedDynamic = wayFromUnits [Threaded, Dynamic]
@@ -48,12 +49,17 @@ threadedDebugDynamic = wayFromUnits [Threaded, Debug, Dynamic]
threadedProfilingDynamic = wayFromUnits [Threaded, Profiling, Dynamic]
debugProfiling = wayFromUnits [Debug, Profiling]
debugDynamic = wayFromUnits [Debug, Dynamic]
+threadedMmtk = wayFromUnits [Threaded, MMTK]
+threadedMmtkDynamic = wayFromUnits [Threaded, MMTK, Dynamic]
+threadedDebugMmtkDynamic = wayFromUnits [Threaded, Debug, MMTK, Dynamic]
+threadedDebugMmtk = wayFromUnits [Threaded, Debug, MMTK]
-- | All ways supported by the build system.
allWays :: [Way]
allWays =
[ vanilla, profiling, dynamic, profilingDynamic, threaded, debug
, threadedDebug, threadedProfiling, threadedDynamic
+ , threadedMmtk, threadedDebugMmtk, threadedMmtkDynamic, threadedDebugMmtkDynamic
, threadedDebugProfiling, threadedDebugDynamic, threadedProfilingDynamic
, debugProfiling, debugDynamic ]
diff --git a/hadrian/src/Way/Type.hs b/hadrian/src/Way/Type.hs
index b205390d37..c56f0f3178 100644
--- a/hadrian/src/Way/Type.hs
+++ b/hadrian/src/Way/Type.hs
@@ -18,6 +18,7 @@ data WayUnit = Threaded
| Profiling
| Logging
| Dynamic
+ | MMTK
deriving (Bounded, Enum, Eq, Ord)
-- TODO: get rid of non-derived Show instances
@@ -28,6 +29,7 @@ instance Show WayUnit where
Profiling -> "p"
Logging -> "l"
Dynamic -> "dyn"
+ MMTK -> "mmtk"
-- TODO: get rid of non-derived Read instance
instance Read WayUnit where
@@ -37,6 +39,7 @@ instance Read WayUnit where
"p" -> [(Profiling,"")]
"l" -> [(Logging,"")]
"dyn" -> [(Dynamic,"")]
+ "mmtk" -> [(MMTK,"")]
_ -> []
-- | Collection of 'WayUnit's that stands for the different ways source code
diff --git a/rts/Arena.c b/rts/Arena.c
index 8129475332..d4155fa746 100644
--- a/rts/Arena.c
+++ b/rts/Arena.c
@@ -24,9 +24,14 @@
#include "RtsUtils.h"
#include "Arena.h"
+typedef struct ArenaBlock {
+ struct ArenaBlock *link;
+ StgPtr start;
+} ArenaBlock;
+
// Each arena struct is allocated using malloc().
struct _Arena {
- bdescr *current;
+ ArenaBlock *current;
StgWord *free; // ptr to next free byte in current block
StgWord *lim; // limit (== last free byte + 1)
};
@@ -42,7 +47,7 @@ newArena( void )
Arena *arena;
arena = stgMallocBytes(sizeof(Arena), "newArena");
- arena->current = allocBlock_lock();
+ arena->current = stgMallocBytes(sizeof(ArenaBlock), "newArena");
arena->current->link = NULL;
arena->free = arena->current->start;
arena->lim = arena->current->start + BLOCK_SIZE_W;
@@ -80,20 +85,22 @@ arenaAlloc( Arena *arena, size_t size )
return p;
} else {
// allocate a fresh block...
+
+ uint32_t req_blocks;
+ ArenaBlock *new_;
+
req_blocks = (W_)BLOCK_ROUND_UP(size) / BLOCK_SIZE;
- bd = allocGroup_lock(req_blocks);
- arena_blocks += bd->blocks;
-
- bd->gen_no = 0;
- bd->gen = NULL;
- bd->dest_no = 0;
- bd->flags = 0;
- bd->free = bd->start;
- bd->link = arena->current;
- arena->current = bd;
- arena->free = bd->free + size_w;
- arena->lim = bd->free + bd->blocks * BLOCK_SIZE_W;
- return bd->start;
+ new_ = stgMallocBytes(sizeof(ArenaBlock), "newArena");
+ new_->start = malloc(BLOCK_SIZE * req_blocks);
+ new_->link = arena->current;
+
+ arena_blocks += req_blocks;
+ arena->current = new_;
+ arena->free = new_->start + size_w;
+ arena->lim = new_->start + req_blocks * BLOCK_SIZE_W;
+
+ return new_->start;
+
}
}
@@ -101,13 +108,12 @@ arenaAlloc( Arena *arena, size_t size )
void
arenaFree( Arena *arena )
{
- bdescr *bd, *next;
+ ArenaBlock *bd, *next;
for (bd = arena->current; bd != NULL; bd = next) {
next = bd->link;
- arena_blocks -= bd->blocks;
- ASSERT(arena_blocks >= 0);
- freeGroup_lock(bd);
+ free(bd->start);
+ stgFree(bd);
}
stgFree(arena);
}
@@ -130,8 +136,8 @@ void checkPtrInArena( StgPtr p, Arena *arena )
// Rest of the blocks should be full (except there may be a little bit of
// slop at the end). Again, free pointers are not updated so we can't use
// those.
- for (bdescr *bd = arena->current->link; bd; bd = bd->link) {
- if (p >= bd->start && p < bd->start + (bd->blocks*BLOCK_SIZE_W)) {
+ for (ArenaBlock *bd = arena->current->link; bd; bd = bd->link) {
+ if (p >= bd->start && p < bd->start + BLOCK_SIZE_W) {
return;
}
}
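The hunks above only show the arena fast path in context; for orientation, here is a hedged sketch of that bump-pointer path under the patch's new malloc()-backed representation (names follow rts/Arena.c; sizes are assumed to be already rounded up to whole words):

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t StgWord;
    typedef StgWord *StgPtr;

    typedef struct ArenaBlock {
        struct ArenaBlock *link;
        StgPtr start;
    } ArenaBlock;

    typedef struct _Arena {
        ArenaBlock *current;
        StgWord *free;   /* next free word in the current block */
        StgWord *lim;    /* one past the last usable word */
    } Arena;

    /* fast path of arenaAlloc: bump the free pointer when the request fits */
    static void *arena_alloc_fast(Arena *arena, size_t size_w)
    {
        if (arena->free + size_w <= arena->lim) {
            StgWord *p = arena->free;
            arena->free += size_w;
            return p;
        }
        return NULL;  /* caller falls back to the malloc()ed-block slow
                         path shown in the diff above */
    }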
diff --git a/rts/Capability.c b/rts/Capability.c
index 98c80887e5..bd270e6b4e 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -31,6 +31,13 @@
#include "sm/BlockAlloc.h" // for countBlocks()
#include "IOManager.h"
+#if defined(MMTK_GHC)
+#if !defined(THREADED_RTS)
+#error MMTK is only supported in the threaded RTS
+#endif
+#include "mmtk/ghc/mmtk_upcalls.h"
+#endif
+
#include <string.h>
// one global capability, this is the Capability for non-threaded
@@ -333,6 +340,7 @@ initCapability (Capability *cap, uint32_t i)
#endif
}
+
/* ---------------------------------------------------------------------------
* Function: initCapabilities()
*
@@ -493,7 +501,7 @@ giveCapabilityToTask (Capability *cap USED_IF_DEBUG, Task *task)
ASSERT_LOCK_HELD(&cap->lock);
ASSERT(task->cap == cap);
debugTrace(DEBUG_sched, "passing capability %d to %s %#" FMT_HexWord64,
- cap->no, task->incall->tso ? "bound task" : "worker",
+ cap->no, (task->incall && task->incall->tso) ? "bound task" : "worker",
serialisableTaskId(task));
ACQUIRE_LOCK(&task->lock);
if (task->wakeup == false) {
@@ -936,6 +944,16 @@ void waitForCapability (Capability **pCap, Task *task)
#if defined(THREADED_RTS)
+bool /* Did we GC? */
+yieldCapabilityForMMTK
+ ( Task *task // [in] This thread's task.
+ , bool gcAllowed
+ )
+{
+ Capability *cap = task->cap;
+ return yieldCapability(&cap, task, gcAllowed);
+}
+
/* See Note [GC livelock] in Schedule.c for why we have gcAllowed
and return the bool */
bool /* Did we GC? */
@@ -977,6 +995,8 @@ yieldCapability
case SYNC_FLUSH_EVENT_LOG:
/* N.B. the actual flushing is performed by flushEventLog */
break;
+ case SYNC_GC_MMTK:
+ break;
default:
break;
diff --git a/rts/Capability.h b/rts/Capability.h
index 2910c3faf0..1590df55a3 100644
--- a/rts/Capability.h
+++ b/rts/Capability.h
@@ -27,6 +27,7 @@
#include "IOManager.h" // for CapIOManager
#include "BeginPrivate.h"
+#include "mmtk.h"
/* N.B. This must be consistent with CapabilityPublic in RtsAPI.h */
struct Capability_ {
@@ -90,6 +91,7 @@ struct Capability_ {
// The update remembered set for the non-moving collector
UpdRemSet upd_rem_set;
+ // MMTK-TODO: move pinned object blocks inside storage manager
// block for allocating pinned objects into
bdescr *pinned_object_block;
// full pinned object blocks allocated since the last GC
@@ -276,7 +278,8 @@ typedef enum {
SYNC_GC_SEQ,
SYNC_GC_PAR,
SYNC_FLUSH_UPD_REM_SET,
- SYNC_FLUSH_EVENT_LOG
+ SYNC_FLUSH_EVENT_LOG,
+ SYNC_GC_MMTK
} SyncType;
//
@@ -327,6 +330,8 @@ EXTERN_INLINE void recordClosureMutated (Capability *cap, StgClosure *p);
// On return: *pCap is NULL if the capability was released. The
// current task should then re-acquire it using waitForCapability().
//
+
+bool yieldCapabilityForMMTK (Task *task, bool gcAllowed);
bool yieldCapability (Capability** pCap, Task *task, bool gcAllowed);
// Wakes up a worker thread on just one Capability, used when we
@@ -425,6 +430,11 @@ INLINE_HEADER bool emptyInbox(Capability *cap);
EXTERN_INLINE void
recordMutableCap (const StgClosure *p, Capability *cap, uint32_t gen)
{
+#if defined(MMTK_GHC)
+ // TODO: call MMTK write barrier
+ return;
+#endif
+
bdescr *bd;
// We must own this Capability in order to modify its mutable list.
@@ -446,6 +456,10 @@ recordMutableCap (const StgClosure *p, Capability *cap, uint32_t gen)
EXTERN_INLINE void
recordClosureMutated (Capability *cap, StgClosure *p)
{
+#if defined(MMTK_GHC)
+ // TODO: call MMTK write barrier
+ return;
+#endif
bdescr *bd;
bd = Bdescr((StgPtr)p);
if (bd->gen_no != 0) recordMutableCap(p,cap,bd->gen_no);
diff --git a/rts/Heap.c b/rts/Heap.c
index a3be4da749..64f1a84dc0 100644
--- a/rts/Heap.c
+++ b/rts/Heap.c
@@ -205,29 +205,52 @@ StgWord collect_pointers(StgClosure *closure, StgClosure *ptrs[]) {
ptrs[nptrs++] = ((StgMVar *)closure)->value;
break;
case TSO:
- ASSERT((StgClosure *)((StgTSO *)closure)->_link != NULL);
- ptrs[nptrs++] = (StgClosure *)((StgTSO *)closure)->_link;
+ {
+ StgTSO* tso = ((StgTSO *)closure);
- ASSERT((StgClosure *)((StgTSO *)closure)->global_link != NULL);
- ptrs[nptrs++] = (StgClosure *)((StgTSO *)closure)->global_link;
+ if (tso->bound != NULL) {
+ ptrs[nptrs++] = (StgClosure *)tso->bound->tso;
+ }
+
+ ASSERT((StgClosure *)tso->_link != NULL);
+ ptrs[nptrs++] = (StgClosure *)tso->_link;
- ASSERT((StgClosure *)((StgTSO *)closure)->stackobj != NULL);
- ptrs[nptrs++] = (StgClosure *)((StgTSO *)closure)->stackobj;
+ ASSERT((StgClosure *)tso->global_link != NULL);
+ ptrs[nptrs++] = (StgClosure *)tso->global_link;
- ASSERT((StgClosure *)((StgTSO *)closure)->trec != NULL);
- ptrs[nptrs++] = (StgClosure *)((StgTSO *)closure)->trec;
+ ASSERT((StgClosure *)tso->stackobj != NULL);
+ ptrs[nptrs++] = (StgClosure *)tso->stackobj;
- ASSERT((StgClosure *)((StgTSO *)closure)->blocked_exceptions != NULL);
- ptrs[nptrs++] = (StgClosure *)((StgTSO *)closure)->blocked_exceptions;
+ ASSERT((StgClosure *)tso->trec != NULL);
+ ptrs[nptrs++] = (StgClosure *)tso->trec;
- ASSERT((StgClosure *)((StgTSO *)closure)->bq != NULL);
- ptrs[nptrs++] = (StgClosure *)((StgTSO *)closure)->bq;
+ ASSERT((StgClosure *)tso->blocked_exceptions != NULL);
+ ptrs[nptrs++] = (StgClosure *)tso->blocked_exceptions;
+
+ ASSERT((StgClosure *)tso->bq != NULL);
+ ptrs[nptrs++] = (StgClosure *)tso->bq;
if ((StgClosure *)((StgTSO *)closure)->label != NULL) {
ptrs[nptrs++] = (StgClosure *)((StgTSO *)closure)->label;
}
+ ASSERT((StgClosure *)tso->tso_link_prev != NULL);
+ ptrs[nptrs++] = (StgClosure *)tso->tso_link_prev;
+
+ ASSERT((StgClosure *)tso->tso_link_next != NULL);
+ ptrs[nptrs++] = (StgClosure *)tso->tso_link_next;
+
+ if ( tso->why_blocked == BlockedOnMVar
+ || tso->why_blocked == BlockedOnMVarRead
+ || tso->why_blocked == BlockedOnBlackHole
+ || tso->why_blocked == BlockedOnMsgThrowTo
+ || tso->why_blocked == NotBlocked
+ ) {
+ ASSERT((StgClosure *)tso->block_info.closure != NULL);
+ ptrs[nptrs++] = (StgClosure *)tso->block_info.closure;
+ }
break;
+ }
case WEAK: {
StgWeak *w = (StgWeak *)closure;
ptrs[nptrs++] = (StgClosure *) w->cfinalizers;
@@ -264,6 +287,7 @@ StgMutArrPtrs *heap_view_closurePtrs(Capability *cap, StgClosure *closure) {
// the closure and then we can allocate space on the heap and copy them
// there. Note that we cannot allocate this on the C stack as the closure
// may be, e.g., a large array.
+ // upper bound: sizeof(StgClosure *) * size
StgClosure **ptrs = (StgClosure **) stgMallocBytes(sizeof(StgClosure *) * size, "heap_view_closurePtrs");
StgWord nptrs = collect_pointers(closure, ptrs);
diff --git a/rts/HeapStackCheck.cmm b/rts/HeapStackCheck.cmm
index 18cd9eef6f..ab1bcec00d 100644
--- a/rts/HeapStackCheck.cmm
+++ b/rts/HeapStackCheck.cmm
@@ -108,14 +108,30 @@ import CLOSURE stg_ret_p_info;
stg_gc_noregs
{
W_ ret;
-
DEBUG_ONLY(foreign "C" heapCheckFail());
+ // the heap check failed: this allocation would exceed HpLim
if (Hp > HpLim) {
Hp = Hp - HpAlloc/*in bytes*/;
- if (HpLim == 0) {
+ // otherwise, try to continue with another chunk of available heap
+#if defined(MMTK_GHC)
+ if (HpAlloc != 0) {
+ CLOSE_NURSERY();
+ ret = HeapOverflow;
+ goto sched;
+ }
+ // check again whether we actually need to yield to the scheduler
+ if (Capability_context_switch(MyCapability()) != 0 :: CInt ||
+ Capability_interrupt(MyCapability()) != 0 :: CInt ||
+ (StgTSO_alloc_limit(CurrentTSO) `lt` (0::I64) &&
+ (TO_W_(StgTSO_flags(CurrentTSO)) & TSO_ALLOC_LIMIT) != 0)) {
ret = ThreadYielding;
goto sched;
+ } else {
+ jump %ENTRY_CODE(Sp(0)) [];
}
+#endif
+ // GHCSM case:
+ // if there is another block of nursery available, continue there
if (HpAlloc <= BLOCK_SIZE
&& bdescr_link(CurrentNursery) != NULL) {
HpAlloc = 0;
diff --git a/rts/MmtkGhc.c b/rts/MmtkGhc.c
new file mode 100644
index 0000000000..2e27013833
--- /dev/null
+++ b/rts/MmtkGhc.c
@@ -0,0 +1,14 @@
+#include "Rts.h"
+#include "mmtk.h"
+#include "Capability.h"
+#include "Task.h"
+#include "mmtk/ghc/mmtk_upcalls.h"
+
+#if defined(MMTK_GHC)
+MMTk_Mutator *upcall_get_mutator(void *tls)
+{
+ ASSERT(upcall_is_task(tls));
+ Task *task = (Task *) tls;
+ return task->mmutator;
+}
+#endif
\ No newline at end of file
diff --git a/rts/RtsAPI.c b/rts/RtsAPI.c
index 46cb8a91f8..d04ca91f86 100644
--- a/rts/RtsAPI.c
+++ b/rts/RtsAPI.c
@@ -828,11 +828,10 @@ void rts_listThreads(ListThreadsCb cb, void *user)
// The rts is paused and can only be resumed by the current thread. Hence it
// is safe to read global thread data.
- for (uint32_t g=0; g < RtsFlags.GcFlags.generations; g++) {
- StgTSO *tso = generations[g].threads;
- while (tso != END_TSO_QUEUE) {
- cb(user, tso);
- tso = tso->global_link;
+ // walk the global list of all currently-alive TSOs
+ for (StgTSO *t = global_TSOs; t->tso_link_next != END_TSO_QUEUE; t = t->tso_link_next) {
+ if (t != END_TSO_QUEUE) {
+ cb(user, t);
}
}
}
diff --git a/rts/RtsFlags.c b/rts/RtsFlags.c
index bb82ea4515..fb90c40cfd 100644
--- a/rts/RtsFlags.c
+++ b/rts/RtsFlags.c
@@ -316,6 +316,9 @@ usage_text[] = {
" --copying-gc",
" Selects the copying garbage collector to manage all generations.",
"",
+" --no-gc",
+" Performs no garbage collection.",
+"",
" -K<size> Sets the maximum stack size (default: 80% of the heap)",
" e.g.: -K32k -K512k -K8M",
" -ki<size> Sets the initial thread stack size (default 1k) e.g.: -ki4k -ki2m",
@@ -1025,6 +1028,11 @@ error = true;
OPTION_SAFE;
RtsFlags.GcFlags.useNonmoving = true;
}
+ else if (strequal("no-gc",
+ &rts_argv[arg][2])) {
+ OPTION_SAFE;
+ RtsFlags.GcFlags.no_gc = true;
+ }
#if defined(THREADED_RTS)
#if defined(mingw32_HOST_OS)
else if (!strncmp("io-manager-threads",
diff --git a/rts/RtsStartup.c b/rts/RtsStartup.c
index 83f43bf619..5cdb2e8735 100644
--- a/rts/RtsStartup.c
+++ b/rts/RtsStartup.c
@@ -42,6 +42,10 @@
#include "sm/CNF.h"
#include "TopHandler.h"
+#if defined(MMTK_GHC)
+# include "mmtk.h"
+#endif
+
#if defined(PROFILING)
# include "ProfHeap.h"
# include "RetainerProfile.h"
@@ -357,6 +361,16 @@ hs_init_ghc(int *argc, char **argv[], RtsConfig rts_config)
*/
initTimer();
+ /* initialize the storage manager */
+ /* NOTE: MMTk must be initialised before collection is initialised for each task */
+#if defined(MMTK_GHC)
+ size_t mmtk_heap_size = RtsFlags.GcFlags.maxHeapSize * BLOCK_SIZE;
+ if (mmtk_heap_size == 0) {
+ mmtk_heap_size = 1024*1024*1024;
+ }
+ mmtk_init(mmtk_heap_size);
+#endif
+
/* initialise scheduler data structures (needs to be done before
* initStorage()).
*/
@@ -367,7 +381,6 @@ hs_init_ghc(int *argc, char **argv[], RtsConfig rts_config)
traceInitEvent(traceOSProcessInfo);
flushTrace();
- /* initialize the storage manager */
initStorage();
/* initialise the stable pointer table */
@@ -421,6 +434,11 @@ hs_init_ghc(int *argc, char **argv[], RtsConfig rts_config)
/* Record initialization times */
stat_endInit();
+
+#if defined(MMTK_GHC)
+ mmtk_initialize_collection(capabilities[0]->running_task);
+#endif
+
}
// Compatibility interface
diff --git a/rts/RtsUtils.c b/rts/RtsUtils.c
index d3dcdf3092..3ecd5281b2 100644
--- a/rts/RtsUtils.c
+++ b/rts/RtsUtils.c
@@ -396,6 +396,17 @@ int rts_isTracing(void)
#endif
}
+// Provides a way for Haskell programs to tell whether they're
+// linked with the MMTK mode or not.
+int rts_isMMTK(void)
+{
+#if defined(MMTK_GHC)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
// Used for detecting a non-empty FPU stack on x86 (see #4914)
void checkFPUStack(void)
{
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 8afcf54ca0..101b4529a7 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -1145,6 +1145,10 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
pushOnRunQueue(cap,t);
}
+#if defined(MMTK_GHC)
+ return true;
+#endif
+
// did the task ask for a large block?
if (cap->r.rHpAlloc > BLOCK_SIZE) {
// if so, get one and push it on the front of the nursery.
@@ -1435,6 +1439,12 @@ void stopAllCapabilitiesWith (Capability **pCap, Task *task, SyncType sync_type)
}
#endif
+#if defined(MMTK_GHC)
+void stopAllCapabilitiesForMMTK (Task *task) {
+ stopAllCapabilitiesWith (NULL, task, SYNC_GC_MMTK);
+}
+#endif
+
/* -----------------------------------------------------------------------------
* requestSync()
*
@@ -1585,6 +1595,36 @@ static void
scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
bool force_major, bool is_overflow_gc, bool deadlock_detect)
{
+ ASSERT((*pcap)->running_task == task);
+
+#if defined(MMTK_GHC)
+ // in MMTK mode, bypass the GHC GC entirely
+ // TODO: distinguish scheduling and GC after delete_threads_and_gc
+ if (RELAXED_LOAD(&sched_state) == SCHED_INTERRUPTING) {
+ sched_state = SCHED_SHUTTING_DOWN;
+ deleteAllThreads();
+ } else {
+ Capability *cap = *pcap;
+ StgWord hp_alloc = cap->r.rHpAlloc;
+ // N.B. MMTK doesn't like it if we ask for a size-0 allocation.
+ if (hp_alloc != 0) {
+ StgPtr p = mmtk_alloc_slow(task->mmutator, hp_alloc, sizeof(W_), 0, 0);
+
+ // N.B. mmtk_alloc_slow may yield our capability by calling block_for_gc;
+ // ensure that pcap is updated appropriately.
+ *pcap = task->cap;
+
+ // mmtk_alloc_slow has allocated hp_alloc bytes for us but we want
+ // the mutator to be the one to advance the cursor; roll it back.
+ mmtk_get_nursery_allocator(task->mmutator)->cursor = p;
+ }
+ else {
+ // TODO: handle force GC during shutdown (or more cases)
+ }
+ return;
+ }
+#endif
+
Capability *cap = *pcap;
bool heap_census;
uint32_t collect_gen;
@@ -2027,7 +2067,6 @@ forkProcess(HsStablePtr *entry
{
#if defined(FORKPROCESS_PRIMOP_SUPPORTED)
pid_t pid;
- StgTSO* t,*next;
Capability *cap;
uint32_t g;
Task *task = NULL;
@@ -2127,9 +2166,8 @@ forkProcess(HsStablePtr *entry
// all Tasks, because they correspond to OS threads that are
// now gone.
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
- next = t->global_link;
+ StgTSO* t, *next;
+ for (t = global_TSOs; t->tso_link_next != END_TSO_QUEUE; t = next) {
// don't allow threads to catch the ThreadKilled
// exception, but we do want to raiseAsync() because these
// threads may be evaluating thunks that we need later.
@@ -2141,7 +2179,7 @@ forkProcess(HsStablePtr *entry
// won't get a chance to exit in the usual way (see
// also scheduleHandleThreadFinished).
t->bound = NULL;
- }
+ next = t->tso_link_next;
}
discardTasksExcept(task);
@@ -2233,6 +2271,12 @@ forkProcess(HsStablePtr *entry
void
setNumCapabilities (uint32_t new_n_capabilities USED_IF_THREADS)
{
+#if defined(MMTK_GHC)
+ if (new_n_capabilities != 1) {
+ barf("noGC try to change number of capabilities; \
+ Should change gen _mut_lists too.");
+ }
+#endif
#if !defined(THREADED_RTS)
if (new_n_capabilities != 1) {
errorBelch("setNumCapabilities: not supported in the non-threaded RTS");
@@ -2368,17 +2412,13 @@ setNumCapabilities (uint32_t new_n_capabilities USED_IF_THREADS)
static void
deleteAllThreads ()
{
- // NOTE: only safe to call if we own all capabilities.
-
StgTSO* t, *next;
- uint32_t g;
+ // NOTE: only safe to call if we own all capabilities.
debugTrace(DEBUG_sched,"deleting all threads");
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
- next = t->global_link;
- deleteThread(t);
- }
+ for(t = global_TSOs; t->tso_link_next != END_TSO_QUEUE; t = next) {
+ next = t->tso_link_next;
+ deleteThread(t);
}
// The run queue now contains a bunch of ThreadKilled threads. We
@@ -2824,9 +2864,13 @@ performGC_(bool force_major)
// TODO: do we need to traceTask*() here?
+#if defined(MMTK_GHC)
+ mmtk_handle_user_collection_request(cap);
+#else
waitForCapability(&cap,task);
scheduleDoGC(&cap,task,force_major,false,false);
releaseCapability(cap);
+#endif
exitMyTask();
}
diff --git a/rts/StablePtr.c b/rts/StablePtr.c
index c2e7cda2c3..c9282a66a8 100644
--- a/rts/StablePtr.c
+++ b/rts/StablePtr.c
@@ -100,7 +100,8 @@
spEntry *stable_ptr_table = NULL;
static spEntry *stable_ptr_free = NULL;
-static unsigned int SPT_size = 0;
+// No longer static: the MMTk binding iterates over the table directly
+// (see iter_stable_ptr_table in rts/mmtk/mmtk/src/ghc.rs).
+unsigned int SPT_size = 0;
#define INIT_SPT_SIZE 64
/* Each time the stable pointer table is enlarged, we temporarily retain the old
@@ -312,7 +313,7 @@ getStablePtr(StgPtr p)
spEntry *__end_ptr = &stable_ptr_table[SPT_size]; \
for (p = stable_ptr_table; p < __end_ptr; p++) { \
/* Internal pointers are free slots. NULL is last in free */ \
- /* list. */ \
+      /* list. (a NULL addr marks the end and is not traced) */       \
if (p->addr && \
(p->addr < (P_)stable_ptr_table || p->addr >= (P_)__end_ptr)) \
{ \
diff --git a/rts/Task.c b/rts/Task.c
index de24253db5..8348dacc83 100644
--- a/rts/Task.c
+++ b/rts/Task.c
@@ -25,6 +25,10 @@
#include <signal.h>
#endif
+#if defined(MMTK_GHC)
+#include "mmtk/ghc/mmtk_upcalls.h"
+#endif
+
// Task lists and global counters.
// Locks required: all_tasks_mutex.
Task *all_tasks = NULL;
@@ -247,6 +251,25 @@ newTask (bool worker)
peakWorkerCount = currentWorkerCount;
}
}
+
+#if defined(MMTK_GHC)
+ task->mmutator = mmtk_bind_mutator(task);
+ BumpAllocator *bump_alloc = mmtk_get_nursery_allocator(task->mmutator);
+
+    // We set the initial cursor and limit of the MMTk BumpAllocator used
+    // by the STG machine to a non-NULL value; otherwise we would hit an
+    // unsigned integer underflow due to the offset described in
+    // Note [MMTK off-by-one] in GHC.StgToCmm.Foreign.
+ bump_alloc->cursor = (void*) (2*sizeof(StgWord));
+ bump_alloc->limit = bump_alloc->cursor;
+
+ // rts_mutator is used by allocate() and friends.
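+    // N.B. the "+ 1" below merely gives this mutator a tls key distinct
+    // from the one used for task->mmutator above.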
+ task->rts_mutator = mmtk_bind_mutator((void*) ((uintptr_t) task + 1));
+
+ // mmtk_initialize_collection(task);
+#endif
+
RELEASE_LOCK(&all_tasks_mutex);
return task;
@@ -317,6 +340,26 @@ newBoundTask (void)
return task;
}
+#if defined(MMTK_GHC)
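+// Upcall used by the MMTk binding (ActivePlan::is_mutator): report
+// whether the given pointer is a live Task known to the RTS.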
+bool upcall_is_task(void* task)
+{
+ bool is_task = false;
+
+ ACQUIRE_LOCK(&all_tasks_mutex);
+ Task* t = all_tasks;
+ while (t != NULL) {
+ if (task == t) {
+ is_task = true;
+ break;
+ }
+ t = t->all_next;
+ }
+ RELEASE_LOCK(&all_tasks_mutex);
+
+ return is_task;
+}
+#endif
+
void
exitMyTask (void)
{
diff --git a/rts/Task.h b/rts/Task.h
index 84617fd450..dd829109bb 100644
--- a/rts/Task.h
+++ b/rts/Task.h
@@ -15,6 +15,8 @@
#include "BeginPrivate.h"
+#include "mmtk.h"
+
/*
Note [Definition of a Task]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -164,6 +166,9 @@ typedef struct Task_ {
struct Task_ *all_next;
struct Task_ *all_prev;
+    // MMTk mutators (one pair per OS thread): mmutator backs the STG
+    // machine's heap pointer, rts_mutator backs allocate() and friends.
+    void *mmutator;
+    void *rts_mutator;
} Task;
INLINE_HEADER bool
diff --git a/rts/Threads.c b/rts/Threads.c
index 2dce11a901..81579d87d2 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -26,6 +26,8 @@
#include <string.h>
+StgTSO* global_TSOs = END_TSO_QUEUE;
+
/* Next thread ID to allocate.
* LOCK: sched_mutex
*/
@@ -131,6 +133,12 @@ createThread(Capability *cap, W_ size)
/* Mutations above need no memory barrier since this lock will provide
* a release barrier */
g0->threads = tso;
+
+    // push the new tso onto the global doubly-linked list of threads
+    tso->tso_link_prev = END_TSO_QUEUE;
+    tso->tso_link_next = global_TSOs;
+    if (global_TSOs != END_TSO_QUEUE) {
+        global_TSOs->tso_link_prev = tso;
+    }
+    global_TSOs = tso;
+
RELEASE_LOCK(&sched_mutex);
// ToDo: report the stack size in the event?
@@ -862,10 +870,8 @@ StgMutArrPtrs *listThreads(Capability *cap)
// First count how many threads we have...
StgWord n_threads = 0;
- for (unsigned g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (StgTSO *t = generations[g].threads; t != END_TSO_QUEUE; t = t->global_link) {
- n_threads++;
- }
+ for (StgTSO *t = global_TSOs; t != END_TSO_QUEUE; t = t->tso_link_next) {
+ n_threads++;
}
// Allocate a suitably-sized array...
@@ -878,21 +884,21 @@ StgMutArrPtrs *listThreads(Capability *cap)
// Populate it...
StgWord i = 0;
- for (unsigned g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (StgTSO *t = generations[g].threads; t != END_TSO_QUEUE; t = t->global_link) {
- // It's possible that new threads have been created since we counted.
- // Ignore them.
- if (i == n_threads)
- break;
- arr->payload[i] = (StgClosure *) t;
- i++;
+ for (StgTSO *t = global_TSOs; t != END_TSO_QUEUE; t = t->tso_link_next) {
+ // It's possible that new threads have been created since we counted.
+ // Ignore them.
+ if (i == n_threads) {
+ break;
}
+ arr->payload[i] = (StgClosure *) t;
+ i++;
}
CHECKM(i == n_threads, "listThreads: Found too few threads");
RELEASE_LOCK(&sched_mutex);
return arr;
}
+
/* ----------------------------------------------------------------------------
* Debugging: why is a thread blocked
* ------------------------------------------------------------------------- */
@@ -981,28 +987,23 @@ printThreadStatus(StgTSO *t)
void
printAllThreads(void)
{
- StgTSO *t, *next;
- uint32_t i, g;
- Capability *cap;
-
debugBelch("all threads:\n");
- for (i = 0; i < getNumCapabilities(); i++) {
- cap = getCapability(i);
+ for (unsigned int i = 0; i < getNumCapabilities(); i++) {
+ Capability *cap = getCapability(i);
debugBelch("threads on capability %d:\n", cap->no);
- for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
+ for (StgTSO *t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
printThreadStatus(t);
}
}
debugBelch("other threads:\n");
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
+ StgTSO *next;
+ for (StgTSO *t = global_TSOs; t != END_TSO_QUEUE; t = next) {
if (t->why_blocked != NotBlocked) {
printThreadStatus(t);
}
- next = t->global_link;
- }
+ next = t->tso_link_next;
}
}
diff --git a/rts/Threads.h b/rts/Threads.h
index c0a9f1cf1c..6bc79b994f 100644
--- a/rts/Threads.h
+++ b/rts/Threads.h
@@ -12,6 +12,15 @@
#define END_BLOCKED_EXCEPTIONS_QUEUE ((MessageThrowTo*)END_TSO_QUEUE)
+
+/* -------------------------------------------------
+ * The global doubly-linked list of TSO objects.
+ * Used by the RTS to walk all current threads without
+ * exposing the generation structure to the parts of
+ * the RTS outside the storage manager.
+ * ------------------------------------------------- */
+extern StgTSO* global_TSOs;
+
+
StgTSO * unblockOne (Capability *cap, StgTSO *tso);
StgTSO * unblockOne_ (Capability *cap, StgTSO *tso, bool allow_migrate);
diff --git a/rts/Updates.h b/rts/Updates.h
index b35790b536..ef08404ec8 100644
--- a/rts/Updates.h
+++ b/rts/Updates.h
@@ -45,8 +45,24 @@
* before we introduce the indirection.
* See Note [Heap memory barriers] in SMP.h.
*/
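+/* The MMTk variant below performs no generational bookkeeping: there are
+ * no block descriptors or mutable lists to update, only the memory fences
+ * needed to publish the indirection. */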
+#if defined(MMTK_GHC)
#define updateWithIndirection(p1, p2, and_then) \
W_ bd; \
+ \
+ prim_write_barrier; \
+ OVERWRITING_CLOSURE(p1); \
+ %relaxed StgInd_indirectee(p1) = p2; \
+ prim_write_barrier; \
+ SET_INFO_RELEASE(p1, stg_BLACKHOLE_info); \
+ LDV_RECORD_CREATE(p1); \
+ and_then;
+
+#else
+
+#define updateWithIndirection(p1, p2, and_then) \
+ W_ bd; \
+ \
+ prim_write_barrier; \
bd = Bdescr(p1); \
if (bdescr_gen_no(bd) != 0 :: bits16) { \
IF_NONMOVING_WRITE_BARRIER_ENABLED { \
@@ -63,27 +79,30 @@
SET_INFO_RELEASE(p1, stg_BLACKHOLE_info); \
LDV_RECORD_CREATE(p1); \
and_then;
+#endif
#else /* !CMINUSMINUS */
-INLINE_HEADER void updateWithIndirection (Capability *cap,
+INLINE_HEADER void updateWithIndirection (Capability *cap STG_UNUSED,
StgClosure *p1,
StgClosure *p2)
{
ASSERT( (P_)p1 != (P_)p2 );
- /* not necessarily true: ASSERT( !closure_IND(p1) ); */
- /* occurs in RaiseAsync.c:raiseAsync() */
- /* See Note [Heap memory barriers] in SMP.h */
- bdescr *bd = Bdescr((StgPtr)p1);
- if (bd->gen_no != 0) {
- IF_NONMOVING_WRITE_BARRIER_ENABLED {
- updateRemembSetPushThunk(cap, (StgThunk*)p1);
- }
- recordMutableCap(p1, cap, bd->gen_no);
- TICK_UPD_OLD_IND();
- } else {
- TICK_UPD_NEW_IND();
- }
+#if !defined(MMTK_GHC)
+ /* not necessarily true: ASSERT( !closure_IND(p1) ); */
+ /* occurs in RaiseAsync.c:raiseAsync() */
+ /* See Note [Heap memory barriers] in SMP.h */
+ bdescr *bd = Bdescr((StgPtr)p1);
+ if (bd->gen_no != 0) {
+ IF_NONMOVING_WRITE_BARRIER_ENABLED {
+ updateRemembSetPushThunk(cap, (StgThunk*)p1);
+ }
+ recordClosureMutated(cap, p1);
+ TICK_UPD_OLD_IND();
+ } else {
+ TICK_UPD_NEW_IND();
+ }
+#endif
OVERWRITING_CLOSURE(p1);
RELEASE_STORE(&((StgInd *)p1)->indirectee, p2);
SET_INFO_RELEASE(p1, &stg_BLACKHOLE_info);
diff --git a/rts/include/Cmm.h b/rts/include/Cmm.h
index 9d2ebda52f..d91b2d6c50 100644
--- a/rts/include/Cmm.h
+++ b/rts/include/Cmm.h
@@ -376,6 +376,8 @@
#include "rts/storage/Block.h" /* For Bdescr() */
+// BaseReg points at the StgRegTable embedded in the current Capability;
+// OFFSET_Capability_r is the auto-generated offset of that field.
#define MyCapability() (BaseReg - OFFSET_Capability_r)
/* -------------------------------------------------------------------------
@@ -444,9 +446,14 @@
HP_CHK_P(bytes); \
TICK_ALLOC_HEAP_NOCTR(bytes);
+#if defined(MMTK_GHC)
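+// Under MMTk the nursery is a bump allocator checked via Hp/HpLim, so
+// only the large-allocation check remains here.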
+#define CHECK_GC() \
+ (generation_n_new_large_words(W_[g0]) >= TO_W_(CLong[large_alloc_lim]))
+#else
#define CHECK_GC() \
(bdescr_link(CurrentNursery) == NULL || \
generation_n_new_large_words(W_[g0]) >= TO_W_(CLong[large_alloc_lim]))
+#endif
// allocate() allocates from the nursery, so we check to see
// whether the nursery is nearly empty in any function that uses
diff --git a/rts/include/mmtk.h b/rts/include/mmtk.h
new file mode 100644
index 0000000000..f9d24828ba
--- /dev/null
+++ b/rts/include/mmtk.h
@@ -0,0 +1,142 @@
+// This is an example of native API for the single instance MMTk.
+
+// Note: the mmtk core does not directly provide this API. However, it provides
+// a similar multi-instance Rust API. A VM binding should write their own C
+// header file (possibly based on this example with their own extension and
+// modification), and expose the Rust API based on their native API.
+
+
+// nogc would require:
+// - gc_init
+// - alloc
+// - bind_mutator
+
+#ifndef MMTK_H
+#define MMTK_H
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct BumpAllocator {
+ void* tls;
+ void* cursor;
+ void* limit;
+} BumpAllocator;
+
+typedef void* MMTk_Mutator;
+
+// Initialize an MMTk instance
+extern void mmtk_init(size_t heap_size);
+
+// Request MMTk to create a new mutator for the given `tls` thread
+extern MMTk_Mutator mmtk_bind_mutator(void* tls);
+
+// Reclaim mutator that is no longer needed
+extern void mmtk_destroy_mutator(MMTk_Mutator mutator);
+
+// Flush mutator local state
+extern void mmtk_flush_mutator(MMTk_Mutator mutator);
+
+// Initialize MMTk scheduler and GC workers
+extern void mmtk_initialize_collection(void* tls);
+
+// Allow MMTk to perform a GC when the heap is full
+extern void mmtk_enable_collection(void);
+
+// Disallow MMTk to perform a GC when the heap is full
+extern void mmtk_disable_collection(void);
+
+extern BumpAllocator* mmtk_get_nursery_allocator(MMTk_Mutator mutator);
+
+// Allocate memory for an object
+extern void* mmtk_alloc(MMTk_Mutator mutator,
+ size_t size,
+ size_t align,
+ ssize_t offset,
+ int allocator);
+
+// Slowpath allocation for an object
+extern void* mmtk_alloc_slow(MMTk_Mutator mutator,
+ size_t size,
+ size_t align,
+ ssize_t offset,
+ int allocator);
+
+// Perform post-allocation hooks or actions such as initializing object metadata
+extern void mmtk_post_alloc(MMTk_Mutator mutator,
+ void* refer,
+ int bytes,
+ int allocator);
+
+// Return if the object pointed to by `ref` is live
+extern bool mmtk_is_live_object(void* ref);
+
+// Return if the object pointed to by `ref` is in mapped memory
+extern bool mmtk_is_mapped_object(void* ref);
+
+// Return if the address pointed to by `addr` is in mapped memory
+extern bool mmtk_is_mapped_address(void* addr);
+
+// Check if a GC is in progress and if the object `ref` is movable
+extern void mmtk_modify_check(void* ref);
+
+// Return if object pointed to by `object` will never move
+extern bool mmtk_will_never_move(void* object);
+
+// Process an MMTk option. Return true if option was processed successfully
+extern bool mmtk_process(char* name, char* value);
+
+// Process MMTk options. Return true if all options were processed successfully
+extern bool mmtk_process_bulk(char* options);
+
+// Sanity only. Scan heap for discrepancies and errors
+extern void mmtk_scan_region(void);
+
+// Request MMTk to trigger a GC. Note that this may not actually trigger a GC
+extern void mmtk_handle_user_collection_request(void* tls);
+
+// Run the main loop for the GC controller thread. Does not return
+extern void mmtk_start_control_collector(void* tls, void* worker);
+
+// Run the main loop for a GC worker. Does not return
+extern void mmtk_start_worker(void* tls, void* worker);
+
+// Return the current amount of free memory in bytes
+extern size_t mmtk_free_bytes(void);
+
+// Return the current amount of used memory in bytes
+extern size_t mmtk_used_bytes(void);
+
+// Return the current amount of total memory in bytes
+extern size_t mmtk_total_bytes(void);
+
+// Return the starting address of MMTk's heap
+extern void* mmtk_starting_heap_address(void);
+
+// Return the ending address of MMTk's heap
+extern void* mmtk_last_heap_address(void);
+
+// Add a reference to the list of weak references
+extern void mmtk_add_weak_candidate(void* ref);
+
+// Add a reference to the list of soft references
+extern void mmtk_add_soft_candidate(void* ref);
+
+// Add a reference to the list of phantom references
+extern void mmtk_add_phantom_candidate(void* ref);
+
+// Generic hook to allow benchmarks to be harnessed
+extern void mmtk_harness_begin(void* tls);
+
+// Generic hook to allow benchmarks to be harnessed
+extern void mmtk_harness_end(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // MMTK_H
diff --git a/rts/include/rts/Flags.h b/rts/include/rts/Flags.h
index e33d97b17c..4ef1b44d8d 100644
--- a/rts/include/rts/Flags.h
+++ b/rts/include/rts/Flags.h
@@ -88,6 +88,8 @@ typedef struct _GC_FLAGS {
bool numa; /* Use NUMA */
StgWord numaMask;
+
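+    /* Disable garbage collection entirely (MMTk NoGC-style operation) */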
+ bool no_gc;
} GC_FLAGS;
/* See Note [Synchronization of flags and base APIs] */
diff --git a/rts/include/rts/StablePtr.h b/rts/include/rts/StablePtr.h
index 2af8c152a3..c49b1386b3 100644
--- a/rts/include/rts/StablePtr.h
+++ b/rts/include/rts/StablePtr.h
@@ -27,6 +27,7 @@ typedef struct {
} spEntry;
extern DLL_IMPORT_RTS spEntry *stable_ptr_table;
+extern unsigned int SPT_size;
ATTR_ALWAYS_INLINE EXTERN_INLINE
StgPtr deRefStablePtr(StgStablePtr sp)
diff --git a/rts/include/rts/storage/ClosureMacros.h b/rts/include/rts/storage/ClosureMacros.h
index ba57df1699..1c88f2e8de 100644
--- a/rts/include/rts/storage/ClosureMacros.h
+++ b/rts/include/rts/storage/ClosureMacros.h
@@ -427,6 +427,9 @@ EXTERN_INLINE StgWord stack_frame_sizeW( StgClosure *frame )
-------------------------------------------------------------------------- */
// The number of card bytes needed
+// Each card covers (1 << MUT_ARR_PTRS_CARD_BITS) elements; elems is the
+// total number of elements in the array. Returns the number of cards,
+// i.e. elems / 2^MUT_ARR_PTRS_CARD_BITS rounded up.
EXTERN_INLINE W_ mutArrPtrsCards (W_ elems);
EXTERN_INLINE W_ mutArrPtrsCards (W_ elems)
{
@@ -442,6 +445,7 @@ EXTERN_INLINE W_ mutArrPtrsCardTableSize (W_ elems)
}
// The address of the card for a particular card number
+// The card table lives at the end of the array; byte n holds the state of card n.
EXTERN_INLINE StgWord8 *mutArrPtrsCard (StgMutArrPtrs *a, W_ n);
EXTERN_INLINE StgWord8 *mutArrPtrsCard (StgMutArrPtrs *a, W_ n)
{
diff --git a/rts/include/rts/storage/Closures.h b/rts/include/rts/storage/Closures.h
index 01ae438a43..1af689e5f1 100644
--- a/rts/include/rts/storage/Closures.h
+++ b/rts/include/rts/storage/Closures.h
@@ -211,8 +211,7 @@ typedef struct {
// An array of heap objects, ie Array# v and MutableArray# v
//
// Closure types: MUT_ARR_PTRS_CLEAN, MUT_ARR_PTRS_DIRTY,
-// MUT_ARR_PTRS_FROZEN_DIRTY, MUT_ARR_PTRS_FROZEN_CLEAN, MUT_VAR_CLEAN,
-// MUT_VAR_DIRTY
+// MUT_ARR_PTRS_FROZEN_DIRTY, MUT_ARR_PTRS_FROZEN_CLEAN
typedef struct _StgMutArrPtrs {
StgHeader header;
StgWord ptrs;
diff --git a/rts/include/rts/storage/InfoTables.h b/rts/include/rts/storage/InfoTables.h
index 77f45fb533..3d03ff260a 100644
--- a/rts/include/rts/storage/InfoTables.h
+++ b/rts/include/rts/storage/InfoTables.h
@@ -186,6 +186,8 @@ typedef StgHalfWord StgSRTField;
/*
* The "standard" part of an info table. Every info table has this bit.
*/
+// We can assume TABLES_NEXT_TO_CODE is defined here; the Rust binding
+// constructs a matching representation (cf. rts/mmtk/mmtk/src/stg_info_table.rs).
typedef struct StgInfoTable_ {
#if !defined(TABLES_NEXT_TO_CODE)
@@ -203,9 +205,9 @@ typedef struct StgInfoTable_ {
/* In a CONSTR:
- the zero-based constructor tag
In a FUN/THUNK
- - if USE_INLINE_SRT_FIELD
- - offset to the SRT (or zero if no SRT)
- - otherwise
+       - if USE_INLINE_SRT_FIELD (a half-word field)
+         - offset to the SRT (or zero if no SRT)
+       - otherwise (a full-word field)
- non-zero if there is an SRT, offset is in srt_offset
*/
diff --git a/rts/include/rts/storage/TSO.h b/rts/include/rts/storage/TSO.h
index 4ca19853d7..16e8889e92 100644
--- a/rts/include/rts/storage/TSO.h
+++ b/rts/include/rts/storage/TSO.h
@@ -106,6 +106,11 @@ typedef struct StgTSO_ {
struct StgTSO_* global_link; // Links threads on the
// generation->threads lists
+    // Links for the global doubly-linked list of all TSOs, used to
+    // iterate threads without exposing the generation structure.
+ struct StgTSO_* tso_link_prev;
+ struct StgTSO_* tso_link_next;
+
/*
* The thread's stack
*/
diff --git a/rts/include/stg/Regs.h b/rts/include/stg/Regs.h
index a4e6db2415..6d8c6c5804 100644
--- a/rts/include/stg/Regs.h
+++ b/rts/include/stg/Regs.h
@@ -106,6 +106,7 @@ typedef struct {
struct CostCentreStack_ * rCCCS; /* current cost-centre-stack */
struct StgTSO_ * rCurrentTSO;
struct nursery_ * rNursery;
+    // set to NULL when the block-allocator nursery is not in use (e.g. under MMTk)
struct bdescr_ * rCurrentNursery; /* Hp/HpLim point into this block */
struct bdescr_ * rCurrentAlloc; /* for allocation using allocate() */
StgWord rHpAlloc; /* number of *bytes* being allocated in heap */
diff --git a/rts/linker/M32Alloc.c b/rts/linker/M32Alloc.c
index 6ad316e164..17d3d12459 100644
--- a/rts/linker/M32Alloc.c
+++ b/rts/linker/M32Alloc.c
@@ -286,13 +286,13 @@ m32_release_page(struct m32_page_t *page)
const size_t pgsz = getPageSize();
ssize_t sz = page->filled_page.size;
- IF_DEBUG(sanity, memset(page, 0xaa, sz));
// Break the page, which may be a large multi-page allocation, into
// individual pages for the page pool
while (sz > 0) {
if (m32_free_page_pool_size < M32_MAX_FREE_PAGE_POOL_SIZE) {
mprotectForLinker(page, pgsz, MEM_READ_WRITE);
+ IF_DEBUG(sanity, memset(page, 0xaa, pgsz));
SET_PAGE_TYPE(page, FREE_PAGE);
page->free_page.next = m32_free_page_pool;
m32_free_page_pool = page;
diff --git a/rts/mmtk/.gitignore b/rts/mmtk/.gitignore
new file mode 100644
index 0000000000..0468884a60
--- /dev/null
+++ b/rts/mmtk/.gitignore
@@ -0,0 +1,17 @@
+# will have compiled files and executables
+target/
+
+# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
+# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
+Cargo.lock
+
+# VSCode
+/.vscode/
+
+# build.py & bench.sh
+/*.dylib
+/*.so
+
+*.o
+*.hi
+mmtk/src/mmtk-heap-closure-test/dist-newstyle/
\ No newline at end of file
diff --git a/rts/mmtk/README.md b/rts/mmtk/README.md
new file mode 100644
index 0000000000..38e01c2d76
--- /dev/null
+++ b/rts/mmtk/README.md
@@ -0,0 +1,83 @@
+# MMTk-GHC
+
+This repository provides a binding between [MMTk](https://github.com/mmtk/mmtk-core) and [GHC](https://gitlab.haskell.org/ghc/ghc).
+
+This binding is currently under development. We aim to make MMTk's garbage collectors work with GHC.
+
+To use it, follow the steps outlined in [Contents](#contents).
+
+---
+
+## Contents
+
+- [MMTk-GHC](#mmtk-ghc)
+ - [Contents](#contents)
+ - [Requirements](#requirements)
+ - [Build](#build)
+ - [Run](#run)
+ - [Development Details](#development-details)
+
+
+## Requirements
+
+We maintain an up-to-date list of the prerequisites for building MMTk and its bindings in the [mmtk-dev-env](https://github.com/mmtk/mmtk-dev-env) repository. Please make sure your dev machine satisfies those prerequisites.
+
+## Build
+
+1. Clone the repository:
+
+ ```
+ git clone --recurse-submodules https://gitlab.haskell.org/JunmingZhao42/ghc.git
+ cd ghc/
+ git checkout mmtk/nogc2.0
+ git submodule update --init --recursive
+ ```
+
+
+2. Build MMTk binding:
+
+ ```
+ cd rts/mmtk
+ cargo build
+ ```
+
+3. [Build GHC](https://gitlab.haskell.org/ghc/ghc/-/wikis/building/hadrian):
+
+ ```
+ ./boot && ./configure
+ hadrian/build --flavour=default+debug_info -j16
+ ```
+
+
+## Run
+
+1. Specify MMTk's GC plan:
+
+ ```
+ export MMTK_PLAN=Immix
+ ```
+
+ Specify MMTk's thread number:
+
+ ```
+ export MMTK_THREADS=1
+ ```
+
+2. Compile your Haskell program (e.g. `hello.hs`) with MMTk (add `-rtsopts` to specify the heap size later):
+
+ ```
+ _build/stage1/bin/ghc -fforce-recomp -mmtk -rtsopts -threaded -debug -g3 -Lrts/mmtk/mmtk/target/debug -optl-lmmtk_ghc hello.hs
+ ```
+
+3. Run the program (e.g. with heap size 8M):
+
+ ```
+ ./hello +RTS -M8M
+ ```
+
+---
+## Development Details
+- University semester-project Report: [Introduce NoGC](https://gitlab.haskell.org/JunmingZhao42/ghc/-/tree/mmtk/nogc2.0/rts/mmtk/docs/introduce_nogc.pdf)
+- [GSoC Report](https://gitlab.haskell.org/JunmingZhao42/ghc/-/tree/mmtk/nogc2.0/rts/mmtk/docs/GSoC_report.md)
+- [Development Diary](https://edit.smart-cactus.org/n4X1UZK_TAOhI1LmmBs6Lg#Mmtk-Notes)
+- IRC Channel: #ghc-mmtk:libera.chat
\ No newline at end of file
diff --git a/rts/mmtk/docs/GSoC_report.md b/rts/mmtk/docs/GSoC_report.md
new file mode 100644
index 0000000000..24d2b30721
--- /dev/null
+++ b/rts/mmtk/docs/GSoC_report.md
@@ -0,0 +1,138 @@
+# Google Summer of Code 2022: Project Summary
+## MMTk Port for GHC: An alternative of the current GHC storage manager
+
+This is the final report for the GSoC 2022 project: the MMTk-GHC binding. The project is a continuation of the semester project [Introduce NoGC from MMTk](https://gitlab.haskell.org/JunmingZhao42/ghc/-/tree/mmtk/nogc2.0/rts/mmtk/docs/introduce_nogc.pdf).
+
+
+## Outline
+- [Motivation](#motivation)
+- [Completed Work](#completed-work)
+- [Demo & Conclusion](#demo-conclusion)
+- [Future Work](#future-work)
+- [Acknowledgement](#acknowledgement)
+- [References](#references)
+
+
+## Motivation
+
+### What is MMTk-GHC binding?
+
+It is the connection between the GHC runtime and the set of garbage collectors provided by MMTk. A successful binding lets developers choose one of MMTk's collectors instead of the native GHC storage manager, offering a greater variety of options for their needs.
+
+### Why do we need the binding?
+
+- Introduce more garbage collection strategies to GHC, potentially improving performance;
+- Improve the generality of GHC's runtime;
+- Allow more opportunities for further development of GHC's garbage collectors, such as multi-domain GC;
+- Improve the compatibility of MMTk with different programming languages.
+
+
+## Completed Work
+
+We worked on implementing MMTk's VM interface in terms of GHC's runtime and object model:
+- Implement and refine a binding to GHC's object model in Rust
+- Introduce a new RTS way and associated compiler flag, `-mmtk`, to select the MMTk-enabled runtime system
+- Implement tracing (i.e. scavenge and evacuate) of GHC heap objects in Rust
+- Connect MMTk's allocators and GC workers to GHC's runtime
+- Integrate MMTk's garbage collection cycle into the runtime system as depicted below:
+```
+ GHC runtime MMTk
+
+ Init runtime │ │
+ │ Bind MMTk allocator(s) │
+ │ ────────────────────────────────► │
+ │ │
+ │ Require allocation │
+ │ ────────────────────────────────► │
+ │ . │
+ │ . │
+ │ . │
+ │ │
+ │ │
+ │ Require allocation │
+ │ ───────────────────────────────► │
+ │ │
+ │ │
+ │ │ Schedule GC
+ │ │
+ │ Stop runtime mutators │
+ │ ◄─────────────────────────────── │
+ │ │ Trace objects from roots
+ │ │
+ │ │ MMTk GC
+ │ │
+ │ Resume runtime mutators │
+ │ ◄─────────────────────────────── │
+ │ . │
+ │ . │
+ │ . │
+ │ │
+```
+
+### Major Implementations
+**Modifications to GHC for MMTk integration:**
+- MMTk is initialized in `rts/RtsStartup.c:hs_init_ghc`
+- MMTk's mutators are bound in `rts/Task.c:newTask`
+- The code generator was modified to populate the STG machine's `Hp` and `HpLim` registers from MMTk's BumpAllocator in `GHC.StgToCmm.Foreign` (see the sketch after this list).
+- `HeapStackCheck.cmm` gained some logic for handling heap overflows in `stg_no_regs` when using MMTk
+- MMTk is entered in `rts/Schedule.c:scheduleDoGC` during the handling of heap overflows
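+
+A rough sketch of the `Hp`/`HpLim` mapping (hypothetical C for illustration only; the real code is generated by `GHC.StgToCmm.Foreign`, and the precise offsets are described there in Note [MMTK off-by-one]):
+
+```c
+// Hypothetical sketch: how the STG heap registers shadow MMTk's bump
+// allocator around entry/exit of Haskell code. Hp points at the last
+// allocated word, hence the one-word offset.
+BumpAllocator *ba = mmtk_get_nursery_allocator(task->mmutator);
+
+// On entry to Haskell code: load the registers from the allocator.
+cap->r.rHp    = (StgPtr) ba->cursor - 1;
+cap->r.rHpLim = (StgPtr) ba->limit  - 1;
+
+// On return to the RTS: publish the mutator's allocation progress back.
+ba->cursor = (void *) (cap->r.rHp + 1);
+```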
+
+**The main part of the binding is under `rts/mmtk/mmtk/src`, where we teach MMTk about the runtime logic and object model of GHC:**
+- Various details to do with starting and stopping mutators around GC are found in `rts/mmtk/mmtk/src/collection.rs`
+- The scanning logic is found in `rts/mmtk/mmtk/src/{scanning,object_scanning,active_plan}.rs`
+- The Rust representation of the GHC heap object model is found in `rts/mmtk/mmtk/src/{stg_closures,stg_info_table}.rs`
+- The linking between GHC and MMTk is found in `rts/mmtk/mmtk/build.rs` and `rts/mmtk/mmtk/src/ghc.rs`
+- Documentation on how to use the binding is in `rts/mmtk/README.md`
+
+## Demo & Conclusion
+Currently, we can test the binding with a non-moving Immix GC plan from MMTk.
+
+We have a demonstration program [`fibo.hs`](https://gitlab.haskell.org/JunmingZhao42/ghc/-/blob/mmtk/nogc2.0/fibo.hs), which can complete a few of MMTk's garbage collection cycles.
+To test it, follow the steps below:
+
+0. [Build MMTk binding and GHC](https://gitlab.haskell.org/JunmingZhao42/ghc/-/blob/mmtk/nogc2.0/rts/mmtk/README.md#build)
+1. Compile `fibo.hs`:
+ ```
+ _build/stage1/bin/ghc -fforce-recomp -mmtk -rtsopts -g3 -threaded -debug -Lrts/mmtk/mmtk/target/debug -optl-lmmtk_ghc fibo.hs
+ ```
+2. Run:
+ ```
+ MMTK_THREADS=1 RUST_BACKTRACE=1 MMTK_PLAN=Immix ./fibo 5000 +RTS -M3M
+ ```
+
+Expected output:
+```
+[2022-09-11T11:57:00Z INFO mmtk::policy::immix::immixspace] Creating non-moving ImmixSpace: immix. Block size: 2^15
+[2022-09-11T11:57:02Z INFO mmtk::memory_manager] Initialized MMTk with Immix
+...
+... # Program prints fibonacci number
+...
+[2022-09-11T16:13:25Z INFO mmtk::plan::global] [POLL] immortal: Triggering collection
+[2022-09-11T16:13:26Z INFO mmtk::scheduler::gc_work] End of GC
+[2022-09-11T16:13:26Z INFO mmtk::plan::global] [POLL] immortal: Triggering collection
+[2022-09-11T16:13:26Z INFO mmtk::scheduler::gc_work] End of GC
+Segmentation fault (core dumped)
+```
+
+`fibo.hs` can complete a few GC cycles, but unfortunately segfaults in the end. The most likely cause is that the current tracing is non-exhaustive, which results in MMTk treating live objects as dead.
+MMTk then reuses their space for new allocations, and when the GHC runtime later reads the already-overwritten memory it segfaults.
+
+
+## Future Work
+- [ ] Integrate support for clearing swept objects into MMTk's Immix policy
+- [ ] Debug the current object tracing bug seen above
+- [ ] Implement tests for object tracing model
+- [ ] Refine object tracing model on objects such as weak pointers
+- [ ] Support more GC plans from MMTk
+
+
+## Acknowledgement
+I am grateful to Google for facilitating the whole program and providing such a fantastic experience of working on a real-world open-source project.
+
+Special thanks to my mentors Ben and Dominic from Haskell, my supervisor Steve from MMTk, and the GHC and MMTk communities who helped me throughout the summer.
+
+
+## References
+- [MMTk porting guide](https://www.mmtk.io/mmtk-core/portingguide/prefix.html)
+- [MMTk-core](https://github.com/mmtk/mmtk-core)
+- [GHCSM](https://well-typed.com/blog/aux/files/nonmoving-gc/design.pdf)
diff --git a/rts/mmtk/docs/introduce_nogc.pdf b/rts/mmtk/docs/introduce_nogc.pdf
new file mode 100644
index 0000000000..4d7f0256ec
--- /dev/null
+++ b/rts/mmtk/docs/introduce_nogc.pdf
Binary files differ
diff --git a/rts/mmtk/flake.lock b/rts/mmtk/flake.lock
new file mode 100644
index 0000000000..715825a2c4
--- /dev/null
+++ b/rts/mmtk/flake.lock
@@ -0,0 +1,60 @@
+{
+ "nodes": {
+ "flake-utils": {
+ "locked": {
+ "lastModified": 1656928814,
+ "narHash": "sha256-RIFfgBuKz6Hp89yRr7+NR5tzIAbn52h8vT6vXkYjZoM=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "7e2a3b3dfd9af950a856d66b0a7d01e3c18aa249",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "gitignore": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1657706534,
+ "narHash": "sha256-5jIzNHKtDu06mA325K/5CshUVb5r7sSmnRiula6Gr7o=",
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "rev": "f840a659d57e53fa751a9248b17149fd0cf2a221",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "type": "github"
+ }
+ },
+ "nixpkgs": {
+ "locked": {
+ "lastModified": 1657624652,
+ "narHash": "sha256-rFJNM0X/dxekT6EESSh80mlBGqztfN/XOF/oRL6in68=",
+ "owner": "nixos",
+ "repo": "nixpkgs",
+ "rev": "68c63e60b8413260605efbe1ac5addaa099cdfb3",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nixos",
+ "ref": "nixos-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "root": {
+ "inputs": {
+ "flake-utils": "flake-utils",
+ "gitignore": "gitignore",
+ "nixpkgs": "nixpkgs"
+ }
+ }
+ },
+ "root": "root",
+ "version": 7
+}
diff --git a/rts/mmtk/flake.nix b/rts/mmtk/flake.nix
new file mode 100644
index 0000000000..b57ac8ea23
--- /dev/null
+++ b/rts/mmtk/flake.nix
@@ -0,0 +1,32 @@
+{
+ description = "MMTK-GHC development environment";
+
+ inputs = {
+ nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable"; # We want to use packages from the binary cache
+ flake-utils.url = "github:numtide/flake-utils";
+ gitignore = { url = "github:hercules-ci/gitignore.nix"; flake = false; };
+ };
+
+ outputs = inputs@{ self, nixpkgs, flake-utils, ... }:
+ flake-utils.lib.eachSystem [ "x86_64-linux" ] (system:
+ let
+ pkgs = nixpkgs.legacyPackages.${system};
+ gitignoreSrc = pkgs.callPackage inputs.gitignore { };
+ llvmPackages = pkgs.llvmPackages_11;
+ in rec {
+ devShell = pkgs.mkShell {
+ CARGO_INSTALL_ROOT = "${toString ./.}/.cargo";
+
+ LIBCLANG_PATH = "${llvmPackages.libclang.lib}/lib";
+
+ LLVM_CONFIG_PATH = "${llvmPackages.llvm.dev}/bin/llvm-config";
+
+ buildInputs = with pkgs; [
+ rustup
+ git
+ # For bindgen
+ llvm clang
+ ];
+ };
+ });
+}
diff --git a/rts/mmtk/ghc/mmtk.h b/rts/mmtk/ghc/mmtk.h
new file mode 100644
index 0000000000..f9d24828ba
--- /dev/null
+++ b/rts/mmtk/ghc/mmtk.h
@@ -0,0 +1,142 @@
+// This is an example of native API for the single instance MMTk.
+
+// Note: the mmtk core does not directly provide this API. However, it provides
+// a similar multi-instance Rust API. A VM binding should write their own C
+// header file (possibly based on this example with their own extension and
+// modification), and expose the Rust API based on their native API.
+
+
+// nogc would require:
+// - gc_init
+// - alloc
+// - bind_mutator
+
+#ifndef MMTK_H
+#define MMTK_H
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct BumpAllocator {
+ void* tls;
+ void* cursor;
+ void* limit;
+} BumpAllocator;
+
+typedef void* MMTk_Mutator;
+
+// Initialize an MMTk instance
+extern void mmtk_init(size_t heap_size);
+
+// Request MMTk to create a new mutator for the given `tls` thread
+extern MMTk_Mutator mmtk_bind_mutator(void* tls);
+
+// Reclaim mutator that is no longer needed
+extern void mmtk_destroy_mutator(MMTk_Mutator mutator);
+
+// Flush mutator local state
+extern void mmtk_flush_mutator(MMTk_Mutator mutator);
+
+// Initialize MMTk scheduler and GC workers
+extern void mmtk_initialize_collection(void* tls);
+
+// Allow MMTk to perform a GC when the heap is full
+extern void mmtk_enable_collection(void);
+
+// Disallow MMTk to perform a GC when the heap is full
+extern void mmtk_disable_collection(void);
+
+extern BumpAllocator* mmtk_get_nursery_allocator(MMTk_Mutator mutator);
+
+// Allocate memory for an object
+extern void* mmtk_alloc(MMTk_Mutator mutator,
+ size_t size,
+ size_t align,
+ ssize_t offset,
+ int allocator);
+
+// Slowpath allocation for an object
+extern void* mmtk_alloc_slow(MMTk_Mutator mutator,
+ size_t size,
+ size_t align,
+ ssize_t offset,
+ int allocator);
+
+// Perform post-allocation hooks or actions such as initializing object metadata
+extern void mmtk_post_alloc(MMTk_Mutator mutator,
+ void* refer,
+ int bytes,
+ int allocator);
+
+// Return if the object pointed to by `ref` is live
+extern bool mmtk_is_live_object(void* ref);
+
+// Return if the object pointed to by `ref` is in mapped memory
+extern bool mmtk_is_mapped_object(void* ref);
+
+// Return if the address pointed to by `addr` is in mapped memory
+extern bool mmtk_is_mapped_address(void* addr);
+
+// Check if a GC is in progress and if the object `ref` is movable
+extern void mmtk_modify_check(void* ref);
+
+// Return if object pointed to by `object` will never move
+extern bool mmtk_will_never_move(void* object);
+
+// Process an MMTk option. Return true if option was processed successfully
+extern bool mmtk_process(char* name, char* value);
+
+// Process MMTk options. Return true if all options were processed successfully
+extern bool mmtk_process_bulk(char* options);
+
+// Sanity only. Scan heap for discrepancies and errors
+extern void mmtk_scan_region(void);
+
+// Request MMTk to trigger a GC. Note that this may not actually trigger a GC
+extern void mmtk_handle_user_collection_request(void* tls);
+
+// Run the main loop for the GC controller thread. Does not return
+extern void mmtk_start_control_collector(void* tls, void* worker);
+
+// Run the main loop for a GC worker. Does not return
+extern void mmtk_start_worker(void* tls, void* worker);
+
+// Return the current amount of free memory in bytes
+extern size_t mmtk_free_bytes(void);
+
+// Return the current amount of used memory in bytes
+extern size_t mmtk_used_bytes(void);
+
+// Return the current amount of total memory in bytes
+extern size_t mmtk_total_bytes(void);
+
+// Return the starting address of MMTk's heap
+extern void* mmtk_starting_heap_address(void);
+
+// Return the ending address of MMTk's heap
+extern void* mmtk_last_heap_address(void);
+
+// Add a reference to the list of weak references
+extern void mmtk_add_weak_candidate(void* ref);
+
+// Add a reference to the list of soft references
+extern void mmtk_add_soft_candidate(void* ref);
+
+// Add a reference to the list of phantom references
+extern void mmtk_add_phantom_candidate(void* ref);
+
+// Generic hook to allow benchmarks to be harnessed
+extern void mmtk_harness_begin(void* tls);
+
+// Generic hook to allow benchmarks to be harnessed
+extern void mmtk_harness_end(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // MMTK_H
diff --git a/rts/mmtk/ghc/mmtk_upcalls.h b/rts/mmtk/ghc/mmtk_upcalls.h
new file mode 100644
index 0000000000..259ad4357e
--- /dev/null
+++ b/rts/mmtk/ghc/mmtk_upcalls.h
@@ -0,0 +1,9 @@
+#pragma once
+
+#include "mmtk.h"
+
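+// Upcalls from the Rust MMTk binding back into the GHC RTS:
+//   upcall_is_task            -- is the given pointer a live Task? (Task.c)
+//   upcall_get_mutator        -- the MMTk mutator bound to the given thread
+//   upcall_spawn_gc_controller, upcall_spawn_gc_worker
+//                             -- spawn OS threads running MMTk's GC loops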
+bool upcall_is_task(void* task);
+MMTk_Mutator *upcall_get_mutator(void *tls);
+
+void upcall_spawn_gc_controller(void *controller);
+void upcall_spawn_gc_worker(void *worker);
diff --git a/rts/mmtk/mmtk-core b/rts/mmtk/mmtk-core
new file mode 160000
+Subproject 4aa643773ec3ec9c4f47f696533bdd56554c03d
diff --git a/rts/mmtk/mmtk/Cargo.toml b/rts/mmtk/mmtk/Cargo.toml
new file mode 100644
index 0000000000..b3bab64214
--- /dev/null
+++ b/rts/mmtk/mmtk/Cargo.toml
@@ -0,0 +1,33 @@
+[package]
+name = "mmtk_ghc"
+version = "0.0.1"
+authors = [" <>"]
+edition = "2021"
+
+[lib]
+name = "mmtk_ghc"
+# be careful - LTO is only allowed for certain crate types
+crate-type = ["staticlib"]
+
+[profile.release]
+lto = true
+
+[package.metadata.ghc]
+ghc_repo = "https://gitlab.haskell.org/ghc/ghc"
+# ghc_version = "a4313b7f470afdc49bb9b1d32fd205c1e94db367" # dummy version (HEAD)
+
+[build-dependencies]
+bindgen = "0.60.1"
+
+[dependencies]
+libc = "0.2"
+lazy_static = "1.1"
+atomic_refcell = "0.1.7"
+# mmtk = { git = "https://github.com/mmtk/mmtk-core.git", rev = "cd6d8984c10c294c991dcd5f154ce41073c06ab9" }
+# Uncomment the following to build locally
+mmtk = { path = "../mmtk-core", features=["immix_no_defrag", "work_packet_stats"] }
+
+[features]
+default = []
+is_mmtk_object = ["mmtk/is_mmtk_object"]
+nogc = []
diff --git a/rts/mmtk/mmtk/build.rs b/rts/mmtk/mmtk/build.rs
new file mode 100644
index 0000000000..1337df401c
--- /dev/null
+++ b/rts/mmtk/mmtk/build.rs
@@ -0,0 +1,31 @@
+extern crate bindgen;
+
+use std::env;
+use std::path::PathBuf;
+
+fn main() {
+ println!("cargo:rerun-if-changed=wrapper.h");
+
+ let bindings = bindgen::Builder::default()
+ .header("wrapper.h")
+ .clang_arg("-I../../../_build/stage1/rts/build/include")
+ .clang_arg("-I../../include")
+ .clang_arg("-I../..")
+ .clang_arg("-DTHREADED_RTS") // todo: handle non threaded runtime
+ .allowlist_type("Capability")
+ .allowlist_var("capabilities")
+ .allowlist_var("n_capabilities")
+ .allowlist_type("Task")
+ .blocklist_type("StgTSO_")
+ .blocklist_type("StgTSO")
+ .parse_callbacks(Box::new(bindgen::CargoCallbacks))
+ .generate()
+ .expect("Unable to generate bindings");
+
+ let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
+ bindings
+ .write_to_file(out_path.join("bindings.rs"))
+ .expect("Couldn't write bindings!");
+}
+
+
diff --git a/rts/mmtk/mmtk/rust-toolchain b/rts/mmtk/mmtk/rust-toolchain
new file mode 100644
index 0000000000..8445feb8bd
--- /dev/null
+++ b/rts/mmtk/mmtk/rust-toolchain
@@ -0,0 +1 @@
+nightly-2022-02-11
diff --git a/rts/mmtk/mmtk/src/active_plan.rs b/rts/mmtk/mmtk/src/active_plan.rs
new file mode 100644
index 0000000000..359f7beb05
--- /dev/null
+++ b/rts/mmtk/mmtk/src/active_plan.rs
@@ -0,0 +1,132 @@
+use mmtk::Plan;
+use mmtk::vm::ActivePlan;
+use mmtk::util::{opaque_pointer::*, ObjectReference, Address};
+use mmtk::Mutator;
+use mmtk::ObjectQueue;
+use mmtk::scheduler::GCWorker;
+use crate::GHCVM;
+use crate::SINGLETON;
+use crate::ghc::*;
+use crate::stg_closures::*;
+use crate::stg_info_table::*;
+
+
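+// Parity flag flipped once per GC cycle (see VMCollection::vm_release).
+// Together with the tag bits of static_link it distinguishes static
+// objects visited in the current cycle from those visited in the last.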
+static mut STATIC_FLAG: bool = false;
+
+pub fn bump_static_flag() {
+ unsafe {
+ STATIC_FLAG = !STATIC_FLAG;
+ }
+}
+
+fn get_static_flag() -> bool {
+ unsafe {
+ STATIC_FLAG
+ }
+}
+
+static mut ITERATOR: *const Task = std::ptr::null();
+
+pub struct VMActivePlan<> {}
+
+impl ActivePlan<GHCVM> for VMActivePlan {
+ fn global() -> &'static dyn Plan<VM=GHCVM> {
+ SINGLETON.get_plan()
+ }
+
+ fn number_of_mutators() -> usize {
+ // todo: number of tasks
+ unsafe {n_capabilities as usize}
+ }
+
+ fn is_mutator(tls: VMThread) -> bool {
+ unsafe { upcall_is_task(tls) }
+ }
+
+ fn mutator(tls: VMMutatorThread) -> &'static mut Mutator<GHCVM> {
+ unsafe { &mut *upcall_get_mutator(tls) }
+ }
+
+ fn reset_mutator_iterator() {
+ unsafe {
+ ITERATOR = all_tasks;
+ }
+ }
+
+ fn get_next_mutator() -> Option<&'static mut Mutator<GHCVM>> {
+ unsafe {
+ // println!("Next Iterator {:?}", ITERATOR);
+ // TODO: acquire all_tasks_mutex
+ if ! ITERATOR.is_null() {
+ let task = ITERATOR;
+ ITERATOR = (*task).all_next;
+ let result = (*task).mmutator;
+ Some(std::mem::transmute(result))
+ }
+ else {
+ None
+ }
+ }
+ }
+
+ fn vm_trace_object<Q: ObjectQueue>(
+ queue: &mut Q,
+ object: ObjectReference,
+ _worker: &mut GCWorker<GHCVM>,
+ ) -> ObjectReference {
+ // Modelled after evacuate_static_object, returns true if this
+ // is the first time the object has been visited in this GC.
+ let mut evacuate_static = |static_link: &mut TaggedClosureRef| -> bool {
+ let cur_static_flag = if get_static_flag() { 2 } else { 1 };
+ let prev_static_flag = if get_static_flag() { 1 } else { 2 };
+ let object_visited: bool = (static_link.get_tag() | prev_static_flag) != 3;
+ if !object_visited {
+ // N.B. We don't need to maintain a list of static objects, therefore ZERO
+ *static_link = TaggedClosureRef::from_address(Address::ZERO).set_tag(cur_static_flag);
+ enqueue_roots(queue, object);
+ }
+ !object_visited
+ };
+
+ // Modelled after evacuate() (lines 713 through 760)
+ let tagged_ref = TaggedClosureRef::from_object_reference(object);
+ let info_table = tagged_ref.get_info_table();
+
+ use crate::stg_closures::Closure::*;
+ match tagged_ref.to_closure() {
+ ThunkStatic(thunk) => {
+ // if srt != 0
+ if let Some(_) = StgThunkInfoTable::from_info_table(info_table).get_srt() {
+ let static_link_ref = &mut thunk.static_link;
+ evacuate_static(static_link_ref);
+ }
+ },
+ FunStatic(fun) => {
+ let srt = StgFunInfoTable::from_info_table(info_table).get_srt();
+ let ptrs = unsafe { info_table.layout.payload.ptrs };
+ // if srt != 0 || ptrs != 0
+ if srt.is_some() || (ptrs != 0) {
+ let offset = unsafe { ptrs + info_table.layout.payload.nptrs };
+ let static_link_ref = fun.payload.get_ref(offset as usize);
+ evacuate_static(static_link_ref);
+ }
+ },
+ IndirectStatic(ind) => {
+ evacuate_static(&mut ind.static_link);
+ },
+ Constr(constr) => {
+ let offset = unsafe { info_table.layout.payload.ptrs + info_table.layout.payload.nptrs };
+ let static_link_ref = constr.payload.get_ref(offset as usize);
+ evacuate_static(static_link_ref);
+ },
+ _ => panic!("invalid static closure"),
+ };
+ object
+ }
+}
+
+
+pub fn enqueue_roots<Q: ObjectQueue>(queue: &mut Q, object: ObjectReference)
+{
+ queue.enqueue(object);
+}
\ No newline at end of file
diff --git a/rts/mmtk/mmtk/src/api.rs b/rts/mmtk/mmtk/src/api.rs
new file mode 100644
index 0000000000..c19c031b0d
--- /dev/null
+++ b/rts/mmtk/mmtk/src/api.rs
@@ -0,0 +1,236 @@
+// All functions here are extern "C" entry points; there is little point in marking them unsafe.
+#![allow(clippy::not_unsafe_ptr_arg_deref)]
+
+use libc::c_char;
+use std::sync::atomic::Ordering;
+use std::ffi::CStr;
+use mmtk::memory_manager;
+use mmtk::AllocationSemantics;
+use mmtk::util::{ObjectReference, Address};
+use mmtk::util::opaque_pointer::*;
+use mmtk::scheduler::{GCController, GCWorker};
+use mmtk::Mutator;
+use crate::GHCVM;
+use crate::SINGLETON;
+use crate::BUILDER;
+
+#[no_mangle]
+pub extern "C" fn mmtk_init(heap_size: usize) {
+ // set heap size first
+ {
+ let mut builder = BUILDER.lock().unwrap();
+ let success = builder.options.heap_size.set(heap_size);
+ assert!(success, "Failed to set heap size to {}", heap_size);
+ }
+
+ // Make sure MMTk has not yet been initialized
+ assert!(!crate::MMTK_INITIALIZED.load(Ordering::SeqCst));
+ // Initialize MMTk here
+ lazy_static::initialize(&SINGLETON);
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_bind_mutator(tls: VMMutatorThread) -> *mut Mutator<GHCVM> {
+ Box::into_raw(memory_manager::bind_mutator(&SINGLETON, tls))
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_destroy_mutator(mutator: *mut Mutator<GHCVM>) {
+ // notify mmtk-core about destroyed mutator
+ memory_manager::destroy_mutator(unsafe { &mut *mutator });
+ // turn the ptr back to a box, and let Rust properly reclaim it
+ let _ = unsafe { Box::from_raw(mutator) };
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_get_nursery_allocator(mutator: *mut Mutator<GHCVM>) -> *mut mmtk::util::alloc::BumpAllocator<GHCVM> {
+ let mutator = unsafe { &mut *mutator };
+ let allocator = unsafe { mutator.allocators.bump_pointer[0].assume_init_mut() };
+ allocator
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_alloc(mutator: *mut Mutator<GHCVM>, size: usize,
+ align: usize, offset: isize, mut semantics: AllocationSemantics) -> Address {
+ if size >= SINGLETON.get_plan().constraints().max_non_los_default_alloc_bytes {
+ semantics = AllocationSemantics::Los;
+ }
+ memory_manager::alloc::<GHCVM>(unsafe { &mut *mutator }, size, align, offset, semantics)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_alloc_slow(mutator: *mut Mutator<GHCVM>, size: usize,
+ align: usize, offset: isize) -> Address {
+ use crate::mmtk::util::alloc::Allocator;
+ let mutator = unsafe { &mut *mutator };
+ let allocator = unsafe { mutator.allocators.bump_pointer[0].assume_init_mut() };
+ allocator.alloc_slow(size, align, offset)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_post_alloc(mutator: *mut Mutator<GHCVM>, refer: ObjectReference,
+ bytes: usize, mut semantics: AllocationSemantics) {
+ if bytes >= SINGLETON.get_plan().constraints().max_non_los_default_alloc_bytes {
+ semantics = AllocationSemantics::Los;
+ }
+ memory_manager::post_alloc::<GHCVM>(unsafe { &mut *mutator }, refer, bytes, semantics)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_will_never_move(object: ObjectReference) -> bool {
+ !object.is_movable()
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_start_control_collector(tls: VMWorkerThread, controller: &'static mut GCController<GHCVM>) {
+ memory_manager::start_control_collector(&SINGLETON, tls, controller);
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_start_worker(tls: VMWorkerThread, worker: &'static mut GCWorker<GHCVM>) {
+ memory_manager::start_worker::<GHCVM>(&SINGLETON, tls, worker)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_initialize_collection(tls: VMThread) {
+ memory_manager::initialize_collection(&SINGLETON, tls)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_disable_collection() {
+ memory_manager::disable_collection(&SINGLETON)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_enable_collection() {
+ memory_manager::enable_collection(&SINGLETON)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_used_bytes() -> usize {
+ memory_manager::used_bytes(&SINGLETON)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_free_bytes() -> usize {
+ memory_manager::free_bytes(&SINGLETON)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_total_bytes() -> usize {
+ memory_manager::total_bytes(&SINGLETON)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_is_live_object(object: ObjectReference) -> bool{
+ memory_manager::is_live_object(object)
+}
+
+#[cfg(feature = "is_mmtk_object")]
+#[no_mangle]
+pub extern "C" fn mmtk_is_mmtk_object(addr: Address) -> bool {
+ memory_manager::is_mmtk_object(addr)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_is_in_mmtk_spaces(object: ObjectReference) -> bool {
+ memory_manager::is_in_mmtk_spaces::<GHCVM>(object)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_is_mapped_address(address: Address) -> bool {
+ memory_manager::is_mapped_address(address)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_modify_check(object: ObjectReference) {
+ memory_manager::modify_check(&SINGLETON, object)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_handle_user_collection_request(tls: VMMutatorThread) {
+ memory_manager::handle_user_collection_request::<GHCVM>(&SINGLETON, tls);
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_add_weak_candidate(reff: ObjectReference) {
+ memory_manager::add_weak_candidate(&SINGLETON, reff)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_add_soft_candidate(reff: ObjectReference) {
+ memory_manager::add_soft_candidate(&SINGLETON, reff)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_add_phantom_candidate(reff: ObjectReference) {
+ memory_manager::add_phantom_candidate(&SINGLETON, reff)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_harness_begin(tls: VMMutatorThread) {
+ memory_manager::harness_begin(&SINGLETON, tls)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_harness_end() {
+ memory_manager::harness_end(&SINGLETON)
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_process(name: *const c_char, value: *const c_char) -> bool {
+ let name_str: &CStr = unsafe { CStr::from_ptr(name) };
+ let value_str: &CStr = unsafe { CStr::from_ptr(value) };
+ let mut builder = BUILDER.lock().unwrap();
+ memory_manager::process(&mut builder, name_str.to_str().unwrap(), value_str.to_str().unwrap())
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_starting_heap_address() -> Address {
+ memory_manager::starting_heap_address()
+}
+
+#[no_mangle]
+pub extern "C" fn mmtk_last_heap_address() -> Address {
+ memory_manager::last_heap_address()
+}
+
+#[no_mangle]
+#[cfg(feature = "malloc_counted_size")]
+pub extern "C" fn mmtk_counted_malloc(size: usize) -> Address {
+ memory_manager::counted_malloc::<GHCVM>(&SINGLETON, size)
+}
+#[no_mangle]
+pub extern "C" fn mmtk_malloc(size: usize) -> Address {
+ memory_manager::malloc(size)
+}
+
+#[no_mangle]
+#[cfg(feature = "malloc_counted_size")]
+pub extern "C" fn mmtk_counted_calloc(num: usize, size: usize) -> Address {
+ memory_manager::counted_calloc::<GHCVM>(&SINGLETON, num, size)
+}
+#[no_mangle]
+pub extern "C" fn mmtk_calloc(num: usize, size: usize) -> Address {
+ memory_manager::calloc(num, size)
+}
+
+#[no_mangle]
+#[cfg(feature = "malloc_counted_size")]
+pub extern "C" fn mmtk_realloc_with_old_size(addr: Address, size: usize, old_size: usize) -> Address {
+ memory_manager::realloc_with_old_size::<GHCVM>(&SINGLETON, addr, size, old_size)
+}
+#[no_mangle]
+pub extern "C" fn mmtk_realloc(addr: Address, size: usize) -> Address {
+ memory_manager::realloc(addr, size)
+}
+
+#[no_mangle]
+#[cfg(feature = "malloc_counted_size")]
+pub extern "C" fn mmtk_free_with_size(addr: Address, old_size: usize) {
+ memory_manager::free_with_size::<GHCVM>(&SINGLETON, addr, old_size)
+}
+#[no_mangle]
+pub extern "C" fn mmtk_free(addr: Address) {
+ memory_manager::free(addr)
+}
\ No newline at end of file
diff --git a/rts/mmtk/mmtk/src/collection.rs b/rts/mmtk/mmtk/src/collection.rs
new file mode 100644
index 0000000000..983aae2543
--- /dev/null
+++ b/rts/mmtk/mmtk/src/collection.rs
@@ -0,0 +1,68 @@
+use std::sync::atomic::{AtomicBool, Ordering};
+use mmtk::vm::Collection;
+use mmtk::vm::GCThreadContext;
+use mmtk::MutatorContext;
+use mmtk::util::opaque_pointer::*;
+use mmtk::scheduler::*;
+use mmtk::Mutator;
+use crate::ghc::{Capability, n_capabilities, Task, vm_mutator_thread_to_task};
+use crate::GHCVM;
+
+pub struct VMCollection {}
+
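+// Set by block_for_gc when a collection has been requested; mutators
+// spin (yielding their capability) until resume_mutators clears it.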
+static MMTK_GC_PENDING: AtomicBool = AtomicBool::new(false);
+
+extern "C" {
+ fn getMyTask() -> *const Task;
+ fn stopAllCapabilitiesForMMTK(task: *const Task);
+ fn releaseAllCapabilities(n_capabilities: u32, cap: *const Capability, task: *const Task);
+ fn yieldCapabilityForMMTK(task: *const Task, did_gc_last: bool);
+ fn upcall_spawn_gc_controller(controller: *mut GCController<GHCVM>);
+ fn upcall_spawn_gc_worker(worker: *mut GCWorker<GHCVM>);
+}
+
+impl Collection<GHCVM> for VMCollection {
+ fn stop_all_mutators<F: FnMut(&'static mut Mutator<GHCVM>)>(_tls: VMWorkerThread, _mutator_visitor: F) {
+ unsafe {
+ let task = getMyTask();
+ stopAllCapabilitiesForMMTK(task);
+ }
+ }
+
+ fn resume_mutators(_tls: VMWorkerThread) {
+ unsafe {
+ let task = getMyTask();
+ MMTK_GC_PENDING.store(false, Ordering::SeqCst);
+            let no_capability: *const Capability = std::ptr::null();
+ releaseAllCapabilities(n_capabilities, no_capability, task);
+ }
+ }
+
+ fn block_for_gc(tls: VMMutatorThread) {
+ unsafe {
+ MMTK_GC_PENDING.store(true, Ordering::SeqCst);
+ let task = vm_mutator_thread_to_task(tls);
+ while MMTK_GC_PENDING.load(Ordering::SeqCst) {
+ yieldCapabilityForMMTK(task, false);
+ }
+ }
+ }
+
+ fn spawn_gc_thread(_tls: VMThread, ctx: GCThreadContext<GHCVM>) {
+ unsafe {
+ match ctx {
+ GCThreadContext::Controller(controller) => upcall_spawn_gc_controller(Box::into_raw(controller)),
+ GCThreadContext::Worker(worker) => upcall_spawn_gc_worker(Box::into_raw(worker)),
+ }
+ }
+ }
+
+ fn prepare_mutator<T: MutatorContext<GHCVM>>(_tls_w: VMWorkerThread, _tls_m: VMMutatorThread, _mutator: &T) {
+ }
+
+ fn vm_release() {
+ crate::active_plan::bump_static_flag();
+ }
+
+ // TODO: handle schedule_finalization, process_weak_refs
+}
diff --git a/rts/mmtk/mmtk/src/edges.rs b/rts/mmtk/mmtk/src/edges.rs
new file mode 100644
index 0000000000..9d0108a93c
--- /dev/null
+++ b/rts/mmtk/mmtk/src/edges.rs
@@ -0,0 +1,146 @@
+use crate::stg_closures::TaggedClosureRef;
+use crate::stg_info_table::*;
+use mmtk::util::constants::LOG_BYTES_IN_ADDRESS;
+use mmtk::util::{Address, ObjectReference};
+use mmtk::vm::edge_shape::{Edge, MemorySlice};
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub enum GHCEdge {
+ /// An edge corresponding to a pointer field of a closure
+ ClosureRef(*mut TaggedClosureRef), // TODO: Use Atomic<...>
+
+ /// An edge corresponding to the SRT of an info table.
+ /// Precondition: The info table must have an SRT
+ ThunkSrtRef(*mut StgThunkInfoTable),
+
+ /// An edge corresponding to the SRT of an info table.
+ /// Precondition: The info table must have an SRT
+ RetSrtRef(*mut StgRetInfoTable),
+
+ FunSrtRef(*mut StgFunInfoTable),
+}
+
+unsafe impl Send for GHCEdge {}
+
+impl Edge for GHCEdge {
+ fn load(&self) -> ObjectReference {
+ match self {
+ GHCEdge::ClosureRef(c) => unsafe {
+ let c: *mut TaggedClosureRef = *c;
+ let closure_ref: TaggedClosureRef = *c; // loads the pointer from the reference field
+ let addr: Address = closure_ref.to_address(); // untags the pointer
+ ObjectReference::from_raw_address(addr) // converts it to an mmtk ObjectReference
+ },
+ GHCEdge::ThunkSrtRef(info_tbl) => unsafe {
+ let some_table = <*mut StgThunkInfoTable>::as_ref(*info_tbl);
+ if let Some(table) = some_table {
+ match table.get_srt() {
+ Some(srt) => ObjectReference::from_raw_address(Address::from_ptr(srt)),
+ None => panic!("Pushed SrtRef edge for info table without SRT"),
+ }
+ } else {
+                panic!("Pushed SrtRef edge without info table")
+ }
+ }
+ GHCEdge::RetSrtRef(info_tbl) => unsafe {
+ let some_table = <*mut StgRetInfoTable>::as_ref(*info_tbl);
+ if let Some(table) = some_table {
+ match table.get_srt() {
+ Some(srt) => ObjectReference::from_raw_address(Address::from_ptr(srt)),
+ None => panic!("Pushed SrtRef edge for info table without SRT"),
+ }
+ } else {
+                panic!("Pushed SrtRef edge without info table")
+ }
+ }
+ GHCEdge::FunSrtRef(info_tbl) => unsafe {
+ let some_table = <*mut StgFunInfoTable>::as_ref(*info_tbl);
+ if let Some(table) = some_table {
+ match table.get_srt() {
+ Some(srt) => ObjectReference::from_raw_address(Address::from_ptr(srt)),
+ None => panic!("Pushed SrtRef edge for info table without SRT"),
+ }
+ } else {
+                panic!("Pushed SrtRef edge without info table")
+ }
+ }
+ }
+ }
+
+ fn store(&self, object: ObjectReference) {
+ match self {
+ GHCEdge::ClosureRef(c) => unsafe {
+ *(*c) = TaggedClosureRef::from_address(object.to_raw_address());
+ },
+ GHCEdge::FunSrtRef(_) | GHCEdge::ThunkSrtRef(_) | GHCEdge::RetSrtRef(_) => {
+ panic!("Attempted to store into an SrtRef");
+ }
+ }
+ }
+}
+
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct GHCVMMemorySlice(*mut [ObjectReference]);
+
+unsafe impl Send for GHCVMMemorySlice {}
+
+impl MemorySlice for GHCVMMemorySlice {
+ type Edge = GHCEdge;
+ type EdgeIterator = GHCVMMemorySliceIterator;
+
+ fn iter_edges(&self) -> Self::EdgeIterator {
+ GHCVMMemorySliceIterator {
+ cursor: unsafe { (*self.0).as_mut_ptr_range().start },
+ limit: unsafe { (*self.0).as_mut_ptr_range().end },
+ }
+ }
+
+ fn start(&self) -> Address {
+ Address::from_ptr(unsafe { (*self.0).as_ptr_range().start })
+ }
+
+ fn bytes(&self) -> usize {
+ unsafe { (*self.0).len() * std::mem::size_of::<ObjectReference>() }
+ }
+
+ fn copy(src: &Self, tgt: &Self) {
+ debug_assert_eq!(src.bytes(), tgt.bytes());
+ debug_assert_eq!(
+ src.bytes() & ((1 << LOG_BYTES_IN_ADDRESS) - 1),
+ 0,
+ "bytes are not a multiple of words"
+ );
+ // Raw memory copy
+ unsafe {
+ let words = tgt.bytes() >> LOG_BYTES_IN_ADDRESS;
+ let src = src.start().to_ptr::<usize>();
+ let tgt = tgt.start().to_mut_ptr::<usize>();
+ std::ptr::copy(src, tgt, words)
+ }
+ }
+}
+
+pub struct GHCVMMemorySliceIterator {
+ cursor: *mut ObjectReference,
+ limit: *mut ObjectReference,
+}
+
+impl Iterator for GHCVMMemorySliceIterator {
+ type Item = GHCEdge;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.cursor >= self.limit {
+ None
+ } else {
+ // TODO: next object
+ // let edge = self.cursor;
+ // self.cursor = unsafe { self.cursor.add(1) };
+ // Some(GHCEdge::Simple(SimpleEdge::from_address(
+ // Address::from_ptr(edge),
+ // )))
+ None
+ }
+ }
+}
\ No newline at end of file
diff --git a/rts/mmtk/mmtk/src/ghc.rs b/rts/mmtk/mmtk/src/ghc.rs
new file mode 100644
index 0000000000..52fcdff951
--- /dev/null
+++ b/rts/mmtk/mmtk/src/ghc.rs
@@ -0,0 +1,127 @@
+use mmtk::util::opaque_pointer::*;
+use mmtk::Mutator;
+use crate::GHCVM;
+use crate::types::{StgPtr, StgWord16};
+use crate::stg_closures::{StgTSO, TaggedClosureRef, StgClosure};
+
+pub use binding::Task;
+
+mod binding {
+ #![allow(dead_code)]
+ #![allow(non_upper_case_globals)]
+ #![allow(non_camel_case_types)]
+ #![allow(non_snake_case)]
+
+ use crate::stg_closures::{TaggedClosureRef, StgTSO};
+ use libc::c_void;
+
+ type StgTSO_ = StgTSO;
+
+ extern "C" {
+ pub fn markCapabilities(
+ f: unsafe extern "C" fn(*const c_void, *const TaggedClosureRef) -> (),
+ ctx: *const c_void
+ );
+ }
+
+ include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
+}
+
+extern "C" {
+ pub fn closure_sizeW (p : *const StgClosure) -> u32;
+ pub fn upcall_get_mutator(tls: VMMutatorThread) -> *mut Mutator<GHCVM>;
+ pub fn upcall_is_task(tls: VMThread) -> bool;
+ pub static closure_flags : *const StgWord16;
+ pub static all_tasks: *const Task;
+ pub static SPT_size: u32;
+ pub static stg_END_TSO_QUEUE_closure: StgTSO;
+ pub static n_capabilities: u32;
+ pub static mut global_TSOs: *mut StgTSO;
+ pub static mut stable_ptr_table: *mut spEntry;
+}
+
+#[allow(non_snake_case)]
+pub fn markCapabilities<F: Fn(*const TaggedClosureRef)>(f: F) {
+ use libc::c_void;
+ unsafe extern "C" fn wrapper<F: Fn(*const TaggedClosureRef)>(
+ ctx: *const c_void,
+ value: *const TaggedClosureRef
+ ) {
+ (*(ctx as *const F))(value);
+ }
+ unsafe {
+ binding::markCapabilities(wrapper::<F>, &f as *const F as *const c_void);
+ }
+}
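+
+// Example (sketch): collect every root slot the RTS reports, using the
+// closure-to-C-callback wrapper above. Illustrative only; the slots are
+// assumed to remain valid while the world is stopped.
+#[allow(dead_code)]
+fn example_collect_capability_roots() -> Vec<*const TaggedClosureRef> {
+    let roots = std::cell::RefCell::new(Vec::new());
+    markCapabilities(|slot| roots.borrow_mut().push(slot));
+    roots.into_inner()
+}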
+
+
+#[repr(C)]
+pub struct Capability (*mut binding::Capability);
+
+impl Capability {
+ pub fn iter_run_queue(&self) -> TSOIter {
+ TSOIter(self.run_queue_hd)
+ }
+}
+
+impl std::ops::Deref for Capability {
+ type Target = binding::Capability;
+ fn deref(&self) -> &binding::Capability {
+ unsafe { &(*self.0) }
+ }
+}
+
+impl std::ops::DerefMut for Capability {
+ fn deref_mut(&mut self) -> &mut binding::Capability {
+ unsafe { &mut (*self.0) }
+ }
+}
+
+#[repr(C)]
+pub struct spEntry {
+ pub addr : StgPtr
+}
+
+/// An iterator over a linked-list of TSOs (via the `link` field).
+#[repr(C)]
+pub struct TSOIter (*mut StgTSO);
+
+impl Iterator for TSOIter {
+ type Item = &'static mut StgTSO;
+ fn next(&mut self) -> Option<&'static mut StgTSO> {
+ unsafe {
+            // The queue is terminated by the sentinel stg_END_TSO_QUEUE_closure;
+            // yield the current TSO, then advance along the `link` field.
+            if self.0 as *const StgTSO == &stg_END_TSO_QUEUE_closure {
+                None
+            } else {
+                let tso = &mut *self.0;
+                self.0 = tso.link;
+                Some(tso)
+            }
+ }
+ }
+}
+
+/// This must only be used during a stop-the-world period, when the capability count is known to be
+/// fixed.
+pub fn iter_capabilities() -> impl Iterator<Item=Capability> {
+ unsafe {
+ binding::capabilities.iter()
+ .take(binding::n_capabilities.try_into().unwrap())
+ .map(|x| Capability(*x))
+ }
+}
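+
+// Example (sketch): count the runnable TSOs on every capability. Only sound
+// during a stop-the-world pause, per the note above.
+#[allow(dead_code)]
+fn example_count_runnable_tsos() -> usize {
+    iter_capabilities().map(|cap| cap.iter_run_queue().count()).sum()
+}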
+
+
+// TODO: need to consider when table is enlarged
+pub fn iter_stable_ptr_table() -> impl Iterator<Item=&'static spEntry> {
+ unsafe {
+        let tables: &[spEntry] = std::slice::from_raw_parts(stable_ptr_table, SPT_size as usize);
+        tables.iter()
+ }
+}
+
+pub fn vm_mutator_thread_to_task(mutator: mmtk::util::VMMutatorThread) -> *const Task {
+ let optr: mmtk::util::opaque_pointer::OpaquePointer = mutator.0.0;
+ // TODO: mmtk should allow unsafe inspection of OpaquePointer's payload
+ unsafe { std::mem::transmute(optr) }
+}
diff --git a/rts/mmtk/mmtk/src/lib.rs b/rts/mmtk/mmtk/src/lib.rs
new file mode 100644
index 0000000000..c50d0b0156
--- /dev/null
+++ b/rts/mmtk/mmtk/src/lib.rs
@@ -0,0 +1,63 @@
+#![feature(untagged_unions)]
+#![feature(if_let_guard)]
+
+extern crate mmtk;
+extern crate libc;
+#[macro_use]
+extern crate lazy_static;
+
+use mmtk::vm::VMBinding;
+use mmtk::MMTK;
+use mmtk::MMTKBuilder;
+
+mod ghc;
+pub mod scanning;
+pub mod object_scanning;
+pub mod collection;
+pub mod object_model;
+pub mod active_plan;
+pub mod reference_glue;
+pub mod api;
+pub mod types;
+pub mod stg_closures;
+pub mod stg_info_table;
+pub mod util;
+pub mod test;
+pub mod edges;
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Default)]
+pub struct GHCVM;
+
+impl VMBinding for GHCVM {
+ type VMObjectModel = object_model::VMObjectModel;
+ type VMScanning = scanning::VMScanning;
+ type VMCollection = collection::VMCollection;
+ type VMActivePlan = active_plan::VMActivePlan;
+ type VMReferenceGlue = reference_glue::VMReferenceGlue;
+ type VMEdge = edges::GHCEdge;
+ type VMMemorySlice = edges::GHCVMMemorySlice;
+
+
+ /// Allowed maximum alignment in bytes.
+ const MAX_ALIGNMENT: usize = 1 << 6;
+}
+
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Mutex;
+
+/// Tracks whether MMTk has been initialized, so that initialization happens
+/// exactly once and at a well-defined point.
+pub static MMTK_INITIALIZED: AtomicBool = AtomicBool::new(false);
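+
+// Example (a minimal sketch): MMTk options must be set through BUILDER *before*
+// SINGLETON (below) is first dereferenced, since lazy_static initializes it on
+// first use. The "threads" option is illustrative only.
+#[allow(dead_code)]
+fn example_configure_before_init() {
+    assert!(!MMTK_INITIALIZED.load(Ordering::SeqCst));
+    let mut builder = BUILDER.lock().unwrap();
+    builder.set_option("threads", "4");
+}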
+
+lazy_static! {
+ pub static ref BUILDER: Mutex<MMTKBuilder> = Mutex::new(MMTKBuilder::new());
+ pub static ref SINGLETON: MMTK<GHCVM> = {
+ let builder = BUILDER.lock().unwrap();
+ debug_assert!(!MMTK_INITIALIZED.load(Ordering::SeqCst));
+ let ret = mmtk::memory_manager::mmtk_init(&builder);
+        MMTK_INITIALIZED.store(true, Ordering::SeqCst);
+ *ret
+ };
+} \ No newline at end of file
diff --git a/rts/mmtk/mmtk/src/mmtk-heap-closure-test/.gitignore b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/.gitignore
new file mode 100644
index 0000000000..281d526c61
--- /dev/null
+++ b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/.gitignore
@@ -0,0 +1,4 @@
+*.hi
+*.o
+Main
+dist-newstyle/ \ No newline at end of file
diff --git a/rts/mmtk/mmtk/src/mmtk-heap-closure-test/Main.hs b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/Main.hs
new file mode 100644
index 0000000000..7fccbe0bc3
--- /dev/null
+++ b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/Main.hs
@@ -0,0 +1,70 @@
+{-# LANGUAGE ForeignFunctionInterface #-}
+{-# LANGUAGE GHCForeignImportPrim #-}
+{-# LANGUAGE MagicHash #-}
+{-# LANGUAGE UnboxedTuples #-}
+{-# LANGUAGE UnliftedFFITypes #-}
+{-# LANGUAGE TypeInType #-}
+{-# LANGUAGE RankNTypes #-}
+
+module Main where
+
+import GHC.Exts
+import Unsafe.Coerce
+import GHC.IO
+import GHC.MVar
+import Control.Concurrent
+import Test.QuickCheck
+import RandomHeap
+import Types.Shape
+import Types.Heap
+import Control.Monad
+
+foreign import prim "cmm_printClosure"
+ cmm_printClosure :: Any -> (# #)
+
+pap :: Int -> Int -> Int
+pap x y = x+y
+
+printClosure
+ :: a -> IO ()
+printClosure x =
+ IO (\s -> case cmm_printClosure (unsafeCoerce# x) of
+ (# #) -> (# s, () #))
+
+printClosureUnlifted
+ :: forall (a :: TYPE UnliftedRep). a -> IO ()
+printClosureUnlifted x =
+ IO (\s -> case cmm_printClosure (unsafeCoerce# x) of
+ (# #) -> (# s, () #))
+
+main :: IO ()
+main = do
+ printClosure (Just 42 :: Maybe Int)
+ printClosure ([1,2,3,4])
+ printClosure ("HelloWorld")
+ printClosure (("haskell", 1))
+ printClosure (id :: Int -> Int) -- (this is FUN_STATIC)
+ printClosure (head [42,53] :: Int)
+ printClosure (pap 42)
+ -- shape <- Test.QuickCheck.generate (arbitrary :: Gen Shape)
+ -- Heap heap <- toHeap shape
+ -- printClosure heap
+ replicateM_ 50 $ do
+ shape <- Test.QuickCheck.generate (arbitrary :: Gen Shape)
+ Heap heap <- RandomHeap.toHeap shape
+ printClosure heap
+ -- IO (\s0# -> case newByteArray# 42# s0# of
+ -- (# s1#, ba# #) -> unIO (printClosureUnlifted ba#) s1#)
+ -- mvar <- newEmptyMVar
+ -- forkIO $ takeMVar mvar
+ -- printClosureUnlifted (case mvar of MVar mvar# -> mvar#) -- case analysis to print: allocate a thunk
+ -- x <- newMVar 0 -- create Mvar with value
+ -- -- stack will be reachable via the heap due to the blocking queue of MVar
+ -- forkIO $ do -- forking a thread
+ -- putMVar x 1 -- put a value in the mvar, but mvar is full, get blocked, add itself to blocking queue of Mvar
+ -- putStrLn "child done"
+ -- threadDelay 100
+ -- -- readMVar x
+ -- putStrLn "parent done"
+ -- printClosureUnlifted (case x of MVar x# -> x#)
+
diff --git a/rts/mmtk/mmtk/src/mmtk-heap-closure-test/Makefile b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/Makefile
new file mode 100644
index 0000000000..3d751e62d2
--- /dev/null
+++ b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/Makefile
@@ -0,0 +1,15 @@
+GHC ?= ghc
+
+all : Main
+
+c_printClosure.o : c_printClosure.c
+ ${GHC} -c $< -o $@ -g3 -optc-g3
+
+cmm_printClosure.o : cmm_printClosure.cmm
+ ${GHC} -c $< -o $@ -g3
+
+Main.o : Main.hs
+ ${GHC} -c $< -o $@ -g3
+
+Main : Main.o cmm_printClosure.o c_printClosure.o
+ ${GHC} ${EXTRA_HC_OPTS} $+ -g3 -debug -mmtk -o $@
diff --git a/rts/mmtk/mmtk/src/mmtk-heap-closure-test/c_printClosure.c b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/c_printClosure.c
new file mode 100644
index 0000000000..3ae33c7e25
--- /dev/null
+++ b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/c_printClosure.c
@@ -0,0 +1,12 @@
+#include "Rts.h"
+
+void printObj( StgClosure *obj );
+void print_obj( StgClosure *obj );
+void rs_collect_pointers( StgClosure *obj);
+
+void c_printClosure(StgClosure *p) {
+ printObj(p);
+ // print_obj(p);
+ // collect_pointers(p);
+ rs_collect_pointers(p);
+}
diff --git a/rts/mmtk/mmtk/src/mmtk-heap-closure-test/cabal.project b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/cabal.project
new file mode 100644
index 0000000000..77788c2318
--- /dev/null
+++ b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/cabal.project
@@ -0,0 +1,19 @@
+packages: .
+with-compiler: /home/junmingz/ghc_nogc/_build/stage1/bin/ghc
+
+source-repository-package
+ type: git
+ location: https://github.com/bgamari/random-heap
+ tag: master
+
+package *
+ ld-options: /home/junmingz/ghc_nogc/rts/mmtk/mmtk-ghc/mmtk/target/debug/libmmtk_ghc.a
+
+repository head.hackage.ghc.haskell.org
+ url: https://ghc.gitlab.haskell.org/head.hackage/
+ secure: True
+ key-threshold: 3
+ root-keys:
+ 26021a13b401500c8eb2761ca95c61f2d625bfef951b939a8124ed12ecf07329
+ 7541f32a4ccca4f97aea3b22f5e593ba2c0267546016b992dfadcd2fe944e55d
+ f76d08be13e9a61a377a85e2fb63f4c5435d40f8feb3e12eb05905edb8cdea89 \ No newline at end of file
diff --git a/rts/mmtk/mmtk/src/mmtk-heap-closure-test/cmm_printClosure.cmm b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/cmm_printClosure.cmm
new file mode 100644
index 0000000000..900a0cf7e6
--- /dev/null
+++ b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/cmm_printClosure.cmm
@@ -0,0 +1,6 @@
+#include "Cmm.h"
+
+cmm_printClosure(P_ p) {
+ ccall c_printClosure(p);
+ return ();
+}
diff --git a/rts/mmtk/mmtk/src/mmtk-heap-closure-test/mmtk-heap-closure-test.cabal b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/mmtk-heap-closure-test.cabal
new file mode 100644
index 0000000000..31ada2c63d
--- /dev/null
+++ b/rts/mmtk/mmtk/src/mmtk-heap-closure-test/mmtk-heap-closure-test.cabal
@@ -0,0 +1,27 @@
+cabal-version: 3.0
+name: mmtk-heap-closure-test
+version: 0.1.0.0
+
+-- A short (one-line) description of the package.
+-- synopsis:
+
+-- A longer description of the package.
+-- description:
+
+-- A URL where users can report bugs.
+-- bug-reports:
+
+-- The license under which the package is released.
+-- license:
+author: Junming Zhao
+maintainer: junming.zhao@anu.edu.au
+
+library
+ c-sources: c_printClosure.c
+ cmm-sources: cmm_printClosure.cmm
+ default-language: Haskell2010
+
+executable mmtk-heap-closure-test
+ main-is: Main.hs
+ build-depends: base, mmtk-heap-closure-test, random-heap, QuickCheck
+ default-language: Haskell2010
diff --git a/rts/mmtk/mmtk/src/object_model.rs b/rts/mmtk/mmtk/src/object_model.rs
new file mode 100644
index 0000000000..0c80f724a3
--- /dev/null
+++ b/rts/mmtk/mmtk/src/object_model.rs
@@ -0,0 +1,81 @@
+use mmtk::util::copy::{CopySemantics, GCWorkerCopyContext};
+use mmtk::util::{Address, ObjectReference};
+use mmtk::vm::*;
+use crate::GHCVM;
+use crate::ghc::closure_sizeW;
+use crate::stg_closures::StgClosure;
+use crate::types::StgWord;
+
+pub struct VMObjectModel {}
+
+// Offset from the allocation start to the object reference. It is currently
+// zero: object references point at the start of the closure.
+pub const OBJECT_REF_OFFSET: usize = 0;
+
+impl ObjectModel<GHCVM> for VMObjectModel {
+ const GLOBAL_LOG_BIT_SPEC: VMGlobalLogBitSpec = VMGlobalLogBitSpec::side_first();
+ const LOCAL_FORWARDING_POINTER_SPEC: VMLocalForwardingPointerSpec = VMLocalForwardingPointerSpec::side_first();
+ const LOCAL_FORWARDING_BITS_SPEC: VMLocalForwardingBitsSpec = VMLocalForwardingBitsSpec::side_first();
+ const LOCAL_MARK_BIT_SPEC: VMLocalMarkBitSpec = VMLocalMarkBitSpec::side_first();
+ const LOCAL_LOS_MARK_NURSERY_SPEC: VMLocalLOSMarkNurserySpec = VMLocalLOSMarkNurserySpec::side_after(Self::LOCAL_MARK_BIT_SPEC.as_spec());
+
+ const OBJECT_REF_OFFSET_LOWER_BOUND: isize = OBJECT_REF_OFFSET as isize;
+
+ fn copy(
+ _from: ObjectReference,
+ _semantics: CopySemantics,
+ _copy_context: &mut GCWorkerCopyContext<GHCVM>,
+ ) -> ObjectReference {
+ unimplemented!()
+ }
+
+ fn copy_to(_from: ObjectReference, _to: ObjectReference, _region: Address) -> Address {
+ unimplemented!()
+ }
+
+ fn get_current_size(object: ObjectReference) -> usize {
+ let size_w = unsafe { closure_sizeW(object.to_raw_address().to_ptr() as *const StgClosure) as usize };
+ size_w * std::mem::size_of::<StgWord>()
+ }
+
+ fn get_size_when_copied(object: ObjectReference) -> usize {
+ Self::get_current_size(object)
+ }
+
+ fn get_align_when_copied(_object: ObjectReference) -> usize {
+ ::std::mem::size_of::<usize>()
+ }
+
+ fn get_align_offset_when_copied(_object: ObjectReference) -> isize {
+ 0
+ }
+
+ fn get_reference_when_copied_to(_from: ObjectReference, _to: Address) -> ObjectReference {
+ unimplemented!()
+ }
+
+ fn get_type_descriptor(_reference: ObjectReference) -> &'static [i8] {
+ unimplemented!()
+ }
+
+ fn ref_to_object_start(object: ObjectReference) -> Address {
+ object.to_raw_address().sub(OBJECT_REF_OFFSET)
+ }
+
+ fn ref_to_header(object: ObjectReference) -> Address {
+ object.to_raw_address()
+ }
+
+ fn ref_to_address(object: ObjectReference) -> Address {
+ // Just use object start.
+ Self::ref_to_object_start(object)
+ }
+
+ fn address_to_ref(addr: Address) -> ObjectReference {
+ ObjectReference::from_raw_address(addr.add(OBJECT_REF_OFFSET))
+ }
+
+ fn dump_object(_object: ObjectReference) {
+ unimplemented!()
+ }
+}
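+
+// Example (sketch): for a two-field constructor, closure_sizeW counts one
+// header word (without profiling) plus two payload words, so get_current_size
+// returns 3 * size_of::<StgWord>() = 24 bytes on a 64-bit target.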
diff --git a/rts/mmtk/mmtk/src/object_scanning.rs b/rts/mmtk/mmtk/src/object_scanning.rs
new file mode 100644
index 0000000000..c38558dc33
--- /dev/null
+++ b/rts/mmtk/mmtk/src/object_scanning.rs
@@ -0,0 +1,325 @@
+// use mmtk::util::opaque_pointer::*;
+use mmtk::vm::EdgeVisitor;
+
+use crate::types::*;
+use crate::stg_closures::*;
+use crate::stg_info_table::*;
+use crate::edges::GHCEdge;
+use crate::ghc::*;
+use std::cmp::min;
+use std::mem::size_of;
+
+pub fn scan_closure_payload<EV: EdgeVisitor<GHCEdge>>(
+ // _tls: VMWorkerThread,
+ payload : &ClosurePayload,
+ n_ptrs : u32,
+ ev: &mut EV,
+)
+{
+ for n in 0..n_ptrs {
+ let edge = payload.get_ref(n as usize);
+ visit(ev, edge);
+ }
+}
+
+/// Helper function to visit (standard StgClosure) edge
+pub fn visit<EV: EdgeVisitor<GHCEdge>, Ref: IsClosureRef>(
+ ev: &mut EV,
+ slot: &mut Ref,
+)
+{
+ crate::util::push_node(unsafe{ *IsClosureRef::to_tagged_closure_ref(slot) });
+ ev.visit_edge(GHCEdge::ClosureRef(IsClosureRef::to_tagged_closure_ref(slot)))
+}
+
+/// Helper function to push (standard StgClosure) edge to root packet
+pub fn push_root<Ref: IsClosureRef>(
+ roots: &mut Vec<GHCEdge>,
+ slot: &mut Ref,
+)
+{
+ crate::util::push_node(unsafe{ *IsClosureRef::to_tagged_closure_ref(slot) });
+ roots.push(GHCEdge::ClosureRef(IsClosureRef::to_tagged_closure_ref(slot)))
+}
+
+#[allow(non_snake_case)]
+pub fn scan_TSO<EV: EdgeVisitor<GHCEdge>>(
+ // _tls: VMWorkerThread,
+ tso : &mut StgTSO,
+ ev: &mut EV,
+)
+{
+ // update the pointer from the InCall
+ if !tso.bound.is_null() {
+ visit(ev, unsafe { &mut (*(*tso).bound).tso });
+ }
+
+ visit(ev, &mut tso.blocked_exceptions);
+ visit(ev, &mut tso.blocking_queue);
+ visit(ev, &mut tso.trec);
+ visit(ev, &mut tso.stackobj);
+ visit(ev, &mut tso.link);
+
+ if tso.why_blocked == StgTSOBlocked::BLOCKED_ON_MVAR
+ || tso.why_blocked == StgTSOBlocked::BLOCKED_ON_MVAR_READ
+ || tso.why_blocked == StgTSOBlocked::BLOCKED_ON_BLACK_HOLE
+ || tso.why_blocked == StgTSOBlocked::BLOCKED_ON_MSG_THROW_TO
+ || tso.why_blocked == StgTSOBlocked::NOT_BLOCKED {
+ unsafe {
+ let edge = &mut tso.block_info.closure;
+ visit(ev, edge);
+ }
+ }
+
+ // TODO: GC should not trace (related to weak pointer)
+ visit(ev, &mut tso.global_link);
+
+ visit(ev, &mut tso.tso_link_prev);
+ visit(ev, &mut tso.tso_link_next);
+}
+
+#[allow(non_snake_case)]
+pub fn scan_PAP_payload<EV: EdgeVisitor<GHCEdge>>(
+ // _tls: VMWorkerThread,
+ fun_info: &StgFunInfoTable,
+ payload : &ClosurePayload,
+ size : usize,
+ ev: &mut EV,
+)
+{
+ use StgFunType::*;
+ debug_assert_ne!(fun_info.i.type_, StgClosureType::PAP);
+
+ match fun_info.f.fun_type {
+ ARG_GEN => unsafe {
+ let small_bitmap : StgSmallBitmap = fun_info.f.bitmap.small_bitmap;
+ scan_small_bitmap( payload, small_bitmap, ev);
+ }
+ ARG_GEN_BIG => unsafe {
+ let large_bitmap : &StgLargeBitmap =
+ &*(fun_info.f.bitmap.large_bitmap_ref.deref(fun_info));
+ scan_large_bitmap( payload, large_bitmap, size, ev);
+ }
+ // TODO: handle ARG_BCO case
+ _ => {
+ let small_bitmap = StgFunType::get_small_bitmap(&fun_info.f.fun_type);
+ scan_small_bitmap( payload, small_bitmap, ev);
+ }
+ }
+}
+
+const MUT_ARR_PTRS_CARD_BITS : usize = 7;
+
+
+/// Scan mutable arrays of pointers
+/// See rts/sm/Scav.c:scavenge_mut_arr_ptrs()
+pub unsafe fn scan_mut_arr_ptrs<EV: EdgeVisitor<GHCEdge>>(
+ // _tls: VMWorkerThread,
+ array : &StgMutArrPtrs,
+ ev: &mut EV,
+)
+{
+    // number of cards in the array
+    let n_cards : StgWord = (array.n_ptrs + (1 << MUT_ARR_PTRS_CARD_BITS) - 1)
+        >> MUT_ARR_PTRS_CARD_BITS;
+    // guard against empty arrays (n_cards-1 below would underflow)
+    if n_cards == 0 { return; }
+
+    // The card table is an array of bytes living immediately after the
+    // payload's n_ptrs elements, so take the *address* of slot n_ptrs.
+    let card_table : *mut StgWord8 =
+        array.payload.get_ref(array.n_ptrs) as *mut TaggedClosureRef as *mut StgWord8;
+
+    // scan every card but the last; the m-th card covers the next
+    // 2^MUT_ARR_PTRS_CARD_BITS elements
+    for m in 0..n_cards-1 {
+        for p in m*(1<<MUT_ARR_PTRS_CARD_BITS) .. (m+1)*(1<<MUT_ARR_PTRS_CARD_BITS) {
+            let edge = array.payload.get_ref(p);
+            visit(ev, edge);
+        }
+        // mark the m-th card as clean
+        *card_table.add(m) = 0;
+    }
+
+    // scan the last, possibly partial, card
+    for p in (n_cards-1)*(1<<MUT_ARR_PTRS_CARD_BITS) .. array.n_ptrs {
+        let edge = array.payload.get_ref(p);
+        visit(ev, edge);
+    }
+    // mark the last card as clean
+    *card_table.add(n_cards-1) = 0;
+
+ // TODO: use the optimised version later for card marking
+ // (bool: whether there's an inter generation pointer (old to young))
+}
+
+/// See rts/sm/Scav.c:scavenge_small_bitmap()
+pub fn scan_small_bitmap<EV: EdgeVisitor<GHCEdge>>(
+ // _tls: VMWorkerThread,
+ payload : &ClosurePayload,
+ small_bitmap : StgSmallBitmap,
+ ev: &mut EV,
+)
+{
+ let size = small_bitmap.size();
+ let mut bitmap = small_bitmap.bits();
+
+ for i in 0..size {
+ if (bitmap & 1) == 0 {
+ visit(ev, payload.get_ref(i));
+ }
+ bitmap = bitmap >> 1;
+ }
+}
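+
+// Example (sketch): for a payload laid out as [ptr, non-ptr, ptr], the bitmap
+// has size = 3 and bits = 0b010 (a 1 bit marks a non-pointer), so the loop
+// above visits slots 0 and 2 and skips slot 1.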
+
+/// See rts/sm/Scav.c:scavenge_large_bitmap()
+pub fn scan_large_bitmap<EV: EdgeVisitor<GHCEdge>>(
+ // _tls: VMWorkerThread,
+ payload : &ClosurePayload,
+ large_bitmap : &StgLargeBitmap,
+ size : usize,
+ ev: &mut EV,
+)
+{
+ // Bitmap may have more bits than `size` when scavenging PAP payloads
+ // PAP n_args < fun.bitmap.size
+ // AP n_args = fun.bitmap.size
+ debug_assert!(size <= large_bitmap.size);
+
+ let mut b : usize = 0;
+ let mut i : usize = 0;
+ while i < size {
+ let mut bitmap = unsafe {*(large_bitmap.bitmap).get_w(b)};
+        // word_len is the number of bits to examine in this word:
+        // min(bits per StgWord, size - i)
+ let word_len = min(size - i, 8*size_of::<StgWord>());
+ i += word_len;
+ for j in 0..word_len {
+ if (bitmap & 1) == 0 {
+ let edge = payload.get_ref(j);
+ visit(ev, edge);
+ }
+ bitmap = bitmap >> 1;
+ }
+ b += 1;
+ }
+}
+
+
+/// See rts/sm/Scav.c:scavenge_stack()
+pub fn scan_stack<EV: EdgeVisitor<GHCEdge>>(
+ stack : StackIterator,
+ ev: &mut EV,
+)
+{
+ for stackframe in stack {
+ use StackFrame::*;
+ match stackframe {
+ UPD_FRAME(frame) => {
+ // evacuate_BLACKHOLE
+ visit(ev, &mut frame.updatee);
+ }
+ RET_SMALL(frame, bitmap) => {
+ let payload : &'static ClosurePayload = &(frame.payload);
+ scan_small_bitmap(payload, bitmap, ev);
+ let ret_itbl = unsafe {&mut *(frame.header.info_table.get_mut_ptr())};
+ scan_srt(ret_itbl, ev);
+ }
+ RET_BIG(frame, bitmap_ref) => {
+ let payload : &'static ClosurePayload = &(frame.payload);
+ let size : usize = bitmap_ref.size;
+ scan_large_bitmap(payload, bitmap_ref, size, ev);
+ let ret_itbl = unsafe {&mut *(frame.header.info_table.get_mut_ptr())};
+ scan_srt(ret_itbl, ev);
+ }
+ RET_FUN_SMALL(frame, bitmap) => {
+ visit(ev, &mut frame.fun);
+ let payload : &'static ClosurePayload = &(frame.payload);
+ scan_small_bitmap(payload, bitmap, ev);
+ let ret_itbl = unsafe {&mut *(frame.info_table.get_mut_ptr())};
+ scan_srt(ret_itbl, ev);
+ }
+ RET_FUN_LARGE(frame, bitmap_ref) => {
+ visit(ev, &mut frame.fun);
+ let payload : &'static ClosurePayload = &(frame.payload);
+ let size : usize = bitmap_ref.size;
+ scan_large_bitmap(payload, bitmap_ref, size, ev);
+ let ret_itbl = unsafe {&mut *(frame.info_table.get_mut_ptr())};
+ scan_srt(ret_itbl, ev);
+ }
+ _ => panic!("Unexpected stackframe type {:?}", stackframe)
+ }
+ }
+}
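+
+// Example (sketch): a stack segment under scan typically looks like
+//     sp -> [ RET_SMALL frame | live slots per its small bitmap ]
+//           [ UPD_FRAME       | updatee                         ]
+//           [ STOP_FRAME ]
+// and each frame's extent is decoded from its return info table as above.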
+
+/// See (follow_srt) in rts/sm/Scav.c:scavenge_stack
+pub fn scan_srt<EV: EdgeVisitor<GHCEdge>>(
+ ret_info_table : &mut StgRetInfoTable,
+ ev: &mut EV,
+)
+{
+ // TODO: only for major gc
+ // TODO: non USE_INLINE_SRT_FIELD
+ match ret_info_table.get_srt() {
+ None => (),
+ Some(_srt) => {
+ ev.visit_edge(GHCEdge::RetSrtRef(ret_info_table));
+ }
+ }
+}
+
+/// In the case of USE_INLINE_SRT_FIELD, the SRT is represented as an offset,
+/// so we cannot use the standard edge representation
+pub fn scan_srt_thunk<EV: EdgeVisitor<GHCEdge>>(
+ thunk_info_table : &mut StgThunkInfoTable,
+ ev: &mut EV,
+)
+{
+ // TODO: only for major gc
+ // TODO: non USE_INLINE_SRT_FIELD
+ match thunk_info_table.get_srt() {
+ None => (),
+ Some(_srt) => {
+ ev.visit_edge(GHCEdge::ThunkSrtRef(thunk_info_table));
+ }
+ }
+}
+
+pub fn scan_srt_fun<EV: EdgeVisitor<GHCEdge>>(
+ fun_info_table: &mut StgFunInfoTable,
+ ev: &mut EV,
+)
+{
+ // TODO: only for major gc
+ // TODO: non USE_INLINE_SRT_FIELD
+ match fun_info_table.get_srt() {
+ None => (),
+ Some(_srt) => {
+ ev.visit_edge(GHCEdge::FunSrtRef(fun_info_table));
+ }
+ }
+}
+
+/// Treat objects in the stable pointer table as roots
+/// See rts/StablePtr.c:FOR_EACH_STABLE_PTR
+pub fn get_stable_ptr_table_roots() -> Vec<GHCEdge>
+{
+    unsafe {
+        let mut roots: Vec<GHCEdge> = vec![];
+        let tables : *mut spEntry = stable_ptr_table;
+        let end_ptr : *mut spEntry = tables.offset(SPT_size as isize);
+
+        for table in iter_stable_ptr_table() {
+            // A free entry's addr field points back into the table itself (it
+            // is a free-list link), so only entries whose addr lies outside
+            // the table hold live stable pointers.
+            if !table.addr.is_null() &&
+                ((table.addr < stable_ptr_table as *mut usize) ||
+                (table.addr >= end_ptr as *mut usize))
+            {
+                let edge_addr: *const *mut usize = &(table.addr) as *const *mut usize;
+                let edge: *mut TaggedClosureRef = edge_addr as *mut TaggedClosureRef;
+                crate::util::push_node(*edge);
+                roots.push(GHCEdge::ClosureRef(edge));
+            }
+        }
+        roots
+    }
+} \ No newline at end of file
diff --git a/rts/mmtk/mmtk/src/reference_glue.rs b/rts/mmtk/mmtk/src/reference_glue.rs
new file mode 100644
index 0000000000..eb29bd0be5
--- /dev/null
+++ b/rts/mmtk/mmtk/src/reference_glue.rs
@@ -0,0 +1,20 @@
+use mmtk::vm::ReferenceGlue;
+use mmtk::util::ObjectReference;
+use mmtk::util::opaque_pointer::*;
+use crate::GHCVM;
+
+pub struct VMReferenceGlue {}
+
+impl ReferenceGlue<GHCVM> for VMReferenceGlue {
+ type FinalizableType = ObjectReference;
+
+ fn get_referent(_object: ObjectReference) -> ObjectReference {
+ unimplemented!();
+ }
+ fn set_referent(_reff: ObjectReference, _referent: ObjectReference) {
+ unimplemented!();
+ }
+ fn enqueue_references(_references: &[ObjectReference], _tls: VMWorkerThread) {
+ unimplemented!();
+ }
+}
diff --git a/rts/mmtk/mmtk/src/scanning.rs b/rts/mmtk/mmtk/src/scanning.rs
new file mode 100644
index 0000000000..dbeaebcdb3
--- /dev/null
+++ b/rts/mmtk/mmtk/src/scanning.rs
@@ -0,0 +1,191 @@
+use crate::GHCVM;
+use mmtk::util::opaque_pointer::*;
+use mmtk::vm::{EdgeVisitor, Scanning, RootsWorkFactory};
+use mmtk::util::{ObjectReference};
+use mmtk::Mutator;
+use crate::stg_closures::*;
+use crate::stg_info_table::*;
+use crate::object_scanning::*;
+use crate::ghc::*;
+use crate::edges::GHCEdge;
+
+pub struct VMScanning {}
+
+impl Scanning<GHCVM> for VMScanning {
+
+ /// Scan all capabilities' run queues.
+ fn scan_thread_roots(_tls: VMWorkerThread, mut factory: impl RootsWorkFactory<GHCEdge>) {
+ let mut roots: Vec<GHCEdge> = vec![];
+ for mut cap in iter_capabilities() {
+ push_root(&mut roots, &mut cap.run_queue_hd);
+ push_root(&mut roots, &mut cap.run_queue_tl);
+            // The edge must point at the `inbox` field itself, not at a
+            // temporary copy of the pointer (the temporary would be dead by
+            // the time the roots are processed). Layout compatibility between
+            // the binding's message type and `Message` is assumed here.
+            let inbox: &mut *mut Message =
+                unsafe { &mut *(&mut cap.inbox as *mut _ as *mut *mut Message) };
+            push_root(&mut roots, inbox);
+            let mut incall = cap.suspended_ccalls;
+            while !incall.is_null() {
+                push_root(&mut roots, unsafe { &mut (*incall).suspended_tso });
+                incall = unsafe { (*incall).next };
+            }
+ }
+ // TODO: traverseSparkQueue
+ factory.create_process_edge_roots_work(roots);
+ }
+
+ fn scan_thread_root(
+ _tls: VMWorkerThread,
+ _mutator: &'static mut Mutator<GHCVM>,
+ _factory: impl RootsWorkFactory<GHCEdge>,
+ ) {
+ unimplemented!()
+ }
+
+    /// Treat VM-specific objects (the stable pointer table, the global TSO list) as GC roots
+ fn scan_vm_specific_roots(
+ _tls: VMWorkerThread,
+ mut factory: impl RootsWorkFactory<GHCEdge>,
+ ) {
+ unsafe {
+ let mut roots = get_stable_ptr_table_roots();
+
+ let edge = IsClosureRef::to_tagged_closure_ref(&mut global_TSOs);
+ roots.push(GHCEdge::ClosureRef(edge));
+
+ factory.create_process_edge_roots_work(roots);
+ }
+ // markWeakPtrList
+ // todo: markCAFs (not handling ghci atm)
+ // todo: gcStableNameTable
+ }
+
+ fn scan_object<EV: EdgeVisitor<GHCEdge>>(
+ _tls: VMWorkerThread,
+ obj: ObjectReference,
+ ev: &mut EV,
+ )
+ {
+ let closure_ref = TaggedClosureRef::from_object_reference(obj);
+ visit_closure(closure_ref, ev);
+ }
+
+
+ fn notify_initial_thread_scan_complete(_partial_scan: bool, _tls: VMWorkerThread) {
+ }
+ fn supports_return_barrier() -> bool {
+ unimplemented!()
+ }
+ fn prepare_for_roots_re_scanning() {
+ unimplemented!()
+ }
+}
+
+/// Visit the pointers inside a closure, depending on its closure type
+/// See rts/sm/Scav.c:scavenge_one()
+pub fn visit_closure<EV : EdgeVisitor<GHCEdge>>(closure_ref: TaggedClosureRef, ev: &mut EV) {
+ let itbl: &'static StgInfoTable = closure_ref.get_info_table();
+
+ match closure_ref.to_closure() {
+ Closure::MVar(mvar) => {
+ visit(ev, &mut mvar.head);
+ visit(ev, &mut mvar.tail);
+ visit(ev, &mut mvar.value);
+ }
+ Closure::TVar(tvar) => {
+ visit(ev, &mut tvar.current_value);
+ visit(ev, &mut tvar.first_watch_queue_entry);
+ }
+ Closure::Thunk(thunk) => unsafe {
+ let n_ptrs : u32 = itbl.layout.payload.ptrs;
+ scan_closure_payload(&thunk.payload, n_ptrs, ev);
+ }
+ Closure::Constr(closure) => unsafe {
+ let n_ptrs = itbl.layout.payload.ptrs;
+ scan_closure_payload(&closure.payload, n_ptrs, ev);
+ }
+ Closure::Fun(fun) => unsafe {
+ let n_ptrs = itbl.layout.payload.ptrs;
+ scan_closure_payload(&fun.payload, n_ptrs, ev);
+ }
+ Closure::Weak(weak) => {
+ visit(ev, &mut weak.value);
+ visit(ev, &mut weak.key);
+ visit(ev, &mut weak.finalizer);
+ visit(ev, &mut weak.cfinalizers);
+ }
+ Closure::MutVar(mut_var) => {
+ visit(ev, &mut mut_var.var);
+ }
+ Closure::BlockingQueue(bq) => {
+ visit(ev, &mut bq.bh);
+ visit(ev, &mut bq.owner);
+ visit(ev, &mut bq.queue);
+ visit(ev, &mut bq.link);
+ }
+ Closure::ThunkSelector(selector) => {
+ visit(ev, &mut selector.selectee);
+ }
+ Closure::ApStack(ap) => unsafe {
+ visit(ev, &mut ap.fun);
+ scan_stack(ap.iter(), ev);
+ }
+ Closure::PartialAP(pap) => {
+ visit(ev, &mut pap.fun);
+ let size : usize = pap.n_args as usize;
+ let fun_info : & StgFunInfoTable =
+ StgFunInfoTable::from_info_table(pap.fun.get_info_table());
+ let payload : &ClosurePayload = &pap.payload;
+ scan_PAP_payload(fun_info, payload, size, ev);
+ }
+ Closure::AP(ap) => {
+ visit(ev, &mut ap.fun);
+ let size : usize = ap.n_args as usize;
+ let fun_info : & StgFunInfoTable =
+ StgFunInfoTable::from_info_table(ap.fun.get_info_table());
+ let payload : &ClosurePayload = &ap.payload;
+ scan_PAP_payload(fun_info, payload, size, ev);
+ }
+ // ARR_WORDS
+ Closure::ArrBytes(_) => { return; }
+ Closure::ArrMutPtr(array) => unsafe {
+ scan_mut_arr_ptrs(array, ev);
+ }
+ Closure::ArrMutPtrSmall(array) => {
+ scan_closure_payload(&array.payload, array.ptrs as u32, ev)
+ }
+ Closure::TSO(tso) => {
+ scan_TSO(tso, ev);
+ }
+ Closure::Stack(stack) => {
+ scan_stack(stack.iter(), ev);
+ }
+ Closure::TRecChunk(trec_chunk) => {
+ visit(ev, &mut trec_chunk.prev_chunk);
+ // visit payload
+ let n_ptrs = trec_chunk.next_entry_idx;
+ for n in 0..n_ptrs {
+ let trec_entry = &mut trec_chunk.entries[n];
+ visit(ev, &mut trec_entry.tvar);
+ visit(ev, &mut trec_entry.expected_value);
+ visit(ev, &mut trec_entry.new_value);
+ }
+ }
+ Closure::Indirect(ind) => {
+ visit(ev, &mut ind.indirectee);
+ }
+ // scan static: see rts/sm/Scav.c:scavenge_static
+ Closure::IndirectStatic(ind) => {
+ visit(ev, &mut ind.indirectee);
+ }
+ Closure::ThunkStatic(_) => {
+ let thunk_info = StgThunkInfoTable::from_info_table(itbl);
+ scan_srt_thunk(thunk_info, ev);
+ }
+ Closure::FunStatic(fun) => unsafe {
+ let fun_info = StgFunInfoTable::from_info_table(itbl);
+ scan_srt_fun(fun_info, ev);
+ let n_ptrs = itbl.layout.payload.ptrs;
+ scan_closure_payload(&fun.payload, n_ptrs, ev);
+ }
+ // TODO: scavenge_compact for COMPACT_NFDATA
+ _ => panic!("scavenge_one: strange object type={:?}, address={:?}",
+ itbl.type_, itbl)
+ }
+}
diff --git a/rts/mmtk/mmtk/src/stg_closures.rs b/rts/mmtk/mmtk/src/stg_closures.rs
new file mode 100644
index 0000000000..1c0812d2c6
--- /dev/null
+++ b/rts/mmtk/mmtk/src/stg_closures.rs
@@ -0,0 +1,958 @@
+use crate::types::*;
+use crate::stg_info_table::*;
+use crate::util::*;
+use crate::ghc::Task;
+use mmtk::util::{Address, ObjectReference};
+use std::mem::size_of;
+use std::fmt;
+
+
+// ------------ Closures.h ------------
+
+// TODO: handle the profiling case
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgProfHeader {}
+
+// ------------ Closure headers ------------
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgSMPThunkHeader {
+ pub pad : StgWord
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgHeader {
+ pub info_table: StgInfoTableRef,
+ pub prof_header : StgProfHeader,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgThunkHeader {
+ pub info_table : StgInfoTableRef,
+ pub prof_header : StgProfHeader,
+ pub smp : StgSMPThunkHeader,
+}
+
+// ------------ payload ------------
+#[repr(C)]
+#[derive(Debug)]
+pub struct ClosurePayload {}
+
+// TODO: check other instances of indexing in payload
+impl ClosurePayload {
+ pub fn get(&self, i: usize) -> TaggedClosureRef {
+ unsafe {
+ let ptr: *const ClosurePayload = &*self;
+ let payload: *const TaggedClosureRef = ptr.cast();
+ *payload.offset(i as isize)
+ }
+ }
+
+ pub fn get_ref(&self, i: usize) -> &'static mut TaggedClosureRef {
+ unsafe {
+ let ptr: *const ClosurePayload = &*self;
+ let payload: *mut TaggedClosureRef = ptr as *mut TaggedClosureRef;
+ let slot: *mut TaggedClosureRef = payload.offset(i as isize);
+ &mut (*slot) as &'static mut TaggedClosureRef
+ }
+ }
+}
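+
+// Example (sketch): read every pointer field of a constructor closure whose
+// info table reports `ptrs` pointers. Illustrative only; `c` must be a valid,
+// untagged closure.
+#[allow(dead_code)]
+unsafe fn example_constr_fields(c: &StgClosure) -> Vec<TaggedClosureRef> {
+    let n = c.header.info_table.layout.payload.ptrs as usize;
+    (0..n).map(|i| c.payload.get(i)).collect()
+}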
+
+// ------------ Closure types ------------
+#[derive(Debug)]
+pub enum Closure {
+ Constr(&'static StgClosure),
+ Weak(&'static mut StgWeak),
+
+ Thunk(&'static StgThunk),
+ ThunkSelector(&'static mut StgSelector),
+ ThunkStatic(&'static mut StgThunkStatic),
+
+ Fun(&'static StgClosure),
+ PartialAP(&'static mut StgPAP),
+ AP(&'static mut StgAP),
+ ApStack(&'static mut StgAP_STACK),
+ FunStatic(&'static mut StgClosure),
+
+ Indirect(&'static mut StgInd),
+ IndirectStatic(&'static mut StgIndStatic),
+
+ BlockingQueue(&'static mut StgBlockingQueue),
+ ArrBytes(&'static StgArrBytes),
+ ArrMutPtr(&'static StgMutArrPtrs),
+ ArrMutPtrSmall(&'static StgSmallMutArrPtrs),
+ MutVar(&'static mut StgMutVar),
+
+ Stack(&'static StgStack),
+
+ ByteCodeObj(&'static StgBCO),
+ TSOQueueMVar(&'static StgMVarTSOQueue),
+
+ TSO(&'static mut StgTSO),
+
+ MVar(&'static mut StgMVar),
+ TVar(&'static mut StgTVar),
+
+ TRecChunk(&'static mut StgTRecChunk),
+
+ // TODO: static pointers?
+}
+
+impl Closure{
+ pub unsafe fn from_ptr(p: *const StgClosure) -> Closure {
+ let info: &'static StgInfoTable = &*(*p).header.info_table;
+ use StgClosureType::*;
+ match info.type_ {
+ // what are PRIM and MUT_PRIM?
+ CONSTR | CONSTR_NOCAF | CONSTR_1_0 | CONSTR_0_1 | CONSTR_2_0 |
+ CONSTR_1_1 | CONSTR_0_2 | PRIM | MUT_PRIM => {
+ Closure::Constr(&*(p as *const StgClosure))
+ }
+ THUNK | THUNK_1_0 | THUNK_0_1 | THUNK_2_0 | THUNK_1_1 | THUNK_0_2 => {
+ Closure::Thunk(&*(p as *const StgThunk))
+ }
+ THUNK_SELECTOR => {
+ Closure::ThunkSelector(&mut *(p as *mut StgSelector))
+ }
+ THUNK_STATIC => {
+ Closure::ThunkStatic(&mut *(p as *mut StgThunkStatic))
+ }
+ FUN | FUN_1_0 | FUN_0_1 | FUN_1_1 | FUN_0_2 | FUN_2_0 => {
+ Closure::Fun(&*(p as *const StgClosure))
+ }
+ FUN_STATIC => {
+ Closure::FunStatic(&mut *(p as *mut StgClosure))
+ }
+ PAP => {
+ Closure::PartialAP(&mut *(p as *mut StgPAP))
+ }
+ AP => {
+ Closure::AP(&mut *(p as *mut StgAP))
+ }
+ AP_STACK => {
+ Closure::ApStack(&mut *(p as *mut StgAP_STACK))
+ }
+ IND | BLACKHOLE => {
+ Closure::Indirect(&mut *(p as *mut StgInd))
+ }
+ IND_STATIC => {
+ Closure::IndirectStatic(&mut *(p as *mut StgIndStatic))
+ }
+ BLOCKING_QUEUE => {
+ Closure::BlockingQueue(&mut *(p as *mut StgBlockingQueue))
+ }
+ ARR_WORDS => {
+ Closure::ArrBytes(&*(p as *const StgArrBytes))
+ }
+ MUT_ARR_PTRS_CLEAN | MUT_ARR_PTRS_DIRTY |
+ MUT_ARR_PTRS_FROZEN_DIRTY | MUT_ARR_PTRS_FROZEN_CLEAN => {
+ Closure::ArrMutPtr(&*(p as *const StgMutArrPtrs))
+ }
+ SMALL_MUT_ARR_PTRS_CLEAN | SMALL_MUT_ARR_PTRS_DIRTY |
+ SMALL_MUT_ARR_PTRS_FROZEN_DIRTY | SMALL_MUT_ARR_PTRS_FROZEN_CLEAN => {
+ Closure::ArrMutPtrSmall(&*(p as *const StgSmallMutArrPtrs))
+ }
+ MUT_VAR_CLEAN | MUT_VAR_DIRTY => {
+ Closure::MutVar(&mut *(p as *mut StgMutVar))
+ }
+ STACK => {
+ Closure::Stack(&*(p as *const StgStack))
+ }
+ WEAK => {
+ Closure::Weak(&mut *(p as *mut StgWeak))
+ }
+ // TODO: BCO => {
+ // Closure::ByteCodeObj(&*(p as *const StgBCO))
+ // }
+ MVAR_CLEAN | MVAR_DIRTY => {
+ Closure::MVar(&mut *(p as *mut StgMVar))
+ }
+ TVAR => {
+ Closure::TVar(&mut *(p as *mut StgTVar))
+ }
+ TSO => {
+ Closure::TSO(&mut *(p as *mut StgTSO))
+ }
+ TREC_CHUNK => {
+ Closure::TRecChunk(&mut *(p as *mut StgTRecChunk))
+ }
+
+ _ => panic!("info={:?} address={:?}", info, info as *const StgInfoTable)
+ }
+ }
+
+ pub fn to_address(&self) -> Address {
+ Address::from_ptr(self)
+ }
+}
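+
+// Example (sketch): dispatching on a decoded closure.
+#[allow(dead_code)]
+fn example_is_indirection(r: TaggedClosureRef) -> bool {
+    matches!(r.to_closure(), Closure::Indirect(_) | Closure::IndirectStatic(_))
+}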
+
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgClosure {
+ pub header : StgHeader,
+ pub payload : ClosurePayload,
+}
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
+pub struct TaggedClosureRef (*mut StgClosure);
+
+impl TaggedClosureRef {
+ const TAG_BITS: usize = 0x7;
+
+ // untagging from a tagged pointer
+ pub fn to_ptr(&self) -> *const StgClosure {
+ let masked: usize = (self.0 as usize) & !Self::TAG_BITS;
+ masked as *const StgClosure
+ }
+
+ pub fn to_tagged_ptr(&self) -> *const StgClosure {
+ self.0
+ }
+
+ pub fn from_object_reference(obj : ObjectReference) -> TaggedClosureRef {
+ let closure: *const StgClosure = obj.to_raw_address().to_ptr();
+ TaggedClosureRef(closure as *mut StgClosure)
+ }
+
+    pub fn to_object_reference(&self) -> ObjectReference {
+        // Use the (untagged) closure address, not the address of this ref itself
+        ObjectReference::from_raw_address(self.to_address())
+    }
+
+ pub fn get_info_table(&self) -> &'static StgInfoTable {
+ unsafe{
+ &*(*self.to_ptr()).header.info_table
+ }
+ }
+
+ pub fn to_closure(&self) -> Closure {
+ unsafe { Closure::from_ptr(self.to_ptr()) }
+ }
+
+ pub fn to_address(&self) -> Address {
+ Address::from_ptr(self.to_ptr())
+ }
+
+ pub fn from_address(address : Address) -> Self {
+ TaggedClosureRef(address.to_mut_ptr())
+ }
+
+ pub fn from_ptr(ptr : *mut StgClosure) -> Self {
+ TaggedClosureRef(ptr)
+ }
+
+ pub fn set_tag(&self, tag: usize) -> Self {
+ assert!(tag & !Self::TAG_BITS == 0);
+ TaggedClosureRef((self.0 as usize | tag) as *mut StgClosure)
+ }
+
+ pub fn get_tag(&self) -> usize {
+ (self.0 as usize) & Self::TAG_BITS
+ }
+}
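+
+// Example (sketch): pointer tagging packs small metadata (e.g. a constructor
+// tag or function arity) into the low bits of a word-aligned closure pointer.
+// Assumes `p` is at least 8-byte aligned.
+#[allow(dead_code)]
+fn example_tagging(p: *mut StgClosure) {
+    let r = TaggedClosureRef::from_ptr(p).set_tag(2);
+    assert_eq!(r.get_tag(), 2);
+    assert_eq!(r.to_ptr(), p as *const StgClosure); // to_ptr() masks the tag off
+}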
+
+// Closure types: THUNK, THUNK_<X>_<Y>
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgThunk {
+ pub header : StgThunkHeader,
+ pub payload : ClosurePayload,
+}
+
+// Closure types: THUNK_STATIC
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgThunkStatic {
+ pub header : StgThunkHeader,
+ pub static_link : TaggedClosureRef,
+}
+
+// Closure types: THUNK_SELECTOR
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgSelector {
+ pub header : StgThunkHeader,
+ pub selectee : TaggedClosureRef,
+}
+
+// Closure types: PAP
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgPAP {
+ pub header : StgHeader,
+ pub arity : StgHalfWord,
+ pub n_args : StgHalfWord,
+ pub fun : TaggedClosureRef,
+ pub payload : ClosurePayload,
+}
+
+// Closure types: AP
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgAP {
+ pub header : StgThunkHeader,
+ pub arity : StgHalfWord,
+ pub n_args : StgHalfWord,
+ pub fun : TaggedClosureRef,
+ pub payload : ClosurePayload,
+}
+
+// Closure types: AP_STACK
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgAP_STACK {
+ pub header : StgThunkHeader,
+ pub size : StgWord, // number of words
+ pub fun : TaggedClosureRef,
+ pub payload : ClosurePayload,
+}
+
+impl StgAP_STACK {
+ pub unsafe fn iter(&mut self) -> StackIterator {
+ let start : *mut StgStackFrame = (&mut (self.payload) as *mut ClosurePayload).cast();
+ StackIterator{
+ current : start,
+ end : offset_words(start, self.size as isize),
+ }
+ }
+}
+
+// Closure types: IND
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgInd {
+ pub header : StgHeader,
+ pub indirectee : TaggedClosureRef,
+}
+
+// Closure types: IND_STATIC
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgIndStatic {
+ pub header : StgHeader,
+ pub indirectee : TaggedClosureRef,
+ pub static_link : TaggedClosureRef,
+ pub saved_info_table : StgInfoTableRef,
+}
+
+// Closure types: BLOCKING_QUEUE
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgBlockingQueue {
+ pub header : StgHeader,
+ pub link : *mut StgBlockingQueue,
+ pub bh : TaggedClosureRef,
+    pub owner : *mut StgTSO,
+ pub queue : *mut MessageBlackHole,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgTSO {
+ pub header : StgHeader,
+ pub link : *mut StgTSO,
+ pub global_link : *mut StgTSO,
+ pub tso_link_prev : *mut StgTSO,
+ pub tso_link_next : *mut StgTSO,
+ pub stackobj : *mut StgStack,
+ pub what_next : StgTSONext, // in types.rs
+ pub why_blocked : StgTSOBlocked, // in types.rs
+ pub flags : StgTSOFlag, // in types.rs
+ pub block_info : StgTSOBlockInfo,
+ pub id : StgThreadID,
+ pub saved_errno : StgWord32,
+ pub dirty : StgWord32,
+ pub bound : *mut InCall,
+ pub cap : *mut Capability,
+ pub trec : *mut StgTRecHeader,
+ pub blocked_exceptions : *mut MessageThrowTo,
+ pub blocking_queue : *mut StgBlockingQueue,
+ pub alloc_limit : StgInt64,
+ pub tot_stack_size : StgWord32,
+
+ // TODO: handle TICKY_TICKY, PROFILING, mingw32_HOST_OS
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgThreadID(StgWord64);
+
+// TODO: these are placeholder structs for the remaining TSO fields
+#[repr(C)]
+#[derive(Debug)]
+pub struct InCall {
+ pub tso : *mut StgTSO,
+ pub suspended_tso : *mut StgTSO,
+ pub suspended_cap : *mut Capability,
+ pub rstat : StgInt, // TODO
+ pub ret : *mut TaggedClosureRef,
+ pub task : *mut Task,
+ pub prev_stack : *mut InCall,
+ pub prev : *mut InCall,
+ pub next : *mut InCall,
+}
+
+#[repr(C)]
+// #[derive(Debug)]
+pub union StgTSOBlockInfo {
+ pub closure : TaggedClosureRef,
+ pub prev : *mut StgTSO,
+ pub black_hole : *mut MessageBlackHole,
+ pub throw_to : *mut MessageThrowTo,
+ pub wake_up : *mut MessageWakeup,
+ pub fd : StgInt,
+ pub target : StgWord,
+ // TODO: THREADED_RTS
+}
+
+impl fmt::Debug for StgTSOBlockInfo {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ unsafe {
+ write!(f, "StgTsoBlockInfo({:?})", self.fd)
+ }
+ }
+}
+
+#[repr(C)]
+pub struct Capability {}
+
+
+// Closure types: ARR_WORDS
+// an array of bytes -- a buffer of memory
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgArrBytes {
+ pub header : StgHeader,
+ pub bytes : StgWord, // number of bytes in payload
+    // The payload is raw, non-pointer words following the header; the GC
+    // never scans it, which is why C declares it as StgWord rather than
+    // StgClosure*.
+}
+
+// Closure types: MUT_ARR_PTRS_CLEAN, MUT_ARR_PTRS_DIRTY,
+// MUT_ARR_PTRS_FROZEN_DIRTY, MUT_ARR_PTRS_FROZEN_CLEAN
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgMutArrPtrs {
+ pub header : StgHeader,
+ pub n_ptrs : StgWord,
+ pub size : StgWord,
+ pub payload : ClosurePayload,
+}
+
+// Closure types: SMALL_MUT_ARR_PTRS_CLEAN, SMALL_MUT_ARR_PTRS_DIRTY,
+// SMALL_MUT_ARR_PTRS_FROZEN_DIRTY, SMALL_MUT_ARR_PTRS_FROZEN_CLEAN,
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgSmallMutArrPtrs {
+ pub header : StgHeader,
+ pub ptrs : StgWord,
+ pub payload : ClosurePayload,
+}
+
+// Closure types: MUT_VAR_CLEAN, MUT_VAR_DIRTY
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgMutVar {
+ pub header : StgHeader,
+ pub var : TaggedClosureRef,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgStack {
+ pub header : StgHeader,
+ pub stack_size : StgWord32, // number of words
+ pub dirty : StgWord8,
+ pub marking : StgWord8,
+ pub sp : *mut StgStackFrame,
+ pub payload : StgStackPayload, // stack contents - stack frames
+}
+
+impl StgStack {
+ pub fn iter(&self) -> StackIterator {
+ unsafe {
+ let start : *const StgWord = (&self.payload as *const StgStackPayload).cast();
+ StackIterator{
+ current : self.sp,
+ end : start.offset(self.stack_size as isize) as *mut StgStackFrame,
+ }
+ }
+ }
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgStackPayload {}
+
+pub struct StackIterator {
+ current : *mut StgStackFrame, // fst word of the sf
+ end : *mut StgStackFrame,
+}
+
+impl Iterator for StackIterator {
+ type Item = StackFrame;
+ fn next(&mut self) -> Option<StackFrame> {
+ if self.current < self.end {
+ // info table of the stackframe
+ let itbl: &'static StgRetInfoTable = unsafe { &(*self.current).header.info_table};
+ let current = self.current;
+ use StgClosureType::*;
+
+ match itbl.i.type_ {
+ UPDATE_FRAME => unsafe {
+ // self.current = self.current.offset(size_of::<StgUpdateFrame>() as isize);
+ self.current = offset_bytes(self.current, size_of::<StgUpdateFrame>() as isize);
+ Some(StackFrame::UPD_FRAME(&mut *current.cast()))
+ }
+ CATCH_STM_FRAME | CATCH_RETRY_FRAME | ATOMICALLY_FRAME | UNDERFLOW_FRAME |
+ STOP_FRAME | CATCH_FRAME | RET_SMALL => unsafe {
+ let bitmap = itbl.i.layout.small_bitmap;
+                    let mut size = bitmap.size() * size_of::<StgWord>(); // payload, in bytes
+                    size += size_of::<StgStackFrame>(); // plus the frame header
+ self.current = offset_bytes(self.current, size as isize);
+ Some(StackFrame::RET_SMALL(&mut *current.cast(), bitmap))
+ }
+ RET_BIG => unsafe {
+ let bitmap = &*(itbl.i.layout.large_bitmap_ref.deref(&itbl.i));
+ let size = bitmap.size * size_of::<StgWord>() + size_of::<StgStackFrame>();
+ self.current = offset_bytes(self.current, size as isize);
+ Some(StackFrame::RET_BIG(&mut *current.cast(), bitmap))
+ }
+ RET_FUN => unsafe {
+ let ret_fun : &'static StgRetFunFrame = &*current.cast();
+ let fun_info : &'static StgFunInfoTable =
+ StgFunInfoTable::from_info_table(ret_fun.fun.get_info_table());
+ use StgFunType::*;
+ match fun_info.f.fun_type {
+ ARG_GEN => {
+ // small bitmap
+ let small_bitmap : StgSmallBitmap = fun_info.f.bitmap.small_bitmap;
+ let mut size = small_bitmap.size() * size_of::<StgWord>();
+ size += size_of::<StgRetFunFrame>();
+ self.current = offset_bytes(self.current, size as isize);
+ Some(StackFrame::RET_FUN_SMALL(&mut *current.cast(), small_bitmap))
+ }
+                        ARG_GEN_BIG => {
+                            // large bitmap; the offset is relative to the
+                            // function's own info table
+                            let bitmap = &*(fun_info.f.bitmap.large_bitmap_ref.deref(fun_info));
+                            let size = bitmap.size * size_of::<StgWord>() + size_of::<StgRetFunFrame>();
+                            self.current = offset_bytes(self.current, size as isize);
+                            Some(StackFrame::RET_FUN_LARGE(&mut *current.cast(), bitmap))
+                        }
+ _ => {
+ // small bitmap indexed by the function type
+ let small_bitmap = StgFunType::get_small_bitmap(&fun_info.f.fun_type);
+ let mut size = small_bitmap.size() * size_of::<StgWord>();
+ size += size_of::<StgRetFunFrame>();
+ self.current = offset_bytes(self.current, size as isize);
+ Some(StackFrame::RET_FUN_SMALL(&mut *current.cast(), small_bitmap))
+ }
+ }
+ }
+ // TODO: add RET_BCO case
+ _ => panic!("Unexpected stackframe type {:?} ", itbl.i.type_)
+
+ }
+ }
+ else {
+ return None;
+ }
+ }
+}
+
+
+
+// ------ stack frames -----------
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgStackFrameHeader {
+ pub info_table : StgRetInfoTableRef,
+ pub prof_header : StgProfHeader,
+}
+
+// Cast from raw stack words, so the layout must be C-compatible.
+#[repr(C)]
+pub struct StgStackFrame {
+ pub header : StgStackFrameHeader,
+ pub payload : ClosurePayload,
+}
+
+#[allow(non_camel_case_types)]
+#[derive(Debug)]
+pub enum StackFrame {
+ RET_BCO(&'static StgRetFrame),
+ RET_SMALL(&'static mut StgRetFrame, StgSmallBitmap),
+ RET_BIG(&'static mut StgRetFrame, &'static StgLargeBitmap),
+ RET_FUN_SMALL(&'static mut StgRetFunFrame, StgSmallBitmap),
+ RET_FUN_LARGE(&'static mut StgRetFunFrame, &'static StgLargeBitmap),
+ UPD_FRAME(&'static mut StgUpdateFrame),
+ // CATCH_FRAME(&'static StgCatchFrame),
+ // UNDERFLOW_FRAME(&'static StgUnderflowFrame),
+ // STOP_FRAME(&'static StgStopFrame),
+ // ATOMICALLY_FRAME(&'static StgAtomicallyFrame),
+ // CATCH_RETRY_FRAME(&'static StgCatchRetryFrame),
+ // CATCH_STM_FRAME(&'static StgCatchSTMFrame),
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgRetFrame {
+ pub header : StgStackFrameHeader,
+ pub payload : ClosurePayload,
+}
+
+// Closure types: UPDATE_FRAME
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgUpdateFrame {
+ pub header : StgStackFrameHeader,
+ pub updatee : TaggedClosureRef,
+}
+
+// Closure types: CATCH_FRAME
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgCatchFrame {
+ pub header : StgStackFrameHeader,
+ pub exceptions_blocked : StgWord,
+ pub handler : TaggedClosureRef,
+}
+
+// impl walk through stack?
+
+// Closure types: UNDERFLOW_FRAME
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgUnderflowFrame {
+ pub info_table : StgRetInfoTableRef,
+ pub next_chunk : *mut StgStack,
+}
+
+// Closure types: STOP_FRAME
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgStopFrame {
+ pub header : StgStackFrameHeader,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgAtomicallyFrame {
+ pub header : StgStackFrameHeader,
+ pub code : TaggedClosureRef,
+ pub result : TaggedClosureRef,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgCatchSTMFrame {
+ pub header : StgStackFrameHeader,
+ pub code : TaggedClosureRef,
+ pub handler : TaggedClosureRef,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgCatchRetryFrame {
+ pub header : StgStackFrameHeader,
+ pub running_alt_code : StgWord,
+ pub first_code : TaggedClosureRef,
+ pub alt_code : TaggedClosureRef,
+}
+// Closure types: RET_FUN
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgRetFunFrame{
+ pub info_table : StgRetInfoTableRef,
+ pub size : StgWord,
+ pub fun : TaggedClosureRef,
+ pub payload : ClosurePayload,
+}
+
+// ----------- end of stack frame types -----------
+
+// Closure type: CONSTR_0_1
+#[repr(C)]
+pub struct StgIntCharlikeClosure {
+ pub header : StgHeader,
+ pub data : StgWord,
+}
+
+// Stable name, StableName# v
+#[repr(C)]
+pub struct StgStableName {
+ pub header : StgHeader,
+ pub sn : StgWord,
+}
+
+// Closure types: WEAK
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgWeak {
+ pub header : StgHeader,
+ pub cfinalizers : TaggedClosureRef,
+ pub key : TaggedClosureRef,
+ pub value : TaggedClosureRef,
+ pub finalizer : TaggedClosureRef,
+ pub link : *mut StgWeak,
+}
+
+
+#[repr(C)]
+union FinalizerFn {
+ pub without_env: *const extern "C" fn(*mut u8),
+ // ^ (ptr)
+ pub with_env: *const extern "C" fn(*mut u8, *mut u8)
+ // ^ (eptr, ptr)
+}
+
+// Closure type: CONSTR
+#[repr(C)]
+pub struct StgCFinalizerList {
+ header: StgHeader,
+ link: TaggedClosureRef,
+ finalize: FinalizerFn,
+ ptr: *mut u8,
+ eptr: *mut u8,
+ flag: StgWord,
+}
+
+impl StgCFinalizerList {
+ // example of how to use
+ pub unsafe fn run(&self) {
+ match self.flag {
+ 0 => (*self.finalize.without_env)(self.ptr),
+ 1 => (*self.finalize.with_env)(self.eptr, self.ptr),
+ _ => panic!("oh no!")
+ }
+ }
+}
+
+// Closure types: BCO
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgBCO {
+ pub header : StgHeader,
+ pub instrs : *mut StgArrBytes,
+ pub literals : *mut StgArrBytes,
+ pub ptrs : *mut StgMutArrPtrs,
+ pub arity : StgHalfWord,
+ pub size : StgHalfWord,
+ pub bitmap : StgLargeBitmap, // TODO: large bitmap ? check
+}
+
+/*
+TODO: have a look at BCO functions later
+impl StgBCO {
+ // TODO: inline functions of StgBCO
+ #[inline(always)]
+ pub fn BCO_BITMAP(&self) -> *mut StgLargeBitmap {
+ unimplemented!()
+ }
+
+ #[inline(always)]
+ pub fn BCO_BITMAP_SIZE(&self) -> StgWord {
+ unimplemented!()
+ }
+
+ #[inline(always)]
+ pub fn BCO_BITMAP_SIZE(&self) -> StgLargeBitmap {
+ unimplemented!()
+ }
+
+ #[inline(always)]
+ pub fn BCO_BITMAP_SIZEW(&self) -> StgWord {
+ unimplemented!()
+ }
+}
+*/
+
+// Closure types: PRIM (MVar TSO queue entries)
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgMVarTSOQueue {
+ pub header : StgHeader,
+ pub link : *mut StgMVarTSOQueue,
+    pub tso : *mut StgTSO,
+}
+
+// Closure types: MVAR_CLEAN, MVAR_DIRTY
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgMVar {
+ pub header : StgHeader,
+ pub head : *mut StgMVarTSOQueue,
+ pub tail : *mut StgMVarTSOQueue,
+ pub value : TaggedClosureRef,
+}
+
+#[repr(C)]
+pub struct StgTVarWatchQueue {
+ pub header : StgHeader,
+ pub closure : *mut StgTSO,
+ pub next_queue_entry : *mut StgTVarWatchQueue,
+ pub prev_queue_entry : *mut StgTVarWatchQueue,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgTVar {
+ pub header : StgHeader,
+ pub current_value : TaggedClosureRef,
+ pub first_watch_queue_entry : *mut StgTVarWatchQueue,
+ pub num_updates : StgInt,
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct TRecEntry {
+ pub tvar : *mut StgTVar,
+ pub expected_value : TaggedClosureRef,
+ pub new_value : TaggedClosureRef,
+ // TODO: add num_updates when THREADED_RTS
+}
+
+
+const TREC_CHUNK_NUM_ENTRIES: usize = 16;
+
+// contains many TRec entries and link them together
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgTRecChunk {
+ pub header : StgHeader,
+ pub prev_chunk : *mut StgTRecChunk,
+ pub next_entry_idx : StgWord,
+ pub entries : [TRecEntry; TREC_CHUNK_NUM_ENTRIES],
+}
+
+// maybe don't need this
+pub enum TRecState {
+ TrecActive, /* Transaction in progress, outcome undecided */
+ TrecCondemned, /* Transaction in progress, inconsistent / out of date reads */
+ TrecCommitted, /* Transaction has committed, now updating tvars */
+ TrecAborted, /* Transaction has aborted, now reverting tvars */
+ TrecWaiting, /* Transaction currently waiting */
+}
+
+#[repr(C)]
+pub struct StgTRecHeader {
+ pub header : StgHeader,
+ pub enclosing_trec : *mut StgTRecHeader,
+ pub current_chunk : *mut StgTRecChunk,
+ pub state : TRecState,
+}
+
+
+/* ----------------------------------------------------------------------------
+ Messages
+ ------------------------------------------------------------------------- */
+
+#[repr(C)]
+pub struct Message {
+ pub header : StgHeader,
+ pub link : *mut Message,
+}
+
+#[repr(C)]
+pub struct MessageWakeup {
+ pub header : StgHeader,
+ pub link : *mut Message,
+ pub tso : *mut StgTSO,
+}
+
+#[repr(C)]
+pub struct MessageThrowTo {
+ pub header : StgHeader,
+ pub link : *mut MessageThrowTo, // should be just Message ?
+ pub source : *mut StgTSO,
+ pub target : *mut StgTSO,
+ pub exception : TaggedClosureRef,
+}
+
+#[repr(C)]
+pub struct MessageBlackHole {
+ pub header : StgHeader,
+ pub link : *mut MessageBlackHole, // should be just Message ?
+ pub tso : *mut StgTSO,
+ pub bh : TaggedClosureRef,
+}
+
+#[repr(C)]
+pub struct MessageCloneStack {
+ pub header : StgHeader,
+ pub link : *mut Message,
+ pub result : *mut StgMVar,
+ pub tso : *mut StgTSO,
+}
+
+
+/* ----------------------------------------------------------------------------
+ Compact Regions
+ ------------------------------------------------------------------------- */
+#[repr(C)]
+pub struct StgCompactNFDataBlock {
+ pub self_ : *mut StgCompactNFDataBlock,
+ pub owner : *mut StgCompactNFData,
+ pub next : *mut StgCompactNFDataBlock,
+}
+
+#[repr(C)]
+pub struct Hashtable {}
+
+#[repr(C)]
+pub struct StgCompactNFData {
+ pub header : StgHeader,
+ pub total_w : StgWord,
+ pub auto_block_w : StgWord,
+ pub hp : StgPtr,
+ pub hp_lim : StgPtr,
+ pub nursery : *mut StgCompactNFDataBlock,
+ pub last : *mut StgCompactNFDataBlock,
+ pub hash : *mut Hashtable, // TODO: define HashTable
+ pub result : TaggedClosureRef,
+ pub link : *mut StgCompactNFData, // maybe need to rework compact normal form
+}
+
+// TODO: test with some typical haskell objects for object scanning
+
+pub trait IsClosureRef {
+ fn to_tagged_closure_ref(ptr: *mut Self) -> *mut TaggedClosureRef {
+ ptr.cast()
+ }
+}
+
+impl IsClosureRef for TaggedClosureRef {
+ fn to_tagged_closure_ref(ptr: *mut TaggedClosureRef) -> *mut TaggedClosureRef {
+ ptr
+ }
+}
+
+macro_rules! is_closure {
+ ($ty: ident) => {
+ impl IsClosureRef for *mut $ty {
+ fn to_tagged_closure_ref(ptr: *mut Self) -> *mut TaggedClosureRef {
+ ptr.cast()
+ }
+ }
+ }
+}
+
+
+is_closure!(StgTSO);
+is_closure!(StgClosure);
+is_closure!(StgBlockingQueue);
+is_closure!(StgTRecHeader);
+is_closure!(StgStack);
+is_closure!(StgMVarTSOQueue);
+is_closure!(StgTVarWatchQueue);
+is_closure!(StgTRecChunk);
+is_closure!(StgTVar);
+is_closure!(Message);
+is_closure!(MessageThrowTo);
+is_closure!(MessageBlackHole);
+is_closure!(MessageWakeup);
+is_closure!(StgRetInfoTable); \ No newline at end of file
diff --git a/rts/mmtk/mmtk/src/stg_info_table.rs b/rts/mmtk/mmtk/src/stg_info_table.rs
new file mode 100644
index 0000000000..3a5625040b
--- /dev/null
+++ b/rts/mmtk/mmtk/src/stg_info_table.rs
@@ -0,0 +1,396 @@
+use super::types::*;
+use super::stg_closures::*;
+use super::util::*;
+use std::fmt;
+use std::ops::Deref;
+use crate::ghc::closure_flags;
+
+/**
+ * GHC closure info tables in Rust
+ * Original C code is at ghc/rts/include/rts/storage/InfoTables.h
+ */
+
+/* -----------------------------------------------------------------------------
+ Closure flags
+ -------------------------------------------------------------------------- */
+
+#[repr(C)]
+pub struct ClosureFlag (StgWord16);
+
+impl ClosureFlag {
+ const _HNF : ClosureFlag = ClosureFlag(1<<0); /* head normal form? */
+ const _BTM : ClosureFlag = ClosureFlag(1<<1); /* uses info->layout.bitmap */
+ const _NS : ClosureFlag = ClosureFlag(1<<2); /* non-sparkable */
+ const _THU : ClosureFlag = ClosureFlag(1<<3); /* thunk? */
+ const _MUT : ClosureFlag = ClosureFlag(1<<4); /* mutable? */
+ const _UPT : ClosureFlag = ClosureFlag(1<<5); /* unpointed? */
+ const _SRT : ClosureFlag = ClosureFlag(1<<6); /* has an SRT? */
+ const _IND : ClosureFlag = ClosureFlag(1<<7); /* is an indirection? */
+
+ #[inline(always)]
+ pub fn is_mutable(&self) -> bool {(self.0) & (Self::_MUT.0) != 0}
+
+ #[inline(always)]
+ pub fn is_bitmap(&self) -> bool {(self.0) & (Self::_BTM.0) != 0}
+
+ #[inline(always)]
+ pub fn is_thunk(&self) -> bool {(self.0) & (Self::_THU.0) != 0}
+
+ #[inline(always)]
+ pub fn is_unpointed(&self) -> bool {(self.0) & (Self::_UPT.0) != 0}
+
+ #[inline(always)]
+ pub fn has_srt(&self) -> bool {(self.0) & (Self::_SRT.0) != 0}
+
+
+ // TODO: implement closure flags related macros
+ #[inline(always)]
+ pub fn get_closure_flag(_c : *const StgClosure) -> ClosureFlag {
+ unimplemented!()
+ }
+
+ pub fn from_closure_type(ty: StgClosureType) -> Self {
+ unsafe { ClosureFlag(*(closure_flags).offset(ty as isize)) }
+ }
+}
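+
+// Example (sketch): querying the closure_flags table exported by the RTS.
+#[allow(dead_code)]
+fn example_is_mutable(ty: StgClosureType) -> bool {
+    ClosureFlag::from_closure_type(ty).is_mutable()
+}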
+
+
+/* -----------------------------------------------------------------------------
+ Bitmaps
+ -------------------------------------------------------------------------- */
+#[repr(C)]
+pub union Bitmap {
+ pub small_bitmap : StgSmallBitmap,
+ pub large_bitmap_ref : StgLargeBitmapRef,
+}
+
+impl std::fmt::Debug for Bitmap {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
+ unsafe {
+ write!(f, "Bitmap({:?})", self.small_bitmap)
+ }
+ }
+}
+
+// -------------------- small bitmap --------------------
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct StgSmallBitmap (pub StgWord);
+
+impl StgSmallBitmap {
+ // TODO: handle 32 bits constants
+ const BITMAP_BITS_SHIFT : StgWord = 6;
+ const BITMAP_SIZE_MASK : StgWord = 0x3f;
+
+ #[inline(always)]
+ pub fn make_small_bitmap(size : StgWord, bits : StgWord) -> Self {
+ StgSmallBitmap(((bits) << Self::BITMAP_BITS_SHIFT) | (size))
+ }
+
+ #[inline(always)]
+ pub fn size(&self) -> StgWord {
+ (self.0) & Self::BITMAP_SIZE_MASK
+ }
+
+ #[inline(always)]
+ pub fn bits(&self) -> StgWord {
+ (self.0) >> Self::BITMAP_BITS_SHIFT
+ }
+}
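+
+// Example (sketch): the size lives in the low 6 bits and the liveness bits sit
+// above it, so make/size/bits round-trip.
+#[allow(dead_code)]
+fn example_small_bitmap_roundtrip() {
+    let bm = StgSmallBitmap::make_small_bitmap(3, 0b010);
+    assert_eq!(bm.size(), 3);
+    assert_eq!(bm.bits(), 0b010);
+}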
+
+// -------------------- large bitmap --------------------
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgLargeBitmap {
+ pub size : StgWord, // number of bits
+ pub bitmap : LargeBitMapPayload // similar to closure payload in stg_closures.rs
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct LargeBitMapPayload {}
+
+impl LargeBitMapPayload {
+    /// Address of the i-th word of the bitmap. The payload is an inline array
+    /// of StgWords, so we return a pointer *into* it rather than loading a
+    /// word and treating it as a pointer.
+    pub unsafe fn get_w(&self, i: usize) -> *const StgWord {
+        let ptr: *const LargeBitMapPayload = &*self;
+        let payload: *const StgWord = ptr.cast();
+        payload.add(i)
+    }
+ // TODO: might want to iterate through bits as well
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgLargeBitmapRef {
+ pub offset : StgInt
+ // TODO: handle non TABLES_NEXT_TO_CODE
+}
+
+impl StgLargeBitmapRef {
+    // the offset is relative to the *end* of the info table (see offset_from_end)
+ pub unsafe fn deref<InfoTable>(&self, itbl: &InfoTable) -> *const StgLargeBitmap {
+ // TODO: make sure itbl is an info table
+ offset_from_end(itbl, self.offset as isize)
+ }
+}
+
+
+/* ----------------------------------------------------------------------------
+ Info Tables
+ ------------------------------------------------------------------------- */
+#[repr(C)]
+pub struct StgPointerFirst {
+ pub ptrs : StgHalfWord, /* number of pointers */
+ pub nptrs : StgHalfWord, /* number of non-pointers */
+}
+
+#[repr(C)]
+pub union StgClosureInfo {
+ pub payload : StgPointerFirst,
+
+ pub small_bitmap : StgSmallBitmap,
+
+ // TODO: check if x64 is still related to OFFSET_FIELD
+ // Check if hack in Note [x86-64-relative] is still necessary
+ pub large_bitmap_ref : StgLargeBitmapRef,
+
+ pub selector_offset : StgWord,
+}
+
+impl fmt::Debug for StgClosureInfo {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "StgClosureInfo")
+ }
+}
+
+/* ----------------------------------------------------------------------------
+ Function info tables
+ ------------------------------------------------------------------------- */
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgSRTField {
+ pub srt : StgHalfInt,
+ // TODO: handle non USE_INLINE_SRT_FIELD
+}
+
+#[cfg(not(profiling))]
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgProfInfo {} // TODO: handle profiling case
+
+/// Tables-next-to-code reference.
+///
+/// A reference to a structure (e.g. an info table) which, when
+/// tables-next-to-code is enabled, lives directly before the entry code.
+/// It can be used for both StgInfoTable and StgRetInfoTable.
+/// In that case &info_table is StgHeader.info - sizeof(StgInfoTable).
+#[repr(C)]
+#[derive(Debug, Clone, Copy)]
+pub struct TntcRef<T> (*const T);
+
+impl<T> TntcRef<T> {
+ pub fn get_ptr(&self) -> *const T {
+ unsafe {
+            // FIXME: assumes tables-next-to-code for now; the cfg! check is
+            // short-circuited until the non-TNTC layout is supported.
+            if true || cfg!(tables_next_to_code) {
+ self.0.offset(-1)
+ } else {
+ self.0
+ }
+ }
+ }
+
+    pub fn get_mut_ptr(&self) -> *mut T {
+        self.get_ptr() as *mut T
+    }
+}
+
+impl<T> Deref for TntcRef<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe {
+ &*self.get_ptr()
+ }
+ }
+}
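+
+// With tables-next-to-code, the info pointer stored in a closure header points
+// just past the info table, so `Deref` above makes field access transparent.
+// A minimal sketch:
+//
+//     fn closure_type(itbl: StgInfoTableRef) -> StgClosureType {
+//         itbl.type_ // get_ptr() steps back one StgInfoTable first
+//     }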
+
+pub type StgInfoTableRef = TntcRef<StgInfoTable>;
+pub type StgRetInfoTableRef = TntcRef<StgRetInfoTable>;
+pub type StgFunInfoTableRef = TntcRef<StgFunInfoTable>;
+pub type StgThunkInfoTableRef = TntcRef<StgThunkInfoTable>;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgInfoTable {
+ // TODO: non TABLES_NEXT_TO_CODE
+ // #[cfg(not(tables_next_to_code))]
+ // pub code : *const u8, // pointer to entry code
+ pub prof : StgProfInfo,
+ pub layout : StgClosureInfo,
+ pub type_ : StgClosureType,
+ pub srt : StgSRTField,
+ // pub code : *mut StgCode, (zero length array)
+}
+
+impl StgInfoTable {
+ pub fn get_srt(&self) -> Option<*const StgClosure> {
+ unsafe {
+ if self.srt.srt != 0 {
+ Some(offset_from_end(self, self.srt.srt as isize))
+ }
+ else { None }
+ }
+ }
+}
+
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgFunInfoExtra {
+ pub slow_apply : StgInt,
+ pub bitmap : Bitmap,
+
+ // TODO: handle offset for USE_INLINE_SRT_FIELD for srtfield
+
+ pub fun_type : StgFunType, // in types.rs from rts/include/rts/storage/FunTypes.h
+ pub arity : StgHalfWord,
+ // TODO: handle non TABLES_NEXT_TO_CODE (StgFunInfoExtraFwd)
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgFunInfoTable {
+ pub f : StgFunInfoExtra, // 3 words
+ pub i : StgInfoTable // 2 words
+ // TODO: handle non TABLES_NEXT_TO_CODE (need to use StgFunInfoExtraFwd)
+}
+
+impl StgFunInfoTable {
+    // rts/include/rts/storage/ClosureMacros.h
+    // With tables-next-to-code both tables end at the entry code, so step to
+    // the end of the StgInfoTable and back by one whole StgFunInfoTable; its
+    // `i` field then aliases the original info table.
+ pub fn from_info_table(itbl : &'static StgInfoTable) -> &'static mut StgFunInfoTable {
+ unsafe {
+ let itbl = itbl as *const StgInfoTable;
+ &mut *(itbl.offset(1) as *mut StgFunInfoTable).offset(-1)
+ }
+ }
+
+ pub fn to_info_table(itbl : &'static StgFunInfoTable) -> &'static StgInfoTable {
+ unsafe {
+ let itbl = itbl as *const StgFunInfoTable;
+ &*(itbl.offset(1) as *const StgInfoTable).offset(-1)
+ }
+ }
+
+ pub fn get_srt(&self) -> Option<*const StgClosure> {
+ unsafe {
+ if self.i.srt.srt != 0 {
+ Some(offset_from_end(self, self.i.srt.srt as isize))
+ }
+ else {
+ None
+ }
+ }
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ Return info tables
+ -------------------------------------------------------------------------- */
+#[repr(C)]
+#[derive(Debug)]
+pub struct StgRetInfoTable {
+ // (check line 160 InfoTables.h)
+ // TODO: USE_SRT_POINTER is true
+ // TODO: USE_SRT_POINTER is false but USE_SRT_OFFSET is true
+ pub i : StgInfoTable, // both false case
+}
+
+impl StgRetInfoTable {
+ pub fn get_srt(&self) -> Option<*const StgClosure> {
+ unsafe {
+ if self.i.srt.srt != 0 {
+ Some(offset_from_end(self, self.i.srt.srt as isize))
+ }
+ else {
+ None
+ }
+ }
+ }
+}
+
+
+/* -----------------------------------------------------------------------------
+ Thunk info tables
+ -------------------------------------------------------------------------- */
+#[repr(C)]
+pub struct StgThunkInfoTable {
+ // (check line 160 InfoTables.h)
+ // TODO: USE_SRT_POINTER is true
+ // TODO: USE_SRT_POINTER is false but USE_SRT_OFFSET is true
+ pub i : StgInfoTable, // both false case
+}
+
+impl StgThunkInfoTable {
+ pub fn get_srt(&self) -> Option<*const StgClosure> {
+ unsafe {
+ if self.i.srt.srt != 0 {
+ Some(offset_from_end(self, self.i.srt.srt as isize))
+ }
+ else {
+ None
+ }
+ }
+ }
+
+ pub fn from_info_table(itbl : &'static StgInfoTable) -> &'static mut StgThunkInfoTable {
+ unsafe {
+ let itbl = itbl as *const StgInfoTable;
+ &mut *(itbl.offset(1) as *mut StgThunkInfoTable).offset(-1)
+ }
+ }
+
+ pub fn to_info_table(itbl : &'static StgThunkInfoTable) -> &'static StgInfoTable {
+ unsafe {
+ let itbl = itbl as *const StgThunkInfoTable;
+ &*(itbl.offset(1) as *const StgInfoTable).offset(-1)
+ }
+ }
+}
+
+
+/* -----------------------------------------------------------------------------
+ Constructor info tables
+ -------------------------------------------------------------------------- */
+#[repr(C)]
+pub struct StgConInfoTable {
+ // TODO: handle non TABLES_NEXT_TO_CODE
+ pub con_desc_offset : StgHalfInt,
+ pub padding : StgHalfInt,
+ pub i : StgInfoTable,
+}
+
+impl StgConInfoTable {
+ pub unsafe fn con_desc(&self) -> &'static std::ffi::CStr {
+ std::ffi::CStr::from_ptr(offset_from_end(self, self.con_desc_offset as isize))
+ }
+
+ pub fn from_info_table(itbl : &'static StgInfoTable) -> &'static StgConInfoTable {
+ unsafe {
+ let itbl = itbl as *const StgInfoTable;
+ &*(itbl.offset(1) as *const StgConInfoTable).offset(-1)
+ }
+ }
+
+ pub fn to_info_table(itbl : &'static StgConInfoTable) -> &'static StgInfoTable {
+ unsafe {
+ let itbl = itbl as *const StgConInfoTable;
+ &*(itbl.offset(1) as *const StgInfoTable).offset(-1)
+ }
+ }
+}
+
+// TODO: implement other macros
diff --git a/rts/mmtk/mmtk/src/test.rs b/rts/mmtk/mmtk/src/test.rs
new file mode 100644
index 0000000000..0152c517d1
--- /dev/null
+++ b/rts/mmtk/mmtk/src/test.rs
@@ -0,0 +1,168 @@
+use mmtk::vm::{EdgeVisitor};
+use crate::stg_closures::*;
+use crate::scanning::*;
+use crate::types::*;
+use crate::edges::GHCEdge;
+
+
+use std::vec::*;
+
+#[no_mangle]
+pub unsafe extern "C" fn print_obj(obj : TaggedClosureRef){
+ let closure = Closure::from_ptr(obj.to_ptr());
+ println!("obj in address {:?}:", obj.to_ptr());
+ println!("{:?}", closure);
+
+ // TODO: not working
+ // match closure {
+ // Closure::Constr(_) => {
+ // println!("closure={:?}, {:?}", closure,
+ // StgConInfoTable::from_info_table(obj.get_info_table()).con_desc());
+ // }
+ // _ => {
+ // println!("{:?}", closure);
+ // }
+ // }
+}
+
+struct CollectPointerVisitor {
+ pub pointers : Vec<TaggedClosureRef>,
+}
+
+impl EdgeVisitor<GHCEdge> for CollectPointerVisitor {
+ fn visit_edge(&mut self, _edge: GHCEdge) {
+ // TODO: redo type match
+ // self.pointers.push(TaggedClosureRef::from_address(edge.load()));
+ }
+}
+
+impl CollectPointerVisitor {
+ fn new() -> Self {
+ CollectPointerVisitor{pointers : Vec::new()}
+ }
+}
+
+
+extern "C" {
+ fn heap_view_closureSize(closure: *const StgClosure) -> usize;
+ fn collect_pointers(closure: *const StgClosure, pointers: *mut *const StgClosure) -> usize;
+}
+
+#[no_mangle]
+/*
+pub unsafe extern "C" fn rs_collect_pointers(obj : TaggedClosureRef) {
+ // keep a common set to iterate through all closures
+ // recursively visit visitor.pointers
+ // 1. set of obj to visit
+ // 2. set of obj visited
+
+ // Rust version of tracing all the pointers
+ // println!("Start tracing pointers using Rust heap model...");
+ let mut visited = Vec::new();
+ let mut to_visit = Vec::new();
+ to_visit.push(obj);
+
+ let mut visitor = CollectPointerVisitor::new();
+ while !to_visit.is_empty() {
+        let x = to_visit.pop().expect("visitor empty but still popping elements...");
+ // println!("visiting this object {:?} in Rust", x.to_ptr());
+ if !visited.contains(&x) {
+ visit_closure(x, &mut visitor); // dereferencing the borrow?
+ to_visit.append(&mut visitor.pointers); // this should clear visitor.pointers
+ visited.push(x);
+ }
+ }
+
+ println!();
+ // C version of tracing all the pointers
+ // println!("Start tracing pointers using C heap model...");
+ let mut visited_c = Vec::new();
+ let mut to_visit_c = Vec::new();
+ to_visit_c.push(obj.to_ptr());
+
+ while !to_visit_c.is_empty() {
+        let mut x = to_visit_c.pop().expect("visitor empty but still popping elements...");
+ x = TaggedClosureRef::from_ptr(x as *mut StgClosure).to_ptr();
+ if !visited_c.contains(&x) {
+ // println!("visiting this object {:?} in C", x);
+ let mut _to_visit : Vec<*const StgClosure> = Vec::with_capacity(heap_view_closureSize(x));
+ let _n = collect_pointers(x, _to_visit.as_mut_ptr());
+
+ _to_visit.set_len(_n); // update the length of the vector after visiting
+ to_visit_c.append(&mut _to_visit); // this should clear _to_visit
+ visited_c.push(x);
+ }
+ }
+
+ // comparing
+ // println!("\nFinish visiting all the pointers, comparing the two result...");
+
+    assert_eq!(visited.len(), visited_c.len(), "The two vectors are not the same length");
+
+ for (i, j) in visited.into_iter().zip(visited_c.into_iter()) {
+ // print_obj(i);
+ // print_obj(TaggedClosureRef::from_ptr(j as *mut StgClosure));
+ // println!();
+ assert_eq!(i.to_ptr(), TaggedClosureRef::from_ptr(j as *mut StgClosure).to_ptr(),
+ "Pointers not equal to each other {:?}, {:?}", i, j);
+ }
+}
+*/
+
+pub unsafe extern "C" fn rs_collect_pointers(obj : TaggedClosureRef) {
+ let mut visited : Vec<TaggedClosureRef> = Vec::new();
+ let mut to_visit : Vec<TaggedClosureRef> = Vec::new();
+
+ // let mut visited_c : Vec<*const StgClosure> = Vec::new();
+ // let mut to_visit_c : Vec<*const StgClosure> = Vec::new();
+
+ to_visit.push(obj);
+ while !to_visit.is_empty() {
+        let x = to_visit.pop().expect("visitor empty but still popping elements...");
+ if visited.contains(&x) {
+ continue;
+ }
+
+ // visit with Rust implementation
+ let mut visitor = CollectPointerVisitor::new();
+ visit_closure(x, &mut visitor);
+ let mut rust_ptrs = visitor.pointers;
+
+ println!("{:?}, {:?}", x.to_ptr(), x.get_info_table().type_);
+
+ // visit with C implementation
+ // let x_ptr = TaggedClosureRef::from_ptr(x as *mut StgClosure).to_ptr();
+ let mut c_ptrs : Vec<*const StgClosure> = Vec::with_capacity(heap_view_closureSize(x.to_ptr()));
+ let _n = collect_pointers(x.to_ptr(), c_ptrs.as_mut_ptr());
+ c_ptrs.set_len(_n); // update the length of the vector after visiting
+
+ rust_ptrs.sort();
+ let rust_ptrs_new : Vec<*const StgClosure> = rust_ptrs.iter().map(|x| x.to_tagged_ptr()).collect();
+ c_ptrs.sort();
+ let c_ptrs_new : Vec<*const StgClosure> = c_ptrs.iter().map(|x| TaggedClosureRef::from_ptr(*x as *mut StgClosure).to_ptr()).collect();
+
+    // TODO: keep a whitelist of closure types to skip
+ // 1. stack
+ match x.get_info_table().type_ {
+ StgClosureType::STACK | StgClosureType::TVAR => {
+ continue;
+ }
+ _ => ()
+ }
+
+ // check that results match
+    // assert_eq!(rust_ptrs.len(), c_ptrs.len(), "The two vectors are not the same length");
+    assert_eq!(rust_ptrs_new, c_ptrs_new, "Rust and C pointers do not match");
+
+ // for (i, j) in rust_ptrs.iter().zip(c_ptrs.into_iter()) { // into_iter will consume the array; iter gives a reference of elements
+ // // print_obj(i);
+ // // print_obj(TaggedClosureRef::from_ptr(j as *mut StgClosure));
+ // // println!();
+ // assert_eq!(i.to_ptr(), TaggedClosureRef::from_ptr(j as *mut StgClosure).to_ptr(),
+ // "Pointers not equal to each other {:?}, {:?}", i, j);
+ // }
+
+ visited.push(x);
+ to_visit.append(&mut rust_ptrs);
+ }
+} \ No newline at end of file
diff --git a/rts/mmtk/mmtk/src/tests/allocate_with_disable_collection.rs b/rts/mmtk/mmtk/src/tests/allocate_with_disable_collection.rs
new file mode 100644
index 0000000000..7f70d00684
--- /dev/null
+++ b/rts/mmtk/mmtk/src/tests/allocate_with_disable_collection.rs
@@ -0,0 +1,22 @@
+use crate::api::*;
+use mmtk::util::opaque_pointer::*;
+use mmtk::AllocationSemantics;
+
+/// This test allocates after calling disable_collection(). When we exceed the
+/// heap limit, MMTk will NOT trigger a GC, and the allocation will succeed.
+#[test]
+pub fn allocate_with_disable_collection() {
+ const MB: usize = 1024 * 1024;
+ // 1MB heap
+ mmtk_gc_init(MB);
+ mmtk_initialize_collection(VMThread::UNINITIALIZED);
+ let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));
+ // Allocate 1MB. It should be fine.
+ let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
+ assert!(!addr.is_zero());
+ // Disable GC
+ mmtk_disable_collection();
+    // Allocate another MB. This exceeds the heap size, but since GC is disabled, MMTk will allow the allocation.
+ let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
+ assert!(!addr.is_zero());
+}
diff --git a/rts/mmtk/mmtk/src/tests/allocate_with_initialize_collection.rs b/rts/mmtk/mmtk/src/tests/allocate_with_initialize_collection.rs
new file mode 100644
index 0000000000..837b6aa268
--- /dev/null
+++ b/rts/mmtk/mmtk/src/tests/allocate_with_initialize_collection.rs
@@ -0,0 +1,18 @@
+use crate::api::*;
+use mmtk::util::opaque_pointer::*;
+use mmtk::AllocationSemantics;
+
+/// This test allocates after calling initialize_collection(). When we exceed the heap limit,
+/// MMTk will trigger a GC and block_for_gc will be called.
+/// We haven't implemented block_for_gc, so it will panic.
+#[test]
+#[should_panic(expected = "block_for_gc is not implemented")]
+pub fn allocate_with_initialize_collection() {
+ const MB: usize = 1024 * 1024;
+ // 1MB heap
+ mmtk_gc_init(MB);
+ mmtk_initialize_collection(VMThread::UNINITIALIZED);
+ let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));
+ // Attempt to allocate 2MB. This will trigger GC.
+ let addr = mmtk_alloc(handle, 2 * MB, 8, 0, AllocationSemantics::Default);
+ assert!(!addr.is_zero());
+}
diff --git a/rts/mmtk/mmtk/src/tests/allocate_with_re_enable_collection.rs b/rts/mmtk/mmtk/src/tests/allocate_with_re_enable_collection.rs
new file mode 100644
index 0000000000..6ad75a426e
--- /dev/null
+++ b/rts/mmtk/mmtk/src/tests/allocate_with_re_enable_collection.rs
@@ -0,0 +1,26 @@
+use crate::api::*;
+use mmtk::util::opaque_pointer::*;
+use mmtk::AllocationSemantics;
+
+/// This test allocates after calling initialize_collection(). When we exceed the heap limit,
+/// MMTk will trigger a GC and block_for_gc will be called. We haven't implemented block_for_gc,
+/// so it will panic. This test is similar to allocate_with_initialize_collection, except that
+/// GC is disabled and then re-enabled partway through.
+#[test]
+#[should_panic(expected = "block_for_gc is not implemented")]
+pub fn allocate_with_re_enable_collection() {
+ const MB: usize = 1024 * 1024;
+ // 1MB heap
+ mmtk_gc_init(MB);
+ mmtk_initialize_collection(VMThread::UNINITIALIZED);
+ let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));
+ // Allocate 1MB. It should be fine.
+ let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
+ assert!(!addr.is_zero());
+    // Disable GC so we can keep allocating without triggering a GC.
+ mmtk_disable_collection();
+ let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
+ assert!(!addr.is_zero());
+ // Enable GC again. When we allocate, we should see a GC triggered immediately.
+ mmtk_enable_collection();
+ let addr = mmtk_alloc(handle, MB, 8, 0, AllocationSemantics::Default);
+ assert!(!addr.is_zero());
+}
diff --git a/rts/mmtk/mmtk/src/tests/allocate_without_initialize_collection.rs b/rts/mmtk/mmtk/src/tests/allocate_without_initialize_collection.rs
new file mode 100644
index 0000000000..2bfd5e5c11
--- /dev/null
+++ b/rts/mmtk/mmtk/src/tests/allocate_without_initialize_collection.rs
@@ -0,0 +1,17 @@
+use crate::api::*;
+use mmtk::util::opaque_pointer::*;
+use mmtk::AllocationSemantics;
+
+/// This test allocates without calling initialize_collection(). When we exceed the heap limit,
+/// a GC should be triggered by MMTk; but as collection was never initialized, MMTk will panic.
+#[test]
+#[should_panic(expected = "GC is not allowed here")]
+pub fn allocate_without_initialize_collection() {
+ const MB: usize = 1024 * 1024;
+ // 1MB heap
+ mmtk_gc_init(MB);
+ let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));
+ // Attempt to allocate 2MB memory. This should trigger a GC, but as we never call initialize_collection(), we cannot do GC.
+ let addr = mmtk_alloc(handle, 2 * MB, 8, 0, AllocationSemantics::Default);
+ assert!(!addr.is_zero());
+}
diff --git a/rts/mmtk/mmtk/src/tests/conservatism.rs b/rts/mmtk/mmtk/src/tests/conservatism.rs
new file mode 100644
index 0000000000..e45c49b3b3
--- /dev/null
+++ b/rts/mmtk/mmtk/src/tests/conservatism.rs
@@ -0,0 +1,154 @@
+// GITHUB-CI: MMTK_PLAN=all
+// GITHUB-CI: FEATURES=is_mmtk_object
+
+use crate::api::*;
+use crate::object_model::OBJECT_REF_OFFSET;
+use crate::tests::fixtures::{Fixture, SingleObject};
+use mmtk::util::constants::LOG_BITS_IN_WORD;
+use mmtk::util::is_mmtk_object::ALLOC_BIT_REGION_SIZE;
+use mmtk::util::*;
+
+static SINGLE_OBJECT: Fixture<SingleObject> = Fixture::new();
+
+fn basic_filter(addr: Address) -> bool {
+ !addr.is_zero() && addr.as_usize() % ALLOC_BIT_REGION_SIZE == OBJECT_REF_OFFSET
+}
+
+fn assert_filter_pass(addr: Address) {
+ assert!(
+ basic_filter(addr),
+ "{} should pass basic filter, but failed.",
+ addr,
+ );
+}
+
+fn assert_filter_fail(addr: Address) {
+ assert!(
+ !basic_filter(addr),
+ "{} should fail basic filter, but passed.",
+ addr,
+ );
+}
+
+fn assert_valid_objref(addr: Address) {
+ assert!(
+ mmtk_is_mmtk_object(addr),
+ "mmtk_is_mmtk_object({}) should return true. Got false.",
+ addr,
+ );
+}
+
+fn assert_invalid_objref(addr: Address, real: Address) {
+ assert!(
+ !mmtk_is_mmtk_object(addr),
+ "mmtk_is_mmtk_object({}) should return false. Got true. Real object: {}",
+ addr,
+ real,
+ );
+}
+
+#[test]
+pub fn null() {
+ SINGLE_OBJECT.with_fixture(|fixture| {
+ let addr = Address::ZERO;
+ assert_filter_fail(addr);
+ assert_invalid_objref(addr, fixture.objref.to_address());
+ });
+}
+
+// This should be small enough w.r.t `HEAP_START` and `HEAP_END`.
+const SMALL_OFFSET: usize = 16384;
+
+#[test]
+pub fn too_small() {
+ SINGLE_OBJECT.with_fixture(|fixture| {
+ for offset in 1usize..SMALL_OFFSET {
+ let addr = Address::ZERO + offset;
+ assert_invalid_objref(addr, fixture.objref.to_address());
+ }
+ });
+}
+
+#[test]
+pub fn max() {
+ SINGLE_OBJECT.with_fixture(|fixture| {
+ let addr = Address::MAX;
+ assert_invalid_objref(addr, fixture.objref.to_address());
+ });
+}
+
+#[test]
+pub fn too_big() {
+ SINGLE_OBJECT.with_fixture(|fixture| {
+ for offset in 1usize..SMALL_OFFSET {
+ let addr = Address::MAX - offset;
+ assert_invalid_objref(addr, fixture.objref.to_address());
+ }
+ });
+}
+
+#[test]
+pub fn direct_hit() {
+ SINGLE_OBJECT.with_fixture(|fixture| {
+ let addr = fixture.objref.to_address();
+ assert_filter_pass(addr);
+ assert_valid_objref(addr);
+ });
+}
+
+const SEVERAL_PAGES: usize = 4 * mmtk::util::constants::BYTES_IN_PAGE;
+
+#[test]
+pub fn small_offsets() {
+ SINGLE_OBJECT.with_fixture(|fixture| {
+ for offset in 1usize..SEVERAL_PAGES {
+ let addr = fixture.objref.to_address() + offset;
+ if basic_filter(addr) {
+ assert_invalid_objref(addr, fixture.objref.to_address());
+ }
+ }
+ });
+}
+
+#[test]
+pub fn medium_offsets_aligned() {
+ SINGLE_OBJECT.with_fixture(|fixture| {
+ let alignment = std::mem::align_of::<Address>();
+ for offset in (alignment..(alignment * SEVERAL_PAGES)).step_by(alignment) {
+ let addr = fixture.objref.to_address() + offset;
+ assert_filter_pass(addr);
+ assert_invalid_objref(addr, fixture.objref.to_address());
+ }
+ });
+}
+
+#[test]
+pub fn large_offsets_aligned() {
+ SINGLE_OBJECT.with_fixture(|fixture| {
+ for log_offset in 12usize..(usize::BITS as usize) {
+ let offset = 1usize << log_offset;
+ let addr = match fixture.objref.to_address().as_usize().checked_add(offset) {
+ Some(n) => unsafe { Address::from_usize(n) },
+ None => break,
+ };
+ assert_filter_pass(addr);
+ assert_invalid_objref(addr, fixture.objref.to_address());
+ }
+ });
+}
+
+#[test]
+pub fn negative_offsets() {
+ SINGLE_OBJECT.with_fixture(|fixture| {
+ for log_offset in LOG_BITS_IN_WORD..(usize::BITS as usize) {
+ let offset = 1usize << log_offset;
+ let addr = match fixture.objref.to_address().as_usize().checked_sub(offset) {
+ Some(0) => break,
+ Some(n) => unsafe { Address::from_usize(n) },
+ None => break,
+ };
+ assert_filter_pass(addr);
+ assert_invalid_objref(addr, fixture.objref.to_address());
+ }
+ });
+}
diff --git a/rts/mmtk/mmtk/src/tests/fixtures/mod.rs b/rts/mmtk/mmtk/src/tests/fixtures/mod.rs
new file mode 100644
index 0000000000..9fdab97888
--- /dev/null
+++ b/rts/mmtk/mmtk/src/tests/fixtures/mod.rs
@@ -0,0 +1,68 @@
+use atomic_refcell::AtomicRefCell;
+use std::sync::Once;
+
+use mmtk::AllocationSemantics;
+use mmtk::util::{ObjectReference, VMThread, VMMutatorThread};
+
+use crate::api::*;
+use crate::object_model::OBJECT_REF_OFFSET;
+
+pub trait FixtureContent {
+ fn create() -> Self;
+}
+
+pub struct Fixture<T: FixtureContent> {
+ content: AtomicRefCell<Option<Box<T>>>,
+ once: Once,
+}
+
+unsafe impl<T: FixtureContent> Sync for Fixture<T> {}
+
+impl<T: FixtureContent> Fixture<T> {
+ pub const fn new() -> Self {
+ Self {
+ content: AtomicRefCell::new(None),
+ once: Once::new(),
+ }
+ }
+
+ pub fn with_fixture<F: Fn(&T)>(&self, func: F) {
+ self.once.call_once(|| {
+ let content = Box::new(T::create());
+ let mut borrow = self.content.borrow_mut();
+ *borrow = Some(content);
+ });
+ {
+ let borrow = self.content.borrow();
+ func(borrow.as_ref().unwrap())
+ }
+ }
+}
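+
+// Typical use (a sketch mirroring the tests in this directory): declare one
+// static fixture per test module and run assertions against the
+// lazily-created content.
+//
+//     static SINGLE_OBJECT: Fixture<SingleObject> = Fixture::new();
+//     SINGLE_OBJECT.with_fixture(|f| assert!(!f.objref.to_address().is_zero()));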
+
+pub struct SingleObject {
+ pub objref: ObjectReference,
+}
+
+impl FixtureContent for SingleObject {
+ fn create() -> Self {
+ const MB: usize = 1024 * 1024;
+ // 1MB heap
+ mmtk_gc_init(MB);
+ mmtk_initialize_collection(VMThread::UNINITIALIZED);
+ // Make sure GC does not run during test.
+ mmtk_disable_collection();
+ let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));
+
+        // A relatively small object. (The "typical for Ruby" sizing comes from
+        // the MMTk binding these fixtures were adapted from.)
+ let size = 40;
+ let semantics = AllocationSemantics::Default;
+
+ let addr = mmtk_alloc(handle, size, 8, 0, semantics);
+ assert!(!addr.is_zero());
+
+ let objref = unsafe { addr.add(OBJECT_REF_OFFSET).to_object_reference() };
+ mmtk_post_alloc(handle, objref, size, semantics);
+
+ SingleObject { objref }
+ }
+}
diff --git a/rts/mmtk/mmtk/src/tests/handle_mmap_conflict.rs b/rts/mmtk/mmtk/src/tests/handle_mmap_conflict.rs
new file mode 100644
index 0000000000..5ef2fbb0be
--- /dev/null
+++ b/rts/mmtk/mmtk/src/tests/handle_mmap_conflict.rs
@@ -0,0 +1,24 @@
+use mmtk::util::Address;
+use mmtk::util::opaque_pointer::*;
+use mmtk::util::memory;
+use crate::GHCVM;
+
+#[test]
+pub fn test_handle_mmap_conflict() {
+    let start = unsafe { Address::from_usize(0x100_0000) };
+    let one_megabyte = 1_000_000;
+ let mmap1_res = memory::dzmmap_noreplace(start, one_megabyte);
+ assert!(mmap1_res.is_ok());
+
+ let panic_res = std::panic::catch_unwind(|| {
+ let mmap2_res = memory::dzmmap_noreplace(start, one_megabyte);
+ assert!(mmap2_res.is_err());
+ memory::handle_mmap_error::<GHCVM>(mmap2_res.err().unwrap(), VMThread::UNINITIALIZED);
+ });
+
+ // The error should match the error message in memory::handle_mmap_error()
+ assert!(panic_res.is_err());
+ let err = panic_res.err().unwrap();
+ assert!(err.is::<&str>());
+    // "quanrantine" [sic]: the string must match mmtk-core's message exactly.
+    assert_eq!(err.downcast_ref::<&str>().unwrap(), &"Failed to mmap, the address is already mapped. Should MMTk quanrantine the address range first?");
+} \ No newline at end of file
diff --git a/rts/mmtk/mmtk/src/tests/handle_mmap_oom.rs b/rts/mmtk/mmtk/src/tests/handle_mmap_oom.rs
new file mode 100644
index 0000000000..45556d498b
--- /dev/null
+++ b/rts/mmtk/mmtk/src/tests/handle_mmap_oom.rs
@@ -0,0 +1,23 @@
+use mmtk::util::Address;
+use mmtk::util::opaque_pointer::*;
+use mmtk::util::memory;
+use crate::GHCVM;
+
+#[test]
+pub fn test_handle_mmap_oom() {
+ let panic_res = std::panic::catch_unwind(move || {
+        let start = unsafe { Address::from_usize(0x100_0000) };
+        let one_terabyte = 1_000_000_000_000;
+ // mmap 1 terabyte memory - we expect this will fail due to out of memory.
+ // If that's not the case, increase the size we mmap.
+ let mmap_res = memory::dzmmap_noreplace(start, one_terabyte);
+
+ memory::handle_mmap_error::<GHCVM>(mmap_res.err().unwrap(), VMThread::UNINITIALIZED);
+ });
+ assert!(panic_res.is_err());
+
+ // The error should match the default implementation of Collection::out_of_memory()
+ let err = panic_res.err().unwrap();
+ assert!(err.is::<String>());
+ assert_eq!(err.downcast_ref::<String>().unwrap(), &"Out of memory with MmapOutOfMemory!");
+}
diff --git a/rts/mmtk/mmtk/src/tests/is_in_mmtk_spaces.rs b/rts/mmtk/mmtk/src/tests/is_in_mmtk_spaces.rs
new file mode 100644
index 0000000000..c966d3ffc8
--- /dev/null
+++ b/rts/mmtk/mmtk/src/tests/is_in_mmtk_spaces.rs
@@ -0,0 +1,69 @@
+// GITHUB-CI: MMTK_PLAN=all
+
+use crate::tests::fixtures::{Fixture, SingleObject};
+use mmtk::memory_manager::is_in_mmtk_spaces;
+use mmtk::util::*;
+
+static SINGLE_OBJECT: Fixture<SingleObject> = Fixture::new();
+
+#[test]
+pub fn null() {
+ SINGLE_OBJECT.with_fixture(|_fixture| {
+ assert!(
+ !is_in_mmtk_spaces(unsafe { Address::ZERO.to_object_reference() }),
+ "NULL pointer should not be in any MMTk spaces."
+ );
+ });
+}
+
+#[test]
+pub fn max() {
+ SINGLE_OBJECT.with_fixture(|_fixture| {
+ assert!(
+ !is_in_mmtk_spaces(unsafe { Address::MAX.to_object_reference() }),
+ "Address::MAX should not be in any MMTk spaces."
+ );
+ });
+}
+
+#[test]
+pub fn direct_hit() {
+ SINGLE_OBJECT.with_fixture(|fixture| {
+ assert!(
+ is_in_mmtk_spaces(fixture.objref),
+ "The address of the allocated object should be in the space"
+ );
+ });
+}
+
+#[test]
+pub fn large_offsets_aligned() {
+ SINGLE_OBJECT.with_fixture(|fixture| {
+ for log_offset in 12usize..(usize::BITS as usize) {
+ let offset = 1usize << log_offset;
+ let addr = match fixture.objref.to_address().as_usize().checked_add(offset) {
+ Some(n) => unsafe { Address::from_usize(n) },
+ None => break,
+ };
+ // It's just a smoke test. It is hard to predict if the addr is still in any space,
+ // but it must not crash.
+ let _ = is_in_mmtk_spaces(unsafe { addr.to_object_reference() });
+ }
+ });
+}
+
+#[test]
+pub fn negative_offsets() {
+ SINGLE_OBJECT.with_fixture(|fixture| {
+ for log_offset in 1usize..(usize::BITS as usize) {
+ let offset = 1usize << log_offset;
+ let addr = match fixture.objref.to_address().as_usize().checked_sub(offset) {
+ Some(n) => unsafe { Address::from_usize(n) },
+ None => break,
+ };
+ // It's just a smoke test. It is hard to predict if the addr is still in any space,
+ // but it must not crash.
+ let _ = is_in_mmtk_spaces(unsafe { addr.to_object_reference() });
+ }
+ });
+}
diff --git a/rts/mmtk/mmtk/src/tests/issue139.rs b/rts/mmtk/mmtk/src/tests/issue139.rs
new file mode 100644
index 0000000000..ed6d4e8998
--- /dev/null
+++ b/rts/mmtk/mmtk/src/tests/issue139.rs
@@ -0,0 +1,17 @@
+use crate::api::*;
+use mmtk::util::opaque_pointer::*;
+use mmtk::AllocationSemantics;
+
+#[test]
+pub fn issue139_alloc_non_multiple_of_min_alignment() {
+ mmtk_gc_init(200*1024*1024);
+ let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED));
+
+    // Allocate 14 bytes with 8-byte alignment required
+ let addr = mmtk_alloc(handle, 14, 8, 0, AllocationSemantics::Default);
+ assert!(addr.is_aligned_to(8));
+    // After this allocation the cursor is not MIN_ALIGNMENT aligned. If the next
+    // allocation asserted that the cursor is MIN_ALIGNMENT aligned, it would fail,
+    // so that assertion had to be removed.
+ let addr2 = mmtk_alloc(handle, 14, 8, 0, AllocationSemantics::Default);
+ assert!(addr2.is_aligned_to(8));
+}
diff --git a/rts/mmtk/mmtk/src/tests/malloc.rs b/rts/mmtk/mmtk/src/tests/malloc.rs
new file mode 100644
index 0000000000..b9c5422027
--- /dev/null
+++ b/rts/mmtk/mmtk/src/tests/malloc.rs
@@ -0,0 +1,36 @@
+use mmtk::util::malloc;
+use crate::GHCVM;
+
+#[test]
+fn test_malloc() {
+ let (address1, bool1) = malloc::alloc::<GHCVM>(16, 8, 0);
+ let (address2, bool2) = malloc::alloc::<GHCVM>(16, 32, 0);
+ let (address3, bool3) = malloc::alloc::<GHCVM>(16, 8, 4);
+ let (address4, bool4) = malloc::alloc::<GHCVM>(32, 64, 4);
+
+ assert!(address1.is_aligned_to(8));
+ assert!(address2.is_aligned_to(32));
+ assert!((address3 + 4 as isize).is_aligned_to(8));
+ assert!((address4 + 4 as isize).is_aligned_to(64));
+
+ assert!(!bool1);
+ #[cfg(feature = "malloc_hoard")]
+ assert!(bool2);
+ #[cfg(not(feature = "malloc_hoard"))]
+ assert!(!bool2);
+ assert!(bool3);
+ assert!(bool4);
+
+ assert!(malloc::get_malloc_usable_size(address1, bool1) >= 16);
+ assert!(malloc::get_malloc_usable_size(address2, bool2) >= 16);
+ assert!(malloc::get_malloc_usable_size(address3, bool3) >= 16);
+ assert!(malloc::get_malloc_usable_size(address4, bool4) >= 32);
+
+ unsafe { malloc::free(address1.to_mut_ptr()); }
+ #[cfg(feature = "malloc_hoard")]
+ malloc::offset_free(address2);
+ #[cfg(not(feature = "malloc_hoard"))]
+ unsafe { malloc::free(address2.to_mut_ptr()); }
+ malloc::offset_free(address3);
+ malloc::offset_free(address4);
+}
diff --git a/rts/mmtk/mmtk/src/tests/mod.rs b/rts/mmtk/mmtk/src/tests/mod.rs
new file mode 100644
index 0000000000..d8f2a202b9
--- /dev/null
+++ b/rts/mmtk/mmtk/src/tests/mod.rs
@@ -0,0 +1,18 @@
+// NOTE: Since the dummyvm uses a global MMTk instance,
+// it will panic if MMTk is initialized more than once per process.
+// We run each of the following modules in a separate test process.
+//
+// One way to avoid re-initialization is to have only one #[test] per module.
+// There are also helpers for creating fixtures in `fixtures/mod.rs`.
+mod issue139;
+mod handle_mmap_oom;
+mod handle_mmap_conflict;
+mod allocate_without_initialize_collection;
+mod allocate_with_initialize_collection;
+mod allocate_with_disable_collection;
+mod allocate_with_re_enable_collection;
+mod malloc;
+#[cfg(feature = "is_mmtk_object")]
+mod conservatism;
+mod is_in_mmtk_spaces;
+mod fixtures;
diff --git a/rts/mmtk/mmtk/src/types.rs b/rts/mmtk/mmtk/src/types.rs
new file mode 100644
index 0000000000..9af13f95b0
--- /dev/null
+++ b/rts/mmtk/mmtk/src/types.rs
@@ -0,0 +1,217 @@
+// use crate::DummyVM;
+// use super::stg_closures::*;
+use super::stg_info_table::*;
+
+
+pub type StgWord = usize;
+pub type StgPtr = *mut StgWord;
+
+pub type StgHalfWord = u32; // TODO: change this size later
+pub type StgWord64 = u64;
+pub type StgInt64 = i64;
+pub type StgWord32 = u32;
+pub type StgWord16 = u16;
+pub type StgWord8 = u8;
+pub type StgInt = i64;
+pub type StgHalfInt = i32;
+
+
+// ------------ ClosureTypes.h ------------
+#[repr(u32)]
+#[derive(PartialEq, Eq, Debug, Copy, Clone)] // comparison traits
+#[allow(non_camel_case_types)]
+pub enum StgClosureType {
+ INVALID_OBJECT = 0,
+ CONSTR = 1,
+ CONSTR_1_0 = 2,
+ CONSTR_0_1 = 3,
+ CONSTR_2_0 = 4,
+ CONSTR_1_1 = 5,
+ CONSTR_0_2 = 6,
+ CONSTR_NOCAF = 7,
+ FUN = 8,
+ FUN_1_0 = 9,
+ FUN_0_1 = 10,
+ FUN_2_0 = 11,
+ FUN_1_1 = 12,
+ FUN_0_2 = 13,
+ FUN_STATIC = 14,
+ THUNK = 15,
+ THUNK_1_0 = 16,
+ THUNK_0_1 = 17,
+ THUNK_2_0 = 18,
+ THUNK_1_1 = 19,
+ THUNK_0_2 = 20,
+ THUNK_STATIC = 21,
+ THUNK_SELECTOR = 22,
+ BCO = 23,
+ AP = 24,
+ PAP = 25,
+ AP_STACK = 26,
+ IND = 27,
+ IND_STATIC = 28,
+ // stack frames
+ RET_BCO = 29,
+ RET_SMALL = 30,
+ RET_BIG = 31,
+ RET_FUN = 32,
+ UPDATE_FRAME = 33,
+ CATCH_FRAME = 34,
+ UNDERFLOW_FRAME = 35,
+ STOP_FRAME = 36,
+ // end of stack frames
+ BLOCKING_QUEUE = 37,
+ BLACKHOLE = 38,
+ MVAR_CLEAN = 39,
+ MVAR_DIRTY = 40,
+ TVAR = 41,
+ ARR_WORDS = 42,
+ MUT_ARR_PTRS_CLEAN = 43,
+ MUT_ARR_PTRS_DIRTY = 44,
+ MUT_ARR_PTRS_FROZEN_DIRTY = 45,
+ MUT_ARR_PTRS_FROZEN_CLEAN = 46,
+ MUT_VAR_CLEAN = 47,
+ MUT_VAR_DIRTY = 48,
+ WEAK = 49,
+ PRIM = 50,
+ MUT_PRIM = 51,
+ TSO = 52,
+ STACK = 53,
+ TREC_CHUNK = 54,
+ ATOMICALLY_FRAME = 55,
+ CATCH_RETRY_FRAME = 56,
+ CATCH_STM_FRAME = 57,
+ WHITEHOLE = 58,
+ SMALL_MUT_ARR_PTRS_CLEAN = 59,
+ SMALL_MUT_ARR_PTRS_DIRTY = 60,
+ SMALL_MUT_ARR_PTRS_FROZEN_DIRTY = 61,
+ SMALL_MUT_ARR_PTRS_FROZEN_CLEAN = 62,
+ COMPACT_NFDATA = 63,
+ N_CLOSURE_TYPES = 64,
+}
+
+// ------------ FunTypes.h ------------
+extern "C" {
+ static stg_arg_bitmaps : [usize; 29];
+}
+
+// pub struct StgFunType (StgHalfWord);
+#[repr(u32)]
+#[derive(PartialEq, Eq, Debug, Copy, Clone)]
+#[allow(non_camel_case_types)]
+pub enum StgFunType {
+ ARG_GEN = 0,
+ ARG_GEN_BIG = 1,
+ ARG_BCO = 2,
+ ARG_NONE = 3,
+ ARG_N = 4,
+ ARG_P = 5,
+ ARG_F = 6,
+ ARG_D = 7,
+ ARG_L = 8,
+ ARG_V16 = 9,
+ ARG_V32 = 10,
+ ARG_V64 = 11,
+ ARG_NN = 12,
+ ARG_NP = 13,
+ ARG_PN = 14,
+ ARG_PP = 15,
+ ARG_NNN = 16,
+ ARG_NNP = 17,
+ ARG_NPN = 18,
+ ARG_NPP = 19,
+ ARG_PNN = 20,
+ ARG_PNP = 21,
+ ARG_PPN = 22,
+ ARG_PPP = 23,
+ ARG_PPPP = 24,
+ ARG_PPPPP = 25,
+ ARG_PPPPPP = 26,
+ ARG_PPPPPPP = 27,
+ ARG_PPPPPPPP = 28,
+}
+
+impl StgFunType {
+ pub fn get_small_bitmap(&self) -> StgSmallBitmap {
+ unsafe {
+            // Index into the RTS's stg_arg_bitmaps table using the enum discriminant.
+ StgSmallBitmap(stg_arg_bitmaps[*self as usize])
+ }
+ }
+}
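+
+// A minimal decoding sketch (values illustrative; the real table is the RTS's
+// stg_arg_bitmaps): the small bitmap gives the argument count, and each bit
+// says whether the corresponding word is a non-pointer (1) or a pointer (0).
+//
+//     let bm = StgFunType::ARG_PP.get_small_bitmap();
+//     for i in 0..bm.size() {
+//         let is_pointer = (bm.bits() >> i) & 1 == 0;
+//         // scan the i-th argument word only if is_pointer
+//     }
+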
+// ------------ TSO related constants from: rts/include/rts/Constants.h ------------
+
+/*
+ * Constants for the what_next field of a TSO, which indicates how it
+ * is to be run.
+ */
+
+#[derive(Eq, Debug)]
+pub struct StgTSONext (StgWord16);
+impl StgTSONext {
+ pub const THREAD_RUN_GHC : StgTSONext = StgTSONext(1); /* return to address on top of stack */
+ pub const THREAD_INTERPRET : StgTSONext = StgTSONext(2); /* interpret this thread */
+ pub const THREAD_KILLED : StgTSONext = StgTSONext(3); /* thread has died, don't run it */
+ pub const THREAD_COMPLETE : StgTSONext = StgTSONext(4); /* thread has finished */
+}
+
+impl PartialEq for StgTSONext {
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+}
+
+/*
+ * Constants for the why_blocked field of a TSO
+ */
+
+#[derive(Eq, Debug)]
+pub struct StgTSOBlocked (StgWord16);
+impl StgTSOBlocked {
+ pub const NOT_BLOCKED : StgTSOBlocked = StgTSOBlocked(0);
+ pub const BLOCKED_ON_MVAR : StgTSOBlocked = StgTSOBlocked(1);
+ pub const BLOCKED_ON_MVAR_READ : StgTSOBlocked = StgTSOBlocked(14);
+ pub const BLOCKED_ON_BLACK_HOLE : StgTSOBlocked = StgTSOBlocked(2);
+ pub const BLOCKED_ON_READ : StgTSOBlocked = StgTSOBlocked(3);
+ pub const BLOCKED_ON_WRITE : StgTSOBlocked = StgTSOBlocked(4);
+ pub const BLOCKED_ON_DELAY : StgTSOBlocked = StgTSOBlocked(5);
+ pub const BLOCKED_ON_STM : StgTSOBlocked = StgTSOBlocked(6);
+
+ /* Win32 only: */
+ pub const BLOCKED_ON_DO_PROC : StgTSOBlocked = StgTSOBlocked(7);
+ /* Only relevant for THREADED_RTS: */
+ pub const BLOCKED_ON_CCALL : StgTSOBlocked = StgTSOBlocked(10);
+ pub const BLOCKED_ON_CCALL_INTERRUPTIBLE : StgTSOBlocked = StgTSOBlocked(11);
+
+ pub const BLOCKED_ON_MSG_THROW_TO : StgTSOBlocked = StgTSOBlocked(12);
+ pub const THREAD_MIGRATING : StgTSOBlocked = StgTSOBlocked(13);
+}
+
+impl PartialEq for StgTSOBlocked {
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+}
+
+/*
+ * Flags for the tso->flags field.
+ */
+
+#[derive(Eq, Debug)]
+pub struct StgTSOFlag (StgWord32);
+
+impl StgTSOFlag {
+ pub const TSO_LOCKED : StgTSOFlag = StgTSOFlag(2);
+ pub const TSO_BLOCKEX : StgTSOFlag = StgTSOFlag(4);
+ pub const TSO_INTERRUPTIBLE : StgTSOFlag = StgTSOFlag(8);
+ pub const TSO_STOPPED_ON_BREAKPOINT : StgTSOFlag = StgTSOFlag(16);
+ pub const TSO_MARKED : StgTSOFlag = StgTSOFlag(64);
+ pub const TSO_SQUEEZED : StgTSOFlag = StgTSOFlag(128);
+ pub const TSO_ALLOC_LIMIT : StgTSOFlag = StgTSOFlag(256);
+}
+
+impl PartialEq for StgTSOFlag {
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+} \ No newline at end of file
diff --git a/rts/mmtk/mmtk/src/util.rs b/rts/mmtk/mmtk/src/util.rs
new file mode 100644
index 0000000000..593669b5ae
--- /dev/null
+++ b/rts/mmtk/mmtk/src/util.rs
@@ -0,0 +1,23 @@
+use super::types::*;
+use crate::stg_closures::*;
+
+pub unsafe fn offset_bytes<T>(ptr: *mut T, n: isize) -> *mut T {
+ ptr.cast::<u8>().offset(n).cast()
+}
+
+pub unsafe fn offset_words<T>(ptr: *mut T, n: isize) -> *mut T {
+ ptr.cast::<StgWord>().offset(n).cast()
+}
+
+/// Compute a pointer to a structure from an offset relative
+/// to the end of another structure.
+pub unsafe fn offset_from_end<Src, Target>(ptr: &Src, offset: isize) -> *const Target {
+ let end = (ptr as *const Src).offset(1);
+ (end as *const u8).offset(offset).cast()
+}
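+
+// A worked sketch of offset_from_end (names illustrative): with
+// tables-next-to-code, an SRT offset stored in an info table is relative to
+// the table's end, so resolving it is a single call:
+//
+//     // itbl: &StgInfoTable with itbl.srt.srt != 0
+//     let srt: *const StgClosure =
+//         unsafe { offset_from_end(itbl, itbl.srt.srt as isize) };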
+
+#[no_mangle]
+#[inline(never)]
+pub fn push_node(_ptr: TaggedClosureRef) {
+ ()
+} \ No newline at end of file
diff --git a/rts/mmtk/mmtk/wrapper.h b/rts/mmtk/mmtk/wrapper.h
new file mode 100644
index 0000000000..a1b6078b16
--- /dev/null
+++ b/rts/mmtk/mmtk/wrapper.h
@@ -0,0 +1,3 @@
+#include "../../include/Rts.h"
+#include "../../Capability.h"
+
diff --git a/rts/mmtk/notes.hs b/rts/mmtk/notes.hs
new file mode 100644
index 0000000000..a490f8b6af
--- /dev/null
+++ b/rts/mmtk/notes.hs
@@ -0,0 +1,22 @@
+nthPrime :: Integer -> Integer
+nthPrime n = ... -- computes the nth prime
+
+oneThousand :: Integer
+oneThousand = 1000
+
+thousandthPrime :: Integer
+thousandthPrime = nthPrime oneThousand
+
+f x y = ...
+caf1 = ...
+caf2 = ...
+caf3 = f caf1 caf2
+
+main = do
+ print thousandthPrime
+
+ -- do something else
+
+ print thousandthPrime
+
+ -- continues executing
diff --git a/rts/rts.cabal.in b/rts/rts.cabal.in
index d10ee390e4..6c104ce8f9 100644
--- a/rts/rts.cabal.in
+++ b/rts/rts.cabal.in
@@ -127,8 +127,10 @@ library
-- using Cabal.
if flag(threaded)
extra-library-flavours: _thr
+ extra-library-flavours: _thr_mmtk, _thr_debug_mmtk
if flag(dynamic)
extra-dynamic-library-flavours: _thr
+ extra-dynamic-library-flavours: _thr_mmtk, _thr_debug_mmtk
if flag(profiling)
extra-library-flavours: _p
@@ -643,6 +645,11 @@ library
-- I wish we had wildcards..., this would be:
-- *.c hooks/**/*.c sm/**/*.c eventlog/**/*.c linker/**/*.c
+ c-sources: MmtkGhc.c
+
+ extra-libraries: stdc++
+ c-sources: sm/CheckGc.cpp
+
if os(windows)
c-sources: win32/AsyncMIO.c
win32/AsyncWinIO.c
diff --git a/rts/sm/CheckGc.cpp b/rts/sm/CheckGc.cpp
new file mode 100644
index 0000000000..c1c984109d
--- /dev/null
+++ b/rts/sm/CheckGc.cpp
@@ -0,0 +1,976 @@
+extern "C" {
+#include "Rts.h"
+#include "StableName.h" /* for FOR_EACH_STABLE_NAME */
+#include "StablePtr.h" /* for markStablePtrTable */
+#include "Capability.h"
+#include "HeapAlloc.h"
+#include "STM.h"
+#include "sm/NonMoving.h"
+}
+
+#include <iostream>
+#include <fstream>
+#include <set>
+#include <vector>
+#include <queue>
+#include <unordered_set>
+
+// C Entry points
+extern "C" {
+void dump_heap_to(const char *fname);
+void check_gc();
+}
+
+// FIXME
+extern uint8_t nonmovingMarkEpoch;
+
+class TaggedClosurePtr {
+ StgClosure *ptr;
+public:
+ TaggedClosurePtr(StgClosure* ptr) : ptr(ptr) {}
+ TaggedClosurePtr(StgClosure* ptr, uint8_t tag) : TaggedClosurePtr(TAG_CLOSURE(tag, ptr)) {}
+
+ StgClosure *get_tagged() const {
+ return ptr;
+ }
+ StgClosure *untag() const {
+ return UNTAG_CLOSURE(ptr);
+ }
+ uint8_t get_tag() const {
+ return (StgWord) ptr & TAG_MASK;
+ }
+
+ //inline StgClosure& operator->() { return *untag(); }
+
+ friend inline bool operator==(const TaggedClosurePtr& lhs, const TaggedClosurePtr& rhs) {
+ return lhs.ptr == rhs.ptr;
+ }
+ friend inline bool operator!=(const TaggedClosurePtr& lhs, const TaggedClosurePtr& rhs) { return !(lhs == rhs); }
+ friend inline bool operator< (const TaggedClosurePtr& lhs, const TaggedClosurePtr& rhs) {
+ return lhs.ptr < rhs.ptr;
+ }
+ friend inline bool operator> (const TaggedClosurePtr& lhs, const TaggedClosurePtr& rhs) { return rhs < lhs; }
+ friend inline bool operator<=(const TaggedClosurePtr& lhs, const TaggedClosurePtr& rhs) { return !(lhs > rhs); }
+ friend inline bool operator>=(const TaggedClosurePtr& lhs, const TaggedClosurePtr& rhs) { return !(lhs < rhs); }
+};
+
+template<>
+struct std::hash<TaggedClosurePtr> {
+ std::size_t operator()(TaggedClosurePtr const& p) const noexcept {
+ return std::hash<StgClosure*>{}(p.get_tagged());
+ }
+};
+
+class HeapVisitor {
+public:
+ // Visit an SRT
+ virtual void visit_srt(StgClosure* c);
+
+ // Visit a normal closure
+ virtual void visit_closure(TaggedClosurePtr c);
+
+ virtual void visit_thunk(StgThunk *thunk, size_t n_ptrs);
+ virtual void visit_fun(StgClosure *constr, size_t n_ptrs);
+ virtual void visit_constr(StgClosure *constr, size_t n_ptrs);
+ virtual void visit_array(StgMutArrPtrs *arr);
+ virtual void visit_small_array(StgSmallMutArrPtrs *arr);
+ virtual void visit_bytearray(StgArrBytes* arr);
+
+ virtual void visit_stack(StgPtr sp, StgPtr end);
+ virtual void visit_tso(StgTSO* tso);
+ virtual void visit_weak(StgWeak* w);
+ virtual void visit_mvar(StgMVar* mvar);
+ virtual void visit_tvar(StgTVar* tvar);
+ virtual void visit_trec_header(StgTRecHeader *trec);
+ virtual void visit_trec_chunk(StgTRecChunk* tc);
+ virtual void visit_continuation(StgContinuation* tc);
+
+ virtual void visit_small_bitmap(StgClosure *const *payload, StgWord bitmap, StgWord size);
+ virtual void visit_large_bitmap(StgClosure *const *payload, const StgLargeBitmap *bitmap, StgWord size);
+ void visit_pap_payload(StgClosure *fun, StgClosure **payload, StgWord n_args);
+
+ virtual void visit_invalid(StgClosure *const c);
+};
+
+void HeapVisitor::visit_thunk(StgThunk *thunk, size_t n_ptrs)
+{
+ const StgInfoTable *info = get_itbl((StgClosure *) thunk);
+ const StgThunkInfoTable *thunk_info = itbl_to_thunk_itbl(info);
+ if (thunk_info->i.srt) {
+ StgClosure *srt = (StgClosure*) GET_SRT(thunk_info);
+ visit_srt(srt);
+ };
+ for (size_t i=0; i < n_ptrs; i++) {
+ visit_closure(thunk->payload[i]);
+ }
+}
+
+void HeapVisitor::visit_fun(StgClosure *fun, size_t n_ptrs)
+{
+ const StgInfoTable *info = get_itbl(fun);
+ const StgFunInfoTable *fun_info = itbl_to_fun_itbl(info);
+ if (fun_info->i.srt) {
+ StgClosure *srt = (StgClosure*) GET_SRT(fun_info);
+ visit_srt(srt);
+ };
+ for (size_t i=0; i < n_ptrs; i++) {
+ visit_closure(fun->payload[i]);
+ }
+}
+
+void HeapVisitor::visit_constr(StgClosure *constr, size_t n_ptrs)
+{
+ for (size_t i=0; i < n_ptrs; i++) {
+ visit_closure(constr->payload[i]);
+ }
+}
+
+void HeapVisitor::visit_srt(StgClosure* c)
+{
+ visit_closure(c);
+}
+
+void HeapVisitor::visit_invalid(StgClosure *const _c)
+{
+ abort();
+}
+
+void HeapVisitor::visit_weak(StgWeak* w)
+{
+ visit_closure(w->key);
+ visit_closure(w->value);
+ visit_closure(w->finalizer);
+ visit_closure(w->cfinalizers);
+}
+
+void HeapVisitor::visit_mvar(StgMVar* mvar)
+{
+ visit_closure((StgClosure*) mvar->head);
+ visit_closure((StgClosure*) mvar->tail);
+ visit_closure(mvar->value);
+}
+
+void HeapVisitor::visit_small_array(StgSmallMutArrPtrs *arr)
+{
+ for (StgWord i=0; i < arr->ptrs; i++) {
+ visit_closure(arr->payload[i]);
+ }
+}
+
+void HeapVisitor::visit_array(StgMutArrPtrs *arr)
+{
+ for (StgWord i=0; i < arr->ptrs; i++) {
+ visit_closure(arr->payload[i]);
+ }
+}
+
+void HeapVisitor::visit_bytearray(StgArrBytes* _arr) { }
+
+void HeapVisitor::visit_tso(StgTSO *tso)
+{
+ if (tso->bound != NULL) {
+ visit_closure((StgClosure*) tso->bound->tso);
+ }
+ if (tso->label != NULL) {
+ visit_closure({(StgClosure*) tso->label});
+ }
+ visit_closure((StgClosure*) tso->blocked_exceptions);
+ visit_closure((StgClosure*) tso->bq);
+ visit_closure((StgClosure*) tso->stackobj);
+ visit_closure((StgClosure*) tso->_link);
+ visit_trec_header(tso->trec);
+
+ switch (tso->why_blocked) {
+ case BlockedOnMVar:
+ case BlockedOnMVarRead:
+ case BlockedOnBlackHole:
+ case BlockedOnMsgThrowTo:
+ case NotBlocked:
+ visit_closure(tso->block_info.closure);
+ break;
+ default:
+ break;
+ }
+}
+
+void HeapVisitor::visit_continuation(StgContinuation *cont)
+{
+ visit_stack(cont->stack, cont->stack + cont->stack_size);
+}
+
+void HeapVisitor::visit_tvar(StgTVar *tvar)
+{
+ visit_closure(tvar->current_value);
+ visit_closure((StgClosure*) tvar->first_watch_queue_entry);
+}
+
+void HeapVisitor::visit_trec_header(StgTRecHeader *trec)
+{
+ if (trec == NO_TREC) {
+ return;
+ }
+ visit_trec_chunk(trec->current_chunk);
+ visit_closure((StgClosure*) trec->enclosing_trec);
+}
+
+void HeapVisitor::visit_trec_chunk(StgTRecChunk *tc)
+{
+ if (tc->prev_chunk != END_STM_CHUNK_LIST) {
+ visit_closure((StgClosure*) tc->prev_chunk);
+ }
+
+ for (uint32_t i = 0; i < tc->next_entry_idx; i++) {
+ TRecEntry *e = &tc->entries[i];
+ visit_closure((StgClosure*)e->tvar);
+ visit_closure(e->expected_value);
+ visit_closure(e->new_value);
+ }
+}
+
+void HeapVisitor::visit_stack(StgPtr p, StgPtr stack_end)
+{
+ while (p < stack_end) {
+ const StgRetInfoTable* info = get_ret_itbl((StgClosure *) p);
+
+ auto add_srt_ptrs = [&] () {
+ if (info->i.srt) {
+ StgClosure *srt = (StgClosure*)GET_SRT(info);
+ visit_srt(srt);
+ }
+ };
+
+ switch (info->i.type) {
+
+ case UPDATE_FRAME:
+ {
+ StgUpdateFrame *frame = (StgUpdateFrame *)p;
+ visit_closure(frame->updatee);
+ p += sizeofW(StgUpdateFrame);
+ continue;
+ }
+
+ case CATCH_STM_FRAME:
+ case CATCH_RETRY_FRAME:
+ case ATOMICALLY_FRAME:
+ case UNDERFLOW_FRAME:
+ case STOP_FRAME:
+ case CATCH_FRAME:
+ case RET_SMALL:
+ {
+ StgWord bitmap = BITMAP_BITS(info->i.layout.bitmap);
+ StgWord size = BITMAP_SIZE(info->i.layout.bitmap);
+            // NOTE: the payload starts immediately after the info-ptr; we
+            // don't have an StgHeader in the same sense as a heap closure.
+ p++;
+ visit_small_bitmap((StgClosure**) p, bitmap, size);
+ p += size;
+ add_srt_ptrs();
+ continue;
+ }
+
+ case RET_BCO:
+ {
+ p++;
+ StgBCO *bco = (StgBCO *)*p;
+ visit_closure((StgClosure *) bco);
+ p++;
+ StgWord size = BCO_BITMAP_SIZE(bco);
+ visit_large_bitmap((StgClosure**) p, BCO_BITMAP(bco), size);
+ p += size;
+ continue;
+ }
+
+ case RET_BIG:
+ {
+ StgWord size = GET_LARGE_BITMAP(&info->i)->size;
+ p++;
+ visit_large_bitmap((StgClosure**) p, GET_LARGE_BITMAP(&info->i), size);
+ p += size;
+ // and don't forget to follow the SRT
+ add_srt_ptrs();
+ break;
+ }
+
+ case RET_FUN:
+ {
+ StgRetFun *ret_fun = (StgRetFun *)p;
+ visit_closure(ret_fun->fun);
+
+ const StgFunInfoTable *fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
+ switch (fun_info->f.fun_type) {
+ case ARG_GEN:
+ {
+ StgWord bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
+ StgWord size = BITMAP_SIZE(fun_info->f.b.bitmap);
+ visit_small_bitmap(ret_fun->payload, bitmap, size);
+ p = (StgPtr) ((StgClosure **) &ret_fun->payload + size);
+ break;
+ }
+ case ARG_GEN_BIG:
+ {
+ StgWord size = GET_FUN_LARGE_BITMAP(fun_info)->size;
+ visit_large_bitmap(ret_fun->payload, GET_FUN_LARGE_BITMAP(fun_info), size);
+ p = (StgPtr) ((StgClosure **) &ret_fun->payload + size);
+ break;
+ }
+ default:
+ {
+ StgWord bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
+ StgWord size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
+ visit_small_bitmap(ret_fun->payload, bitmap, size);
+ p = (StgPtr) ((StgClosure **) &ret_fun->payload + size);
+ break;
+ }
+ }
+ add_srt_ptrs();
+ break;
+ }
+ default:
+ abort();
+ }
+ }
+}
+
+void HeapVisitor::visit_small_bitmap(
+ StgClosure *const *payload,
+ StgWord bitmap,
+ StgWord size)
+{
+ while (size > 0) {
+        // A clear bit marks a pointer word; set bits mark non-pointers.
+        if ((bitmap & 1) == 0) {
+ visit_closure(*payload);
+ }
+ payload++;
+ bitmap = bitmap >> 1;
+ size--;
+ }
+}
+
+void HeapVisitor::visit_large_bitmap(
+ StgClosure *const * payload,
+ const StgLargeBitmap *large_bitmap,
+ StgWord size)
+{
+ // Bitmap may have more bits than `size` when scavenging PAP payloads. See
+ // comments around StgPAP.
+ ASSERT(large_bitmap->size >= size);
+
+ uint32_t b = 0;
+ for (uint32_t i = 0; i < size; b++) {
+ StgWord bitmap = large_bitmap->bitmap[b];
+ uint32_t j = stg_min(size-i, BITS_IN(W_));
+ i += j;
+ for (; j > 0; j--, payload++) {
+ if ((bitmap & 1) == 0) {
+ visit_closure(*payload);
+ }
+ bitmap = bitmap >> 1;
+ }
+ }
+}
+
+void HeapVisitor::visit_pap_payload(
+ StgClosure *fun,
+ StgClosure **payload,
+ StgWord n_args)
+{
+ fun = UNTAG_CLOSURE(fun);
+ const StgFunInfoTable *fun_info = get_fun_itbl(fun);
+ ASSERT(fun_info->i.type != PAP);
+ switch (fun_info->f.fun_type) {
+ case ARG_GEN:
+ visit_small_bitmap(payload, BITMAP_BITS(fun_info->f.b.bitmap), n_args);
+ break;
+ case ARG_GEN_BIG:
+ visit_large_bitmap(payload, GET_FUN_LARGE_BITMAP(fun_info), n_args);
+ break;
+ case ARG_BCO:
+ visit_large_bitmap(payload, BCO_BITMAP(fun), n_args);
+ break;
+ default:
+ {
+ StgWord bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
+ visit_small_bitmap(payload, bitmap, n_args);
+ }
+ }
+}
+
+void HeapVisitor::visit_closure(TaggedClosurePtr tagged)
+{
+ StgClosure *c = tagged.untag();
+ if (c->header.info == (StgInfoTable *) 0xaaaaaaaaaaaaaaaa || !LOOKS_LIKE_CLOSURE_PTR(c)) {
+ visit_invalid(c);
+ return;
+ }
+
+ const StgInfoTable *info = get_itbl(c);
+ auto generic_closure = [&] () {
+ for (StgClosure **p = &c->payload[0]; p < &c->payload[info->layout.payload.ptrs]; p++) {
+ visit_closure(*p);
+ }
+ };
+
+ switch (info->type) {
+
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
+ visit_mvar((StgMVar *) c);
+ break;
+ case TVAR:
+ visit_tvar((StgTVar *) c);
+ break;
+
+ case IND:
+ case IND_STATIC:
+ visit_closure(((StgInd *) c)->indirectee);
+ break;
+
+ case THUNK_0_1:
+ case THUNK_0_2:
+ visit_thunk((StgThunk*) c, 0);
+ break;
+ case THUNK_1_1:
+ case THUNK_1_0:
+ visit_thunk((StgThunk*) c, 1);
+ break;
+ case THUNK_2_0:
+ visit_thunk((StgThunk*) c, 2);
+ break;
+ case THUNK:
+ visit_thunk((StgThunk*) c, info->layout.payload.ptrs);
+ break;
+ case THUNK_STATIC:
+ visit_thunk((StgThunk*) c, 0);
+ break;
+
+ case FUN_1_0:
+ visit_fun(c, 1);
+ break;
+ case FUN_0_1:
+ case FUN_0_2:
+ visit_fun(c, 0);
+ break;
+ case FUN_1_1:
+ visit_fun(c, 1);
+ break;
+ case FUN_2_0:
+ visit_fun(c, 2);
+ break;
+ case FUN:
+ case FUN_STATIC:
+ visit_fun(c, info->layout.payload.ptrs);
+ break;
+
+ case CONSTR_0_1:
+ case CONSTR_0_2:
+ visit_constr(c, 0);
+ break;
+ case CONSTR_1_0:
+ case CONSTR_1_1:
+ visit_constr(c, 1);
+ break;
+ case CONSTR_2_0:
+ visit_constr(c, 2);
+ break;
+ case CONSTR:
+ case CONSTR_NOCAF:
+ visit_constr(c, info->layout.payload.ptrs);
+ break;
+
+ case PRIM:
+ generic_closure();
+ break;
+ case WEAK:
+ visit_weak((StgWeak*) c);
+ break;
+ case BCO:
+ {
+ StgBCO *bco = (StgBCO *)c;
+ visit_closure((StgClosure*) bco->instrs);
+ visit_closure((StgClosure*) bco->literals);
+ visit_closure((StgClosure*) bco->ptrs);
+ break;
+ }
+ case BLACKHOLE:
+ {
+ StgInd *ind = (StgInd*) c;
+ visit_closure(ind->indirectee);
+ break;
+ }
+ case MUT_VAR_CLEAN:
+ case MUT_VAR_DIRTY:
+ {
+ StgMutVar *mv = (StgMutVar*) c;
+ visit_closure(mv->var);
+ break;
+ }
+ case BLOCKING_QUEUE:
+ {
+ StgBlockingQueue *bq = (StgBlockingQueue *)c;
+ visit_closure((StgClosure*) bq->bh);
+ visit_closure((StgClosure*) bq->owner);
+ visit_closure((StgClosure*) bq->queue);
+ visit_closure((StgClosure*) bq->link);
+ break;
+ }
+ case THUNK_SELECTOR:
+ {
+ StgSelector *s = (StgSelector *)c;
+ visit_closure(s->selectee);
+ break;
+ }
+ case AP_STACK:
+ {
+ StgAP_STACK *ap = (StgAP_STACK *)c;
+ visit_closure(ap->fun);
+ visit_stack((StgPtr) ap->payload, (StgPtr) ap->payload + ap->size);
+ break;
+ }
+ case PAP:
+ {
+ StgPAP *pap = (StgPAP*) c;
+ visit_closure(pap->fun);
+ visit_pap_payload(pap->fun, (StgClosure**) pap->payload, pap->n_args);
+ break;
+ }
+ case AP:
+ {
+ StgAP *ap = (StgAP*) c;
+ visit_closure(ap->fun);
+ visit_pap_payload(ap->fun, (StgClosure**) ap->payload, ap->n_args);
+ break;
+ }
+ case ARR_WORDS:
+ visit_bytearray((StgArrBytes *) c);
+ break;
+ case MUT_ARR_PTRS_CLEAN:
+ case MUT_ARR_PTRS_DIRTY:
+ case MUT_ARR_PTRS_FROZEN_DIRTY:
+ case MUT_ARR_PTRS_FROZEN_CLEAN:
+ visit_array((StgMutArrPtrs *) c);
+ break;
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
+ case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
+ visit_small_array((StgSmallMutArrPtrs *) c);
+ break;
+ case TSO:
+ visit_tso((StgTSO *) c);
+ break;
+ case STACK:
+ {
+ StgStack *stack = (StgStack *) c;
+ visit_stack(stack->sp, stack->stack + stack->stack_size);
+ break;
+ }
+ case MUT_PRIM:
+ generic_closure();
+ break;
+ case TREC_CHUNK:
+ visit_trec_chunk((StgTRecChunk *) c);
+ break;
+ case CONTINUATION:
+ visit_continuation((StgContinuation *) c);
+ break;
+ default:
+ visit_invalid(c);
+ break;
+ }
+}
+
+class PredicatedHeapVisitor : HeapVisitor {
+ bool should_visit(StgClosure *);
+
+ virtual void visit_srt(StgClosure* c) {
+ if (should_visit(c)) { HeapVisitor::visit_srt(c); }
+ }
+
+ virtual void visit_closure(TaggedClosurePtr c) {
+ if (should_visit(c.untag())) { HeapVisitor::visit_closure(c); }
+ }
+};
+
+// Collect direct pointers
+struct CollectPointers : HeapVisitor {
+ std::set<TaggedClosurePtr> accum;
+ bool invalid;
+ CollectPointers() : accum(), invalid(false) {}
+ void visit_root(StgClosure *c) {
+ HeapVisitor::visit_closure(c);
+ }
+ void visit_closure(TaggedClosurePtr c) {
+ accum.insert(c);
+ }
+ void visit_invalid(StgClosure *const _c) { invalid = true; }
+};
+
+static std::set<TaggedClosurePtr> collect_pointers(StgClosure* c)
+{
+ CollectPointers v;
+ v.visit_root(c);
+ return v.accum;
+}
+
+
+
+struct Error {
+ StgClosure *closure;
+ std::string what;
+ Error(StgClosure *closure, std::string what) : closure(closure), what(what) {}
+};
+
+static std::ostream& operator<<(std::ostream& os, const Error& err) {
+ os << std::hex << "0x" << (StgWord) err.closure << ": " << err.what << "\n";
+ return os;
+}
+
+class CheckVisitor : HeapVisitor {
+ std::vector<Error> errors;
+ uint8_t tag;
+ void visit_constr(StgClosure* c, size_t n_ptrs) {
+ const StgInfoTable *info = get_itbl(c);
+ if (tag != 0) {
+ uint8_t constr_tag = info->srt; // zero-based
+ if (tag != std::min(TAG_MASK, constr_tag+1)) {
+ errors.push_back(Error(c, "invalid tag"));
+ }
+ }
+ }
+
+ void visit_closure(TaggedClosurePtr c) { }
+public:
+ const std::vector<Error>& get_errors() const { return errors; }
+
+ void check_closure(TaggedClosurePtr c) {
+ tag = c.get_tag();
+ HeapVisitor::visit_closure(c);
+ }
+};
+
+struct CheckGc {
+ std::queue<TaggedClosurePtr> queue;
+ std::unordered_set<TaggedClosurePtr> enqueued;
+
+ void enqueue(TaggedClosurePtr ptr) {
+ ASSERT(ptr != NULL);
+ if (!is_enqueued(ptr)) {
+ queue.push(ptr);
+ enqueued.insert(ptr);
+ }
+ }
+
+ bool finished() {
+ return queue.empty();
+ }
+
+ TaggedClosurePtr pop() {
+ TaggedClosurePtr p = queue.front();
+ queue.pop();
+ return p;
+ }
+
+ bool is_enqueued(TaggedClosurePtr ptr) {
+ return enqueued.find(ptr) != enqueued.end();
+ }
+};
+
+static void enqueue_root(void *user_data, StgClosure **root)
+{
+ CheckGc* env = (CheckGc*) user_data;
+ env->enqueue(*root);
+}
+
+static void enqueue_roots(CheckGc& env)
+{
+ FOR_EACH_STABLE_NAME(p, if (p->sn_obj) env.enqueue(p->sn_obj););
+ markStablePtrTable(enqueue_root, &env);
+ for (uint32_t n = 0; n < getNumCapabilities(); n++) {
+ markCapability(enqueue_root, (void*) &env, getCapability(n), false/*mark sparks*/);
+ }
+ markCAFs(enqueue_root, &env);
+
+ for (StgWeak *w = nonmoving_weak_ptr_list; w != NULL; w = w->link) {
+ env.enqueue((StgClosure *) w);
+ }
+
+ for (uint32_t g = 0; g <= N; g++) {
+ generation *gen = &generations[g];
+ for (StgWeak *w = gen->weak_ptr_list; w != NULL; w = RELAXED_LOAD(&w->link)) {
+ env.enqueue((StgClosure *) w);
+ }
+ }
+}
+
+struct NodeName {
+ const StgClosure *c;
+ NodeName(const StgClosure *c) : c(c) {}
+};
+
+static std::ostream& operator<<(std::ostream& os, const NodeName& n) {
+ os << std::hex << "\"" << n.c << "\"" << std::dec;
+ return os;
+}
+
+static void dump_heap(std::ofstream& of)
+{
+ of << "digraph {\n";
+ CheckGc env;
+ enqueue_roots(env);
+ while (!env.finished()) {
+ TaggedClosurePtr tagged = env.pop();
+ StgClosure* c = tagged.untag();
+ NodeName n(c);
+ if (c->header.info == (StgInfoTable *) 0xaaaaaaaaaaaaaaaa) {
+ of << n << " [type=invalid];\n";
+ continue;
+ }
+
+ const StgInfoTable *info = get_itbl(c);
+ switch (info->type) {
+ case CONSTR:
+ case CONSTR_1_0:
+ case CONSTR_0_1:
+ case CONSTR_2_0:
+ case CONSTR_1_1:
+ case CONSTR_0_2:
+ case CONSTR_NOCAF:
+ {
+ const StgConInfoTable *con_info = get_con_itbl(c);
+ of << n << " [type=CONSTR constr=\"" << GET_CON_DESC(con_info) << "\"];\n";
+ break;
+ }
+ case FUN:
+ case FUN_1_0:
+ case FUN_0_1:
+ case FUN_2_0:
+ case FUN_1_1:
+ case FUN_0_2:
+ of << n << " [type=FUN];\n";
+ break;
+ case FUN_STATIC:
+ of << n << " [type=FUN_STATIC];\n";
+ break;
+ case THUNK:
+ case THUNK_1_0:
+ case THUNK_0_1:
+ case THUNK_1_1:
+ case THUNK_0_2:
+ case THUNK_2_0:
+ of << n << " [type=THUNK];\n";
+ break;
+ case THUNK_STATIC:
+ of << n << " [type=THUNK_STATIC];\n";
+ break;
+ case THUNK_SELECTOR:
+ of << n << " [type=THUNK_SEL];\n";
+ break;
+ case BCO:
+ of << n << " [type=BCO];\n";
+ break;
+ case AP:
+ of << n << " [type=AP];\n";
+ break;
+ case PAP:
+ of << n << " [type=PAP];\n";
+ break;
+ case AP_STACK:
+ of << n << " [type=AP_STACK];\n";
+ break;
+ case IND:
+ of << n << " [type=IND];\n";
+ break;
+ case IND_STATIC:
+ of << n << " [type=IND_STATIC];\n";
+ break;
+ case BLOCKING_QUEUE:
+ of << n << " [type=BLOCKING_QUEUE];\n";
+ break;
+ case BLACKHOLE:
+ of << n << " [type=BLACKHOLE];\n";
+ break;
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
+ of << n << " [type=MVAR];\n";
+ break;
+ case TVAR:
+ of << n << " [type=TVAR];\n";
+ break;
+ case ARR_WORDS:
+ of << n << " [type=ARR_WORDS];\n";
+ break;
+ case MUT_ARR_PTRS_CLEAN:
+ case MUT_ARR_PTRS_DIRTY:
+ case MUT_ARR_PTRS_FROZEN_CLEAN:
+ case MUT_ARR_PTRS_FROZEN_DIRTY:
+ of << n << " [type=MUT_ARR_PTRS];\n";
+ break;
+ case SMALL_MUT_ARR_PTRS_CLEAN:
+ case SMALL_MUT_ARR_PTRS_DIRTY:
+ case SMALL_MUT_ARR_PTRS_FROZEN_CLEAN:
+ case SMALL_MUT_ARR_PTRS_FROZEN_DIRTY:
+ of << n << " [type=SMALL_MUT_ARR_PTRS];\n";
+ break;
+ case MUT_VAR_CLEAN:
+ case MUT_VAR_DIRTY:
+ of << n << " [type=MUT_VAR];\n";
+ break;
+ case WEAK:
+ of << n << " [type=WEAK];\n";
+ break;
+ case PRIM:
+ of << n << " [type=PRIM];\n";
+ break;
+ case MUT_PRIM:
+ of << n << " [type=MUT_PRIM];\n";
+ break;
+ case TSO:
+ of << n << " [type=TSO];\n";
+ break;
+ case STACK:
+ of << n << " [type=STACK];\n";
+ break;
+ case TREC_CHUNK:
+ of << n << " [type=TREC_CHUNK];\n";
+ break;
+ case WHITEHOLE:
+ of << n << " [type=WHITEHOLE];\n";
+ break;
+ case COMPACT_NFDATA:
+ of << n << " [type=COMPACT_NFDATA];\n";
+ break;
+ case CONTINUATION:
+ of << n << " [type=CONTINUATION];\n";
+ break;
+ default:
+ of << n << " [type=unknown];\n";
+ break;
+ }
+
+ if (!HEAP_ALLOCED((StgPtr) c)) {
+ of << n << " [static=yes];\n";
+ } else {
+ bdescr *bd = Bdescr((StgPtr) c);
+ of << n << " [gen=" << bd->gen_no << "];\n";
+ if (bd->flags & BF_EVACUATED) {
+ of << n << " [evacuated=yes];\n";
+ }
+ if (bd->flags & BF_PINNED) {
+ of << n << " [pinned=yes];\n";
+ }
+ if (bd->flags & BF_LARGE) {
+ of << n << " [large=yes];\n";
+ } else if (bd->flags & BF_NONMOVING) {
+ struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) c);
+ nonmoving_block_idx block_idx = nonmovingGetBlockIdx((StgPtr) c);
+ uint8_t mark = nonmovingGetMark(seg, block_idx);
+ StgClosure *snapshot_loc =
+ (StgClosure *) nonmovingSegmentGetBlock(seg, nonmovingSegmentInfo(seg)->next_free_snap);
+ if (c > snapshot_loc) {
+ of << n << " [nonmoving=yes new=yes mark=" << (StgWord) mark << "];\n";
+ } else {
+ of << n << " [nonmoving=yes mark=" << (StgWord) mark << "];\n";
+ }
+ } else {
+ of << n << " [moving=yes];\n";
+ }
+ }
+ for (TaggedClosurePtr p : collect_pointers(c)) {
+ of << n << " -> " << NodeName(p.untag()) << ";\n";
+ env.enqueue(p);
+ }
+ }
+ of << "}\n";
+}
+
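+// Hint: render the resulting file with Graphviz, e.g.
+//   dot -Tsvg heap.dot -o heap.svg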
+void dump_heap_to(const char *fname)
+{
+ std::ofstream out(fname);
+ dump_heap(out);
+ out.flush();
+}
+
+void check_gc()
+{
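+    // Walk the whole reachable heap from the GC roots, validating pointer
+    // tags and the per-block mark/sweep invariants below; on any failure,
+    // dump the heap graph to heap.dot and abort.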
+ CheckGc env;
+ enqueue_roots(env);
+ std::vector<Error> errors;
+
+ while (!env.finished()) {
+ TaggedClosurePtr tagged = env.pop();
+ StgClosure* c = tagged.untag();
+
+        // Check for the poison pattern first; the visitors below read the
+        // info table, which is garbage for such closures.
+        if (c->header.info == (StgInfoTable *) 0xaaaaaaaaaaaaaaaa) {
+            errors.push_back(Error(c, "is invalid closure"));
+            continue;
+        }
+
+        {
+            CheckVisitor check;
+            check.check_closure(tagged);
+            for (const Error& e : check.get_errors()) {
+                errors.push_back(e);
+            }
+        }
+
+        for (TaggedClosurePtr p : collect_pointers(c)) {
+            env.enqueue(p);
+        }
+
+
+ const StgInfoTable *info = get_itbl(c);
+ if (!HEAP_ALLOCED((StgPtr) c)) {
+ switch (info->type) {
+ case THUNK_STATIC:
+            if (info->srt != 0) {
+                /* TODO: a static thunk with a non-empty SRT should be
+                 * checked against the static objects list; no check is
+                 * implemented yet. */
+            }
+            break;
+        default:
+            break;
+        }
+ } else {
+ bdescr *bd = Bdescr((StgPtr) c);
+ if (bd->gen_no < 1) {
+            /* nothing to check, as we focus on post-nonmoving-GC checking */
+ } else if (bd->flags & BF_NONMOVING && bd->flags & BF_LARGE) {
+ if (bd->flags & BF_NONMOVING_SWEEPING && !(bd->flags & BF_MARKED)) {
+ errors.push_back(Error(c, "is not marked yet being swept"));
+ }
+ } else if (bd->flags & BF_NONMOVING) {
+ struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) c);
+ nonmoving_block_idx block_idx = nonmovingGetBlockIdx((StgPtr) c);
+ uint8_t mark = nonmovingGetMark(seg, block_idx);
+ StgClosure *snapshot_loc =
+ (StgClosure *) nonmovingSegmentGetBlock(seg, nonmovingSegmentInfo(seg)->next_free_snap);
+ if (bd->flags & BF_NONMOVING_SWEEPING) {
+ /* in a swept segment */
+ if (mark != nonmovingMarkEpoch) {
+ errors.push_back(Error(c, "is unmarked nonmoving object being swept"));
+ }
+ } else if (c < snapshot_loc) {
+ /* not in a swept segment but in the snapshot */
+ if (mark != nonmovingMarkEpoch) {
+ errors.push_back(Error(c, "is unmarked nonmoving object in the snapshot"));
+ }
+ } else {
+ /* not in the snapshot; nothing to assert */
+ }
+ } else if (bd->flags & BF_LARGE) {
+ if (! (bd->flags & BF_MARKED)) {
+ errors.push_back(Error(c, "is unmarked large object"));
+ }
+ } else {
+ if (!(bd->flags & BF_EVACUATED)) {
+ //errors.push_back(Error(c, "is in from-space block"));
+ }
+ }
+ }
+ }
+
+ if (!errors.empty()) {
+ for (auto err : errors) {
+ std::cerr << err << "\n";
+ }
+ dump_heap_to("heap.dot");
+ abort();
+ }
+}
+
diff --git a/rts/sm/Compact.c b/rts/sm/Compact.c
index f6e65ecc9a..435f9b81ac 100644
--- a/rts/sm/Compact.c
+++ b/rts/sm/Compact.c
@@ -462,6 +462,9 @@ thread_TSO (StgTSO *tso)
{
thread_(&tso->_link);
thread_(&tso->global_link);
+ thread_(&tso->tso_link_next);
+ // threading tso_link_prev is probably redundant (it is reachable via
+ // tso_link_next), but it is harmless
+ thread_(&tso->tso_link_prev);
if ( tso->why_blocked == BlockedOnMVar
|| tso->why_blocked == BlockedOnMVarRead
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 161ccb2e40..e4ad89f06b 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -493,6 +493,21 @@ evacuate_static_object (StgClosure **link_field, StgClosure *q)
StgWord link = RELAXED_LOAD((StgWord*) link_field);
// See Note [STATIC_LINK fields] for how the link field bits work
+ // (link & STATIC_BITS): the mark bits of this object (its visited state)
+ // prev_static_flag: the value that the "current" static flag had during
+ // the previous GC
+
+ // Let's see how this works in practice.
+ // Assume prev_static_flag == 1 and cur_static_flag == 2:
+ //
+ // * If we have not yet visited an object, its flag field is still 1, so
+ //   (static_flag(obj) | prev_static_flag) == 1 /= 3. We conclude that the
+ //   object has not been visited, visit it, and set static_flag(obj) to
+ //   cur_static_flag (2).
+ // * If we have already visited an object, its flag field is 2, so
+ //   (static_flag(obj) | prev_static_flag) == 3 and we conclude that the
+ //   object has already been visited.
+ //
+ // On the next GC we swap the roles (prev_static_flag == 2,
+ // cur_static_flag == 1), flipping the sense of the flag.
+
if (((link & STATIC_BITS) | prev_static_flag) != 3) {
StgWord new_list_head = (StgWord)q | static_flag;
#if !defined(THREADED_RTS)
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 19cb0fe31d..c278c05ed0 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -54,6 +54,10 @@
#include "RtsFlags.h"
#include "NonMoving.h"
#include "Ticky.h"
+#include "Threads.h"
+#if defined(MMTK_GHC)
+#include "mmtk/ghc/mmtk_upcalls.h"
+#endif
#include <string.h> // for memset()
#include <unistd.h>
@@ -239,6 +243,39 @@ bdescr *mark_stack_top_bd; // topmost block in the mark stack
bdescr *mark_stack_bd; // current block in the mark stack
StgPtr mark_sp; // pointer to the next unallocated mark stack entry
+#if defined(MMTK_GHC)
+struct MmtkGcWorker {
+ int dummy;
+};
+
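+// MMTk asks the runtime to create its GC threads through the two upcalls
+// below; each spawned OS thread immediately re-enters MMTk
+// (mmtk_start_control_collector / mmtk_start_worker) and is not expected
+// to return.
+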
+static void mmtk_controller_thread(void* controller)
+{
+ void *tls = stgMallocBytes(sizeof(struct MmtkGcWorker), "mmtk_controller_thread");
+ mmtk_start_control_collector(tls, controller);
+}
+
+void upcall_spawn_gc_controller(void *controller)
+{
+    OSThreadId tid;
+    if (createOSThread(&tid, "MMTK controller", mmtk_controller_thread, controller) != 0) {
+ barf("Failed to spawn GC controller thread");
+ }
+}
+
+static void mmtk_worker_thread(void* worker)
+{
+ void *tls = stgMallocBytes(sizeof(struct MmtkGcWorker), "mmtk_worker_thread");
+ mmtk_start_worker(tls, worker);
+}
+
+void upcall_spawn_gc_worker(void *worker)
+{
+    OSThreadId tid;
+    if (createOSThread(&tid, "MMTK worker", mmtk_worker_thread, worker) != 0) {
+ barf("Failed to spawn GC worker thread");
+ }
+}
+#endif
/* -----------------------------------------------------------------------------
Statistics from mut_list scavenging
@@ -537,6 +574,9 @@ GarbageCollect (uint32_t collect_gen,
markCapability(mark_root, gct, cap, true/*don't mark sparks*/);
}
+ // add global TSO list as a GC root
+ evacuate((StgClosure **)&global_TSOs);
+
// Mark the weak pointer list, and prepare to detect dead weak pointers.
markWeakPtrList();
initWeakForGC();
@@ -1151,15 +1191,19 @@ new_gc_thread (uint32_t n, gc_thread *t)
// but can't, because it uses gct which isn't set up at this point.
// Hence, allocate a block for todo_bd manually:
{
+#if defined(MMTK_GHC)
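+        // Under MMTk the RTS block allocator is not used for to-space, so
+        // GC worker threads get no todo block.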
+ bdescr *bd = NULL;
+#else
bdescr *bd = allocBlockOnNode(capNoToNumaNode(n));
// no lock, locks aren't initialised yet
initBdescr(bd, ws->gen, ws->gen->to);
bd->flags = BF_EVACUATED;
bd->u.scan = bd->free = bd->start;
- ws->todo_bd = bd;
ws->todo_free = bd->free;
ws->todo_lim = bd->start + BLOCK_SIZE_W;
+#endif
+ ws->todo_bd = bd;
}
ws->todo_q = newWSDeque(128);
diff --git a/rts/sm/HeapUtils.h b/rts/sm/HeapUtils.h
index aa3321e5dc..56e24f205b 100644
--- a/rts/sm/HeapUtils.h
+++ b/rts/sm/HeapUtils.h
@@ -21,7 +21,7 @@ walk_large_bitmap(walk_closures_cb *cb,
// comments around StgPAP.
ASSERT(large_bitmap->size >= size);
- uint32_t b = 0;
+ uint32_t b = 0; // current word index
for (uint32_t i = 0; i < size; b++) {
StgWord bitmap = large_bitmap->bitmap[b];
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index 99383ebd42..74c195b3d4 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -219,6 +219,16 @@ static bool resurrectUnreachableThreads (generation *gen, StgTSO **resurrected_t
// it to END_TSO_QUEUE. The copying GC doesn't currently care, but
// the compacting GC does, see #17785.
t->global_link = END_TSO_QUEUE;
+
+ // also unlink the thread from the global doubly-linked TSO list
+ if (t->tso_link_prev != END_TSO_QUEUE){
+ t->tso_link_prev->tso_link_next = t->tso_link_next;
+ }
+ if (t->tso_link_next != END_TSO_QUEUE){
+ t->tso_link_next->tso_link_prev = t->tso_link_prev;
+ }
+ t->tso_link_prev = END_TSO_QUEUE;
+ t->tso_link_next = END_TSO_QUEUE;
continue;
default:
tmp = t;
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index 41510e7f8e..f645be7a0c 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -1153,6 +1153,7 @@ static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO *
// for how we deal with this.
resurrectThreads(*resurrected_threads);
#endif
+ dump_heap_to("heap.dot"); // debugging aid: dump the post-mark heap graph
#if defined(DEBUG)
// Zap CAFs that we will sweep
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index a77eb08d7a..d58e5d5001 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -1249,7 +1249,9 @@ memInventory (bool show)
#endif
// count the blocks allocated by the arena allocator
- arena_blocks = arenaBlocks();
+ // arena_blocks = arenaBlocks();
+ // counting is disabled since the arena allocator now uses malloc()
+ arena_blocks = 0;
// count the blocks containing executable memory
exec_blocks = countAllocdBlocks(exec_block);
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index be30e75b8f..2617c3b08f 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -155,6 +155,12 @@ scavengeTSO (StgTSO *tso)
}
#endif
+ // Also evacuate the global TSO list links. global_link itself is not
+ // evacuated here; MarkWeak handles that, so a thread is not kept alive
+ // merely by being on the list.
+ evacuate((StgClosure **)&tso->tso_link_next);
+ evacuate((StgClosure **)&tso->tso_link_prev);
+
tso->dirty = gct->failed_to_evac;
gct->eager_promotion = saved_eager;
@@ -232,6 +238,7 @@ StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
p = (StgPtr)&a->payload[0];
for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
{
+ // MUT_ARR_PTRS_CARD_BITS is the log2 card size: each card covers
+ // 2^MUT_ARR_PTRS_CARD_BITS (= 128) elements
q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
for (; p < q; p++) {
evacuate((StgClosure**)p);
@@ -245,7 +252,8 @@ StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
}
}
- q = (StgPtr)&a->payload[a->ptrs];
+ // handle the last card, which might not be full
+ q = (StgPtr)&a->payload[a->ptrs]; // points one past the last element of the array
if (p < q) {
for (; p < q; p++) {
evacuate((StgClosure**)p);
@@ -1975,6 +1983,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
p = scavenge_small_bitmap(p, size, bitmap);
follow_srt:
+ // follow the frame's SRT, the static references of its code (e.g. for thunks)
if (major_gc && info->i.srt) {
StgClosure *srt = (StgClosure*)GET_SRT(info);
evacuate(&srt);
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 40353ea180..eb9e3820e4 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -54,6 +54,15 @@
#include "ffi.h"
+#include "mmtk.h"
+
+// global variable; consulted by the code generator in compiler/GHC/StgToCmm/Foreign.hs
+#if defined(MMTK_GHC)
+uint32_t is_MMTk = true;
+#else
+uint32_t is_MMTk = false;
+#endif
+
/*
* All these globals require sm_mutex to access in THREADED_RTS mode.
*/
@@ -316,8 +325,12 @@ void storageAddCapabilities (uint32_t from, uint32_t to)
// allocate a block for each mut list
for (n = from; n < to; n++) {
for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
+#if defined(MMTK_GHC)
+ getCapability(n)->mut_lists[g] = NULL;
+#else
getCapability(n)->mut_lists[g] =
allocBlockOnNode(capNoToNumaNode(n));
+#endif
}
}
@@ -766,12 +779,18 @@ allocNursery (uint32_t node, bdescr *tail, W_ blocks)
STATIC_INLINE void
assignNurseryToCapability (Capability *cap, uint32_t n)
{
+#if defined(MMTK_GHC)
+ cap->r.rNursery = NULL;
+ cap->r.rCurrentNursery = NULL;
+ cap->r.rCurrentAlloc = NULL;
+#else
ASSERT(n < n_nurseries);
cap->r.rNursery = &nurseries[n];
cap->r.rCurrentNursery = nurseries[n].blocks;
newNurseryBlock(nurseries[n].blocks);
cap->r.rCurrentAlloc = NULL;
ASSERT(cap->r.rCurrentNursery->node == cap->node);
+#endif
}
/*
@@ -803,8 +822,10 @@ allocNurseries (uint32_t from, uint32_t to)
}
for (i = from; i < to; i++) {
+#if !defined(MMTK_GHC)
nurseries[i].blocks = allocNursery(capNoToNumaNode(i), NULL, n_blocks);
nurseries[i].n_blocks = n_blocks;
+#endif
}
}
@@ -1082,13 +1103,21 @@ allocate (Capability *cap, W_ n)
/*
* Allocate some n words of heap memory; returning NULL
 * on heap overflow
*/
StgPtr
allocateMightFail (Capability *cap, W_ n)
{
- bdescr *bd;
StgPtr p;
+
+#if defined(MMTK_GHC)
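+    // MMTk allocation protocol: mmtk_alloc bump-allocates 'bytes' bytes at
+    // word alignment for this task's mutator, and mmtk_post_alloc then
+    // publishes the new object to the collector. Note that, unlike the
+    // block-allocator path below, no allocation accounting is done here.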
+ StgWord bytes = n*sizeof(W_);
+ p = mmtk_alloc(cap->running_task->rts_mutator, bytes, sizeof(W_), 0, 0);
+ mmtk_post_alloc(cap->running_task->rts_mutator, p, bytes, 0);
+ return p;
+#endif
+
+ bdescr *bd;
if (RTS_UNLIKELY(n >= LARGE_OBJECT_THRESHOLD/sizeof(W_))) {
// The largest number of words such that
@@ -1237,6 +1266,8 @@ allocateMightFail (Capability *cap, W_ n)
StgPtr
allocatePinned (Capability *cap, W_ n /*words*/, W_ alignment /*bytes*/, W_ align_off /*bytes*/)
{
+ // Pinned: the GC must never move this object
+
StgPtr p;
bdescr *bd;
@@ -1256,10 +1287,14 @@ allocatePinned (Capability *cap, W_ n /*words*/, W_ alignment /*bytes*/, W_ alig
// number of extra words we could possibly need to satisfy the alignment
// constraint.
p = allocateMightFail(cap, n + alignment_w - 1);
+
if (p == NULL) {
return NULL;
} else {
+
+#if !defined(MMTK_GHC)
Bdescr(p)->flags |= BF_PINNED;
+#endif
W_ off_w = ALIGN_WITH_OFF_W(p, alignment, align_off);
MEMSET_SLOP_W(p, 0, off_w);
p += off_w;
@@ -1268,6 +1303,13 @@ allocatePinned (Capability *cap, W_ n /*words*/, W_ alignment /*bytes*/, W_ alig
}
}
+#if defined(MMTK_GHC)
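+    // Caveat: the final mmtk_alloc argument selects the allocation
+    // semantics; 0 is the default space, which does not by itself
+    // guarantee that a moving plan will keep the object pinned.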
+ StgWord bytes = n*sizeof(W_);
+ p = mmtk_alloc(cap->running_task->rts_mutator, bytes, alignment, align_off, 0);
+ mmtk_post_alloc(cap->running_task->rts_mutator, p, bytes, 0);
+ return p;
+#endif
+
bd = cap->pinned_object_block;
W_ off_w = 0;
@@ -1363,6 +1405,7 @@ allocatePinned (Capability *cap, W_ n /*words*/, W_ alignment /*bytes*/, W_ alig
p += off_w;
bd->free += n;
+ // note: we are not zeroing the slop after the closure here; is that intentional?
accountAllocation(cap, n);
return p;
@@ -1496,6 +1539,9 @@ dirty_STACK (Capability *cap, StgStack *stack)
updateRemembSetPushStack(cap, stack);
}
+ // If the stack is not already dirty, set the dirty flag and record the
+ // stack in the capability's mutated-closure set: any heap object in an
+ // older generation that has been mutated since the last GC must be
+ // remembered so its young-generation pointers are found.
if (RELAXED_LOAD(&stack->dirty) == 0) {
RELAXED_STORE(&stack->dirty, 1);
recordClosureMutated(cap,(StgClosure*)stack);
diff --git a/rts/sm/Storage.h b/rts/sm/Storage.h
index 0fecc50208..efa55cd981 100644
--- a/rts/sm/Storage.h
+++ b/rts/sm/Storage.h
@@ -15,7 +15,6 @@
/* -----------------------------------------------------------------------------
Initialisation / De-initialisation
-------------------------------------------------------------------------- */
-
void initStorage(void);
void initGeneration(generation *gen, int g);
void exitStorage(void);
diff --git a/testsuite/config/ghc b/testsuite/config/ghc
index 8916ffa3a9..cb9c057c2e 100644
--- a/testsuite/config/ghc
+++ b/testsuite/config/ghc
@@ -33,6 +33,7 @@ config.other_ways = ['hpc',
'nonmoving_thr_sanity',
'nonmoving_thr_ghc',
'compacting_gc',
+ 'mmtk'
]
@@ -120,6 +121,7 @@ config.way_flags = {
'compacting_gc': [],
'winio': [],
'winio_threaded': ['-threaded'],
+ 'mmtk': ['-mmtk'],
}
config.way_rts_flags = {
@@ -165,6 +167,7 @@ config.way_rts_flags = {
'compacting_gc': ['-c'],
'winio': ['--io-manager=native'],
'winio_threaded': ['--io-manager=native'],
+ 'mmtk': [],
}
# Useful classes of ways that can be used with only_ways(), omit_ways() and
diff --git a/utils/deriveConstants/Main.hs b/utils/deriveConstants/Main.hs
index f1d1cd742c..af3e6be7d7 100644
--- a/utils/deriveConstants/Main.hs
+++ b/utils/deriveConstants/Main.hs
@@ -387,6 +387,9 @@ wanteds os = concat
,structField C "StgRegTable" "rRet"
,structField C "StgRegTable" "rNursery"
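+    -- MMTk bump-allocator fields; the code generator
+    -- (GHC.StgToCmm.Foreign) uses their offsets for inline allocation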
+ ,structField Both "BumpAllocator" "cursor"
+ ,structField Both "BumpAllocator" "limit"
+
,defIntOffset Both "stgEagerBlackholeInfo"
"FUN_OFFSET(stgEagerBlackholeInfo)"
,defIntOffset Both "stgGCEnter1" "FUN_OFFSET(stgGCEnter1)"
@@ -402,6 +405,9 @@ wanteds os = concat
,structField C "Capability" "total_allocated"
,structField C "Capability" "weak_ptr_list_hd"
,structField C "Capability" "weak_ptr_list_tl"
+ ,structField Both "Capability" "running_task"
+
+ ,structField Both "Task" "mmutator"
,structField Both "bdescr" "start"
,structField Both "bdescr" "free"
@@ -456,6 +462,8 @@ wanteds os = concat
,closureField C "StgTSO" "_link"
,closureField C "StgTSO" "global_link"
+ ,closureField C "StgTSO" "tso_link_prev"
+ ,closureField C "StgTSO" "tso_link_next"
,closureField C "StgTSO" "what_next"
,closureField C "StgTSO" "why_blocked"
,closureField C "StgTSO" "block_info"
@@ -766,6 +774,7 @@ getWanted verbose os tmpdir gccProgram gccFlags nmProgram mobjdumpProgram
"#include \"Rts.h\"",
"#include \"StableName.h\"",
"#include \"Capability.h\"",
+ "#include \"include/mmtk.h\"",
"",
"#include <inttypes.h>",
"#include <stddef.h>",