author    Brian Wignall <brianwignall@gmail.com>      2020-01-10 10:47:46 -0500
committer Marge Bot <ben+marge-bot@smart-cactus.org>  2020-01-12 21:30:08 -0500
commit    0b5ddc7f2c10ee84631dd6cb5f6368afbc389449 (patch)
tree      d9a77d5b2c55d75d9ae5b6fa199612315de9d163 /rts
parent    350e2b78788d47255d27489dfc62d664498b5de4 (diff)
Fix more typos, via an improved Levenshtein-style corrector
Diffstat (limited to 'rts')
-rw-r--r--  rts/LdvProfile.c        2
-rw-r--r--  rts/PrimOps.cmm         2
-rw-r--r--  rts/STM.c               4
-rw-r--r--  rts/Schedule.c          4
-rw-r--r--  rts/TraverseHeap.c      2
-rw-r--r--  rts/linker/Elf.c        2
-rw-r--r--  rts/linker/MachO.c      6
-rw-r--r--  rts/sm/GC.c             4
-rw-r--r--  rts/sm/NonMovingMark.c  4
9 files changed, 15 insertions, 15 deletions
diff --git a/rts/LdvProfile.c b/rts/LdvProfile.c
index f4ab02886d..cf57f28eae 100644
--- a/rts/LdvProfile.c
+++ b/rts/LdvProfile.c
@@ -111,7 +111,7 @@ processHeapClosureForDead( const StgClosure *c )
case BLACKHOLE:
case BLOCKING_QUEUE:
/*
- 'Ingore' cases
+ 'Ignore' cases
*/
// Why can we ignore IND closures? We assume that
// any census is preceded by a major garbage collection, which
diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index b5930363a1..0486399b46 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -1313,7 +1313,7 @@ stg_atomicallyzh (P_ stm)
code = stm;
frame_result = NO_TREC;
- /* Start the memory transcation */
+ /* Start the memory transaction */
("ptr" new_trec) = ccall stmStartTransaction(MyCapability() "ptr", old_trec "ptr");
StgTSO_trec(CurrentTSO) = new_trec;
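
For context, stmStartTransaction is the entry point into the RTS's STM machinery. Below is a minimal sketch of the transaction life cycle this primop sets in motion; the helper names are hypothetical stand-ins, not the real Cmm or STM.c code:

#include <stdbool.h>

/* Hypothetical stand-ins for the real STM.c entry points. */
static void start_transaction(void)  { /* allocate a fresh trec */ }
static bool commit_transaction(void) { return true; /* validate + publish */ }

/* Sketch only: start a trec, run the STM action (its reads and writes
 * are logged in the trec), then validate and commit; if validation
 * fails, discard the log and re-run the whole action from scratch. */
static void atomically_sketch(void (*stm_action)(void))
{
    for (;;) {
        start_transaction();       /* cf. stmStartTransaction above */
        stm_action();
        if (commit_transaction())  /* cf. stmCommitTransaction in STM.c */
            return;
    }
}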
diff --git a/rts/STM.c b/rts/STM.c
index 1dde70b485..5f72c0ebfb 100644
--- a/rts/STM.c
+++ b/rts/STM.c
@@ -828,7 +828,7 @@ static StgBool validate_and_acquire_ownership (Capability *cap,
// check_read_only : check that we've seen an atomic snapshot of the
// non-updated TVars accessed by a trec. This checks that the last TRec to
// commit an update to the TVar is unchanged since the value was stashed in
-// validate_and_acquire_ownership. If no udpate is seen to any TVar than
+// validate_and_acquire_ownership. If no update is seen to any TVar then
// all of them contained their expected values at the start of the call to
// check_read_only.
//
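
As a rough illustration of the idea (hypothetical field and function names, not the actual STM.h definitions): each read-only trec entry stashes the TVar's update counter during validate_and_acquire_ownership, and the snapshot holds only if no counter has moved since:

#include <stddef.h>

/* Hypothetical simplified layouts; the real ones live in STM.h. */
struct tvar       { unsigned long num_updates; };
struct trec_entry { struct tvar  *tvar;
                    unsigned long saved_num_updates;
                    int           read_only; };

/* Sketch of check_read_only: succeed iff every read-only entry's TVar
 * still carries the update count stashed during validation. */
static int check_read_only_sketch(struct trec_entry *es, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        if (es[i].read_only &&
            es[i].tvar->num_updates != es[i].saved_num_updates)
            return 0;   /* another commit updated this TVar */
    }
    return 1;           /* atomic snapshot still holds */
}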
@@ -1252,7 +1252,7 @@ StgBool stmReWait(Capability *cap, StgTSO *tso) {
park_tso(tso);
revert_ownership(cap, trec, true);
} else {
- // The transcation has become invalid. We can now remove it from the wait
+ // The transaction has become invalid. We can now remove it from the wait
// queues.
if (trec -> state != TREC_CONDEMNED) {
remove_watch_queue_entries_for_trec (cap, trec);
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 9323915dfe..ce1a1fc060 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -226,7 +226,7 @@ schedule (Capability *initialCapability, Task *task)
// In order to cleanly shut down the runtime, we want to:
// * make sure that all main threads return to their callers
// with the state 'Interrupted'.
- // * clean up all OS threads assocated with the runtime
+ // * clean up all OS threads associated with the runtime
// * free all memory etc.
//
// So the sequence goes like this:
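
The sequence itself is elided by the hunk, but this shutdown path is what a host program triggers through the public RTS API. A minimal embedding, using the real HsFFI.h entry points:

#include "HsFFI.h"

int main(int argc, char *argv[])
{
    hs_init(&argc, &argv);   /* boot the runtime */
    /* ... call foreign-exported Haskell functions here ... */
    hs_exit();               /* drives the clean shutdown described above:
                                main threads return, OS threads are reaped,
                                memory is freed */
    return 0;
}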
@@ -2906,7 +2906,7 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
// OLD COMMENT (we don't have MIN_UPD_SIZE now):
// LDV profiling: stg_raise_info has THUNK as its closure
// type. Since a THUNK takes at least MIN_UPD_SIZE words in its
- // payload, MIN_UPD_SIZE is more approprate than 1. It seems that
+ // payload, MIN_UPD_SIZE is more appropriate than 1. It seems that
// 1 does not cause any problem unless profiling is performed.
// However, when LDV profiling goes on, we need to linearly scan
// small object pool, where raise_closure is stored, so we should
diff --git a/rts/TraverseHeap.c b/rts/TraverseHeap.c
index 62d44c1636..8bf58c11ee 100644
--- a/rts/TraverseHeap.c
+++ b/rts/TraverseHeap.c
@@ -324,7 +324,7 @@ pushStackElement(traverseState *ts, const stackElement se)
newStackBlock(ts, nbd);
}
- // adjust stackTop (acutal push)
+ // adjust stackTop (actual push)
ts->stackTop--;
// If the size of stackElement was huge, we would better replace the
// following statement by either a memcpy() call or a switch statement
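
The surrounding logic — grow-on-overflow followed by decrement-then-store — is the usual pattern for a downward-growing stack. A self-contained sketch with hypothetical types (the real stackElement and block handling live in TraverseHeap.c):

#include <string.h>

typedef struct { void *payload[4]; } elem;  /* stand-in for stackElement */

/* Sketch: the stack grows downward from the end of its block, so a
 * push decrements the top pointer first and then copies the element
 * into the slot it now addresses. */
static int push_sketch(elem **stackTop, elem *stackLimit, const elem *se)
{
    if (*stackTop == stackLimit)
        return 0;          /* block full: caller must chain in a new
                              block (cf. newStackBlock) */
    (*stackTop)--;         /* adjust stackTop (actual push) */
    memcpy(*stackTop, se, sizeof *se);  /* struct assignment works too */
    return 1;
}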
diff --git a/rts/linker/Elf.c b/rts/linker/Elf.c
index 3e19d3a2db..c240a97d5e 100644
--- a/rts/linker/Elf.c
+++ b/rts/linker/Elf.c
@@ -775,7 +775,7 @@ ocGetNames_ELF ( ObjectCode* oc )
alloc = SECTION_NOMEM;
}
// use the m32 allocator if either the image is not mapped
- // (i.e. we cannot map the secions separately), or if the section
+ // (i.e. we cannot map the sections separately), or if the section
// size is small.
else if (!oc->imageMapped || size < getPageSize() / 3) {
bool executable = kind == SECTIONKIND_CODE_OR_RODATA;
diff --git a/rts/linker/MachO.c b/rts/linker/MachO.c
index 12e84578fc..6e379ffb53 100644
--- a/rts/linker/MachO.c
+++ b/rts/linker/MachO.c
@@ -52,7 +52,7 @@ int64_t signExtend(uint64_t val, uint8_t bits);
bool isVectorOp(uint32_t *p);
bool isLoadStore(uint32_t *p);
-/* aarch64 relocations may contain an addend alreay in the position
+/* aarch64 relocations may contain an addend already in the position
* where we want to write the address offset to. Thus decoding as well
* as encoding is needed.
*/
@@ -305,7 +305,7 @@ decodeAddend(ObjectCode * oc, Section * section, MachORelocationInfo * ri) {
}
case ARM64_RELOC_BRANCH26:
/* take the lower 26 bits and shift them by 2. The last two are
- * implicilty 0 (as the instructions must be aligned!) and sign
+ * implicitly 0 (as the instructions must be aligned!) and sign
* extend to 64 bits.
*/
return signExtend( (*p & 0x03FFFFFF) << 2, 28 );
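
The sign extension can be done with a shift-up/arithmetic-shift-down pair; a sketch of what a function with signExtend's signature might look like, assuming the usual arithmetic right shift on signed values:

#include <stdint.h>

/* Sketch: move the top bit of the 'bits'-wide field into bit 63, then
 * shift back so that bit is replicated across the high bits. */
static int64_t sign_extend_sketch(uint64_t val, uint8_t bits)
{
    return (int64_t)(val << (64 - bits)) >> (64 - bits);
}

/* Example: a BRANCH26 displacement of -1 instruction (-4 bytes) is
 * encoded as 0x3FFFFFF; (0x3FFFFFF << 2) == 0xFFFFFFC, and sign
 * extending that from 28 bits yields -4. */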
@@ -543,7 +543,7 @@ relocateSectionAarch64(ObjectCode * oc, Section * section)
*
* - loaded the sections (potentially into non-contiguous memory),
* (in ocGetNames_MachO)
- * - registered exported sybmols
+ * - registered exported symbols
* (in ocGetNames_MachO)
* - and fixed the nlist[i].n_value for common storage symbols (N_UNDF,
* N_EXT and n_value != 0) so that they point into the common storage.
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index a560c2c8fd..aac8d974ae 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -596,7 +596,7 @@ GarbageCollect (uint32_t collect_gen,
/* free old memory and shift to-space into from-space for all
* the collected generations (except the allocation area). These
- * freed blocks will probaby be quickly recycled.
+ * freed blocks will probably be quickly recycled.
*/
if (gen->mark)
{
@@ -646,7 +646,7 @@ GarbageCollect (uint32_t collect_gen,
ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
ASSERT(countOccupied(gen->blocks) == gen->n_words);
}
- else // not copacted
+ else // not compacted
{
freeChain(gen->old_blocks);
}
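
In the non-compacted (copying) case the from-space blocks are dead after evacuation, so they can simply be walked and released. Schematically, with a hypothetical block descriptor rather than the real bdescr:

#include <stdlib.h>

struct blk { struct blk *link; };  /* stand-in for a block descriptor */

/* Sketch of freeChain: walk the linked chain of dead from-space blocks
 * and release each one (here via free(); the RTS returns blocks to its
 * own block allocator, where they will probably be quickly recycled). */
static void free_chain_sketch(struct blk *bd)
{
    while (bd != NULL) {
        struct blk *next = bd->link;
        free(bd);
        bd = next;
    }
}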
diff --git a/rts/sm/NonMovingMark.c b/rts/sm/NonMovingMark.c
index 3ebd039c43..81000566cb 100644
--- a/rts/sm/NonMovingMark.c
+++ b/rts/sm/NonMovingMark.c
@@ -1265,7 +1265,7 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
ASSERT(!IS_FORWARDING_PTR(p->header.info));
// N.B. only the first block of a compact region is guaranteed to carry
- // BF_NONMOVING; conseqently we must separately check for BF_COMPACT.
+ // BF_NONMOVING; consequently we must separately check for BF_COMPACT.
if (bd->flags & (BF_COMPACT | BF_NONMOVING)) {
if (bd->flags & BF_COMPACT) {
@@ -1315,7 +1315,7 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
}
// A pinned object that is still attached to a capability (because it's not
- // filled yet). No need to trace it pinned objects can't contain poiners.
+ // filled yet). No need to trace it; pinned objects can't contain pointers.
else if (bd->flags & BF_PINNED) {
#if defined(DEBUG)
bool found_it = false;