/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * The block allocator and free list manager.
 *
 * This is the architecture independent part of the block allocator.
 * It requires only the following support from the operating system:
 *
 *    void *getMBlocks(uint32_t n);
 *
 * returns the address of an n*MBLOCK_SIZE region of memory, aligned on
 * an MBLOCK_SIZE boundary.  There are no other restrictions on the
 * addresses of memory returned by getMBlocks().
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Storage.h"
#include "RtsUtils.h"
#include "BlockAlloc.h"
#include "OSMem.h"

#include <string.h>

static void  initMBlock(void *mblock, uint32_t node);

/* -----------------------------------------------------------------------------

  Implementation notes
  ~~~~~~~~~~~~~~~~~~~~

  Terminology:
    - bdescr = block descriptor
    - bgroup = block group (1 or more adjacent blocks)
    - mblock = mega block
    - mgroup = mega group (1 or more adjacent mblocks)

   Invariants on block descriptors
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   bd->start always points to the start of the block.

   bd->free is either:
      - zero for a non-group-head; bd->link points to the head
      - (-1) for the head of a free block group
      - or it points within the block (group)

   bd->blocks is either:
      - zero for a non-group-head; bd->link points to the head
      - number of blocks in this group otherwise

   bd->link either points to a block descriptor or is NULL

   The following fields are not used by the allocator:
     bd->flags
     bd->gen_no
     bd->gen
     bd->dest

  Exceptions: we don't maintain invariants for all the blocks within a
  group on the free list, because it is expensive to modify every
  bdescr in a group when coalescing.  Just the head and last bdescrs
  will be correct for a group on the free list.


  Free lists
  ~~~~~~~~~~

  Preliminaries:
    - most allocations are for a small number of blocks
    - sometimes the OS gives us new memory backwards in the address
      space, sometimes forwards, so we should not be biased towards
      any particular layout in the address space
    - We want to avoid fragmentation
    - We want allocation and freeing to be O(1) or close.

  Coalescing trick: when a bgroup is freed (freeGroup()), we can check
  whether it can be coalesced with other free bgroups by checking the
  bdescrs for the blocks on either side of it.  This means that we can
  coalesce in O(1) time.  Every free bgroup must have its head and tail
  bdescrs initialised, the rest don't matter.
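
  For example, this is how freeGroup() finds the neighbours in O(1):
  the head of the forward neighbour is simply (p + p->blocks), and the
  backward neighbour is found via the bdescr at (p - 1): if its blocks
  field is zero it is the tail of a multi-block group and its link
  field points at the head; otherwise it is itself the head of a
  single-block group.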

  We keep the free list in buckets, using a heap-sort strategy.
  Bucket N contains blocks with sizes 2^N to 2^(N+1)-1.  The list of
  blocks in each bucket is doubly-linked, so that if a block is
  coalesced we can easily remove it from its current free list.

  To allocate a new block of size S, grab a block from bucket
  log2ceiling(S) (i.e. log2() rounded up), in which all blocks are at
  least as big as S, and split it if necessary.  If there are no
  blocks in that bucket, look at bigger buckets until a block is found.
  Allocation is therefore O(log N) time.

  To free a block:
    - coalesce it with neighbours.
    - remove coalesced neighbour(s) from free list(s)
    - add the new (coalesced) block to the front of the appropriate
      bucket, given by log2(S) where S is the size of the block.

  Free is O(1).
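
  A worked example: a freed group of 5 blocks goes into bucket
  log_2(5) = 2, which holds sizes 4-7.  A request for 5 blocks starts
  at bucket log_2_ceil(5) = 3 (sizes 8-15), where every group is
  guaranteed to be large enough, and splits off the excess.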

  Megablocks
  ~~~~~~~~~~

  Separately from the free list of block groups, which are smaller than
  an mblock, we maintain a free list of mblock groups.  This is the unit
  of memory the operating system gives us, and we may either split mblocks
  into blocks, or allocate them directly (when we need very large
  contiguous regions of memory).  mblocks have a different set of
  invariants than blocks:

  bd->start points to the start of the block IF the block is in the first mblock
  bd->blocks and bd->link are only valid IF this block is the first block
    of the first mblock
  No other fields are used (in particular, free is not used, meaning that
    space that is not used by the (single) object is wasted).

  This has implications for the free list as well:
  We cannot play the coalescing trick with mblocks, because there is
  no requirement that the bdescrs in the second and subsequent mblock
  of an mgroup are initialised (the mgroup might be filled with a
  large array, overwriting the bdescrs for example).

  The separate free list for megablocks is thus sorted in *address*
  order, so that we can coalesce.  Allocation in this list is best-fit
  by traversing the whole list: we don't expect this list to be long,
  and allocation/freeing of large blocks is rare; avoiding
  fragmentation is more important than performance here.

  freeGroup() might end up moving a block from free_list to
  free_mblock_list, if after coalescing we end up with a full mblock.

  checkFreeListSanity() checks all the invariants on the free lists.

  --------------------------------------------------------------------------- */

/* ---------------------------------------------------------------------------
   WATCH OUT FOR OVERFLOW

   Be very careful with integer overflow here.  If you have an
   expression like (n_blocks * BLOCK_SIZE), and n_blocks is an int or
   a uint32_t, then it will very likely overflow on a 64-bit platform.
   Always cast to StgWord (or W_ for short) first: ((W_)n_blocks * BLOCK_SIZE).

  --------------------------------------------------------------------------- */

// free_list[i] contains blocks that are at least size 2^i, and at
// most size 2^(i+1) - 1.
//
// To find the free list in which to place a block, use log_2(size).
// To find a free block of the right size, use log_2_ceil(size).
//
// The largest free list (free_list[NUM_FREE_LISTS-1]) needs to contain sizes
// from half a megablock up to (but not including) a full megablock.
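//
// e.g. with the usual 4Kbyte blocks and 1Mbyte megablocks (BLOCK_SHIFT=12,
// MBLOCK_SHIFT=20), NUM_FREE_LISTS below works out to 8, so the buckets
// cover group sizes from 1 up to 255 blocks.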

#define NUM_FREE_LISTS (MBLOCK_SHIFT-BLOCK_SHIFT)

// In THREADED_RTS mode, the free list is protected by sm_mutex.

static bdescr *free_list[MAX_NUMA_NODES][NUM_FREE_LISTS];
static bdescr *free_mblock_list[MAX_NUMA_NODES];

W_ n_alloc_blocks;   // currently allocated blocks
W_ hw_alloc_blocks;  // high-water allocated blocks

W_ n_alloc_blocks_by_node[MAX_NUMA_NODES];

/* -----------------------------------------------------------------------------
   Initialisation
   -------------------------------------------------------------------------- */

void initBlockAllocator(void)
{
    uint32_t i, node;
    for (node = 0; node < MAX_NUMA_NODES; node++) {
        for (i=0; i < NUM_FREE_LISTS; i++) {
            free_list[node][i] = NULL;
        }
        free_mblock_list[node] = NULL;
        n_alloc_blocks_by_node[node] = 0;
    }
    n_alloc_blocks = 0;
    hw_alloc_blocks = 0;
}

/* -----------------------------------------------------------------------------
   Accounting
   -------------------------------------------------------------------------- */

STATIC_INLINE
void recordAllocatedBlocks(uint32_t node, uint32_t n)
{
    n_alloc_blocks += n;
    n_alloc_blocks_by_node[node] += n;
    if (n > 0 && n_alloc_blocks > hw_alloc_blocks) {
        hw_alloc_blocks = n_alloc_blocks;
    }
}

STATIC_INLINE
void recordFreedBlocks(uint32_t node, uint32_t n)
{
    ASSERT(n_alloc_blocks >= n);
    n_alloc_blocks -= n;
    n_alloc_blocks_by_node[node] -= n;
}

/* -----------------------------------------------------------------------------
   Allocation
   -------------------------------------------------------------------------- */

STATIC_INLINE bdescr *
tail_of (bdescr *bd)
{
    return bd + bd->blocks - 1;
}

STATIC_INLINE void
initGroup(bdescr *head)
{
  head->free   = head->start;
  head->link   = NULL;

  // If this is a block group (but not a megablock group), we
  // make the last block of the group point to the head.  This is used
  // when coalescing blocks in freeGroup().  We don't do this for
  // megablock groups because blocks in the second and subsequent
  // mblocks don't have bdescrs; freeing these is handled in a
  // different way by free_mblock_group().
  if (head->blocks > 1 && head->blocks <= BLOCKS_PER_MBLOCK) {
      bdescr *last = tail_of(head);
      last->blocks = 0;
      last->link = head;
  }

#if defined(DEBUG)
  for (uint32_t i=0; i < head->blocks; i++) {
      head[i].flags = 0;
  }
#endif
}

#if SIZEOF_VOID_P == SIZEOF_LONG
#define CLZW(n) (__builtin_clzl(n))
#else
#define CLZW(n) (__builtin_clzll(n))
#endif

// log base 2 (floor), needs to support up to (2^NUM_FREE_LISTS)-1
STATIC_INLINE uint32_t
log_2(W_ n)
{
    ASSERT(n > 0 && n < (1<<NUM_FREE_LISTS));
#if defined(__GNUC__)
    return CLZW(n) ^ (sizeof(StgWord)*8 - 1);
    // generates good code on x86.  __builtin_clz() compiles to bsr+xor, but
    // we want just bsr, so the xor here cancels out gcc's xor.
#else
    W_ i, x;
    x = n;
    for (i=0; i < NUM_FREE_LISTS; i++) {
        x = x >> 1;
        if (x == 0) return i;
    }
    return NUM_FREE_LISTS;
#endif
}

// log base 2 (ceiling), needs to support up to (2^NUM_FREE_LISTS)-1
STATIC_INLINE uint32_t
log_2_ceil(W_ n)
{
    ASSERT(n > 0 && n < (1<<NUM_FREE_LISTS));
#if defined(__GNUC__)
    uint32_t r = log_2(n);
    return (n & (n-1)) ? r+1 : r;
#else
    W_ i, x;
    x = 1;
    for (i=0; i < NUM_FREE_LISTS; i++) {
        if (x >= n) return i;
        x = x << 1;
    }
    return NUM_FREE_LISTS;
#endif
}

STATIC_INLINE void
free_list_insert (uint32_t node, bdescr *bd)
{
    uint32_t ln;

    ASSERT(bd->blocks < BLOCKS_PER_MBLOCK);
    ln = log_2(bd->blocks);

    dbl_link_onto(bd, &free_list[node][ln]);
}

// After splitting a group, the last block of each group must have a
// tail that points to the head block, to keep our invariants for
// coalescing.
STATIC_INLINE void
setup_tail (bdescr *bd)
{
    bdescr *tail;
    tail = tail_of(bd);
    if (tail != bd) {
        tail->blocks = 0;
        tail->free = 0;
        tail->link = bd;
    }
}


// Take a free block group bd, and split off a group of size n from
// it.  Adjust the free list as necessary, and return the new group.
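// For example, asking for n=5 from a free group of 12 blocks returns the
// last 5 blocks as the new group; the remaining 7-block group is
// re-inserted into bucket log_2(7) = 2.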
static bdescr *
split_free_block (bdescr *bd, uint32_t node, W_ n, uint32_t ln /* log_2_ceil(n) */)
{
    bdescr *fg; // free group

    ASSERT(bd->blocks > n);
    dbl_link_remove(bd, &free_list[node][ln]);
    fg = bd + bd->blocks - n; // take n blocks off the end
    fg->blocks = n;
    bd->blocks -= n;
    setup_tail(bd);
    ln = log_2(bd->blocks);
    dbl_link_onto(bd, &free_list[node][ln]);
    return fg;
}

// Take N blocks off the end, free the rest.
static bdescr *
split_block_high (bdescr *bd, W_ n)
{
    ASSERT(bd->blocks > n);

    bdescr* ret = bd + bd->blocks - n; // take n blocks off the end
    ret->blocks = n;
    ret->start = ret->free = bd->start + (bd->blocks - n)*BLOCK_SIZE_W;
    ret->link = NULL;

    bd->blocks -= n;

    setup_tail(ret);
    setup_tail(bd);
    freeGroup(bd);

    return ret;
}

// Like `split_block_high`, but takes n blocks off the beginning rather
// than the end.
static bdescr *
split_block_low (bdescr *bd, W_ n)
{
    ASSERT(bd->blocks > n);

    bdescr* bd_ = bd + n;
    bd_->blocks = bd->blocks - n;
    bd_->start = bd_->free = bd->start + n*BLOCK_SIZE_W;

    bd->blocks = n;

    setup_tail(bd_);
    setup_tail(bd);
    freeGroup(bd_);

    return bd;
}

/* Only initializes the start pointers on the first megablock and the
 * blocks field of the first bdescr; callers are responsible for calling
 * initGroup afterwards.
 */
static bdescr *
alloc_mega_group (uint32_t node, StgWord mblocks)
{
    bdescr *best, *bd, *prev;
    StgWord n;

    n = MBLOCK_GROUP_BLOCKS(mblocks);

    best = NULL;
    prev = NULL;
    for (bd = free_mblock_list[node]; bd != NULL; prev = bd, bd = bd->link)
    {
        if (bd->blocks == n)
        {
            if (prev) {
                prev->link = bd->link;
            } else {
                free_mblock_list[node] = bd->link;
            }
            return bd;
        }
        else if (bd->blocks > n)
        {
            if (!best || bd->blocks < best->blocks)
            {
                best = bd;
            }
        }
    }

    if (best)
    {
        // we take our chunk off the end here.
        StgWord best_mblocks  = BLOCKS_TO_MBLOCKS(best->blocks);
        bd = FIRST_BDESCR((StgWord8*)MBLOCK_ROUND_DOWN(best) +
                          (best_mblocks-mblocks)*MBLOCK_SIZE);

        best->blocks = MBLOCK_GROUP_BLOCKS(best_mblocks - mblocks);
        initMBlock(MBLOCK_ROUND_DOWN(bd), node);
    }
    else
    {
        void *mblock;
        if (RtsFlags.GcFlags.numa) {
            mblock = getMBlocksOnNode(node, mblocks);
        } else {
            mblock = getMBlocks(mblocks);
        }
        initMBlock(mblock, node); // only need to init the 1st one
        bd = FIRST_BDESCR(mblock);
    }
    bd->blocks = MBLOCK_GROUP_BLOCKS(mblocks);
    return bd;
}

bdescr *
allocGroupOnNode (uint32_t node, W_ n)
{
    bdescr *bd, *rem;
    StgWord ln;

    if (n == 0) barf("allocGroup: requested zero blocks");

    if (n >= BLOCKS_PER_MBLOCK)
    {
        StgWord mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(n);

        // n_alloc_blocks doesn't count the extra blocks we get in a
        // megablock group.
        recordAllocatedBlocks(node, mblocks * BLOCKS_PER_MBLOCK);

        bd = alloc_mega_group(node, mblocks);
        // only the bdescrs of the first MB are required to be initialised
        initGroup(bd);
        goto finish;
    }

    recordAllocatedBlocks(node, n);

    ln = log_2_ceil(n);

    while (ln < NUM_FREE_LISTS && free_list[node][ln] == NULL) {
        ln++;
    }

    if (ln == NUM_FREE_LISTS) {
#if 0  /* useful for debugging fragmentation */
        if ((W_)mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W
             - (W_)((n_alloc_blocks - n) * BLOCK_SIZE_W) > (2*1024*1024)/sizeof(W_)) {
            debugBelch("Fragmentation, wanted %d blocks, %ld MB free\n", n, ((mblocks_allocated * BLOCKS_PER_MBLOCK) - n_alloc_blocks) / BLOCKS_PER_MBLOCK);
            RtsFlags.DebugFlags.block_alloc = 1;
            checkFreeListSanity();
        }
#endif

        bd = alloc_mega_group(node,1);
        bd->blocks = n;
        initGroup(bd);                   // we know the group will fit
        rem = bd + n;
        rem->blocks = BLOCKS_PER_MBLOCK-n;
        initGroup(rem);                  // init the slop
        recordAllocatedBlocks(node,rem->blocks);
        freeGroup(rem);                  // add the slop on to the free list
        goto finish;
    }

    bd = free_list[node][ln];

    if (bd->blocks == n)                // exactly the right size!
    {
        dbl_link_remove(bd, &free_list[node][ln]);
        initGroup(bd);
    }
    else if (bd->blocks >  n)            // block too big...
    {
        bd = split_free_block(bd, node, n, ln);
        ASSERT(bd->blocks == n);
        initGroup(bd);
    }
    else
    {
        barf("allocGroup: free list corrupted");
    }

finish:
    IF_DEBUG(zero_on_gc, memset(bd->start, 0xaa, (W_)bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}

// Allocate `n` blocks aligned to `n` blocks, e.g. when n = 8, the blocks will
// be aligned at `8 * BLOCK_SIZE`. For a group with `n` blocks this makes it
// easy to compute the beginning of the group from a location p inside the
// group: round p down with
//
//     p - (p % (BLOCK_SIZE*n))
//
// Used by the non-moving collector for allocating segments.
//
// Because the storage manager does not support aligned allocations, we have to
// allocate `2*n - 1` blocks here to make sure we'll be able to find an aligned
// region in the allocated blocks. After finding the aligned area we want to
// free the slop on the low and high sides, and the block allocator doesn't
// support freeing only some portion of a megablock (we can only free whole
// megablocks).
// So we disallow allocating megablocks here, and allow allocating at most
// `BLOCKS_PER_MBLOCK / 2` blocks.
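//
// For example (assuming the usual 4Kbyte blocks): for n = 8 we allocate at
// least 2*8-1 = 15 blocks; any 15 contiguous blocks must contain a run of 8
// blocks starting on a 32Kbyte (8*BLOCK_SIZE) boundary, and the slop on
// either side of that run is freed again below.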
bdescr *
allocAlignedGroupOnNode (uint32_t node, W_ n)
{
    // allocate enough blocks to have enough space aligned at n-block boundary
    // free any slops on the low and high side of this space

    // number of blocks to allocate to make sure we have enough aligned space
    W_ num_blocks = 2*n - 1;

    if (num_blocks >= BLOCKS_PER_MBLOCK) {
        barf("allocAlignedGroupOnNode: allocating megablocks is not supported\n"
             "    requested blocks: %" FMT_Word "\n"
             "    required for alignment: %" FMT_Word "\n"
             "    megablock size (in blocks): %" FMT_Word,
             n, num_blocks, (W_) BLOCKS_PER_MBLOCK);
    }

    W_ group_size = n * BLOCK_SIZE;

    // To reduce splitting and fragmentation we use allocLargeChunkOnNode here.
    // Tweak the max allocation to avoid allocating megablocks. Splitting slop
    // below doesn't work with megablocks (freeGroup can't free only a portion
    // of a megablock so we can't allocate megablocks and free some parts of
    // them).
    W_ max_blocks = stg_min(num_blocks * 3, BLOCKS_PER_MBLOCK - 1);
    bdescr *bd = allocLargeChunkOnNode(node, num_blocks, max_blocks);
    // We may allocate more than num_blocks, so update it
    num_blocks = bd->blocks;

    // slop on the low side
    W_ slop_low = 0;
    if ((uintptr_t)bd->start % group_size != 0) {
        slop_low = group_size - ((uintptr_t)bd->start % group_size);
    }

    W_ slop_high = (num_blocks * BLOCK_SIZE) - group_size - slop_low;

    ASSERT((slop_low % BLOCK_SIZE) == 0);
    ASSERT((slop_high % BLOCK_SIZE) == 0);

    W_ slop_low_blocks = slop_low / BLOCK_SIZE;
    W_ slop_high_blocks = slop_high / BLOCK_SIZE;

    ASSERT(slop_low_blocks + slop_high_blocks + n == num_blocks);

#if defined(DEBUG)
    checkFreeListSanity();
    W_ free_before = countFreeList();
#endif

    if (slop_low_blocks != 0) {
        bd = split_block_high(bd, num_blocks - slop_low_blocks);
        ASSERT(countBlocks(bd) == num_blocks - slop_low_blocks);
    }

#if defined(DEBUG)
    ASSERT(countFreeList() == free_before + slop_low_blocks);
    checkFreeListSanity();
#endif

    // At this point the bd should be aligned, but we may have slop on the high side
    ASSERT((uintptr_t)bd->start % group_size == 0);

#if defined(DEBUG)
    free_before = countFreeList();
#endif

    if (slop_high_blocks != 0) {
        bd = split_block_low(bd, n);
        ASSERT(bd->blocks == n);
    }

#if defined(DEBUG)
    ASSERT(countFreeList() == free_before + slop_high_blocks);
    checkFreeListSanity();
#endif

    // Should still be aligned
    ASSERT((uintptr_t)bd->start % group_size == 0);

    // Just to make sure I get this right
    ASSERT(Bdescr(bd->start) == bd);

    return bd;
}

STATIC_INLINE
uint32_t nodeWithLeastBlocks (void)
{
    uint32_t node = 0, i;
    uint32_t min_blocks = n_alloc_blocks_by_node[0];
    for (i = 1; i < n_numa_nodes; i++) {
        if (n_alloc_blocks_by_node[i] < min_blocks) {
            min_blocks = n_alloc_blocks_by_node[i];
            node = i;
        }
    }
    return node;
}

bdescr* allocGroup (W_ n)
{
    return allocGroupOnNode(nodeWithLeastBlocks(),n);
}


//
// Allocate a chunk of blocks that is at least min and at most max
// blocks in size. This API is used by the nursery allocator that
// wants contiguous memory preferably, but doesn't require it.  When
// memory is fragmented we might have lots of chunks that are
// less than a full megablock, so allowing the nursery allocator to
// use these reduces fragmentation considerably.  e.g. on a GHC build
// with +RTS -H, I saw fragmentation go from 17MB down to 3MB on a
// single compile.
//
// Further to this: in #7257 there is a program that creates serious
// fragmentation such that the heap is full of tiny <4 block chains.
// The nursery allocator therefore has to use single blocks to avoid
// fragmentation, but we make sure that we allocate large blocks
// preferably if there are any.
//
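// For example (a hypothetical call): allocLargeChunk(16, 128) returns a
// contiguous group of at least 16 and at most 128 blocks if the free lists
// contain any group of 16 or more blocks, and otherwise falls back to
// allocGroup for the full 128.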
bdescr* allocLargeChunkOnNode (uint32_t node, W_ min, W_ max)
{
    bdescr *bd;
    StgWord ln, lnmax;

    if (min >= BLOCKS_PER_MBLOCK) {
        return allocGroupOnNode(node,max);
    }

    ln = log_2_ceil(min);
    lnmax = log_2_ceil(max);

    while (ln < NUM_FREE_LISTS && ln < lnmax && free_list[node][ln] == NULL) {
        ln++;
    }
    if (ln == NUM_FREE_LISTS || ln == lnmax) {
        return allocGroupOnNode(node,max);
    }
    bd = free_list[node][ln];

    if (bd->blocks <= max)              // exactly the right size!
    {
        dbl_link_remove(bd, &free_list[node][ln]);
        initGroup(bd);
    }
    else   // block too big...
    {
        bd = split_free_block(bd, node, max, ln);
        ASSERT(bd->blocks == max);
        initGroup(bd);
    }

    recordAllocatedBlocks(node, bd->blocks);

    IF_DEBUG(zero_on_gc, memset(bd->start, 0xaa, (W_)bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}

bdescr* allocLargeChunk (W_ min, W_ max)
{
    return allocLargeChunkOnNode(nodeWithLeastBlocks(), min, max);
}

bdescr *
allocGroup_lock(W_ n)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocGroup(n);
    RELEASE_SM_LOCK;
    return bd;
}

bdescr *
allocBlock_lock(void)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocBlock();
    RELEASE_SM_LOCK;
    return bd;
}

bdescr *
allocGroupOnNode_lock(uint32_t node, W_ n)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocGroupOnNode(node,n);
    RELEASE_SM_LOCK;
    return bd;
}

bdescr *
allocBlockOnNode_lock(uint32_t node)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocBlockOnNode(node);
    RELEASE_SM_LOCK;
    return bd;
}

/* -----------------------------------------------------------------------------
   De-Allocation
   -------------------------------------------------------------------------- */

STATIC_INLINE bdescr *
coalesce_mblocks (bdescr *p)
{
    bdescr *q;

    q = p->link;
    if (q != NULL &&
        MBLOCK_ROUND_DOWN(q) ==
        (StgWord8*)MBLOCK_ROUND_DOWN(p) +
        BLOCKS_TO_MBLOCKS(p->blocks) * MBLOCK_SIZE) {
        // can coalesce
        p->blocks  = MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(p->blocks) +
                                         BLOCKS_TO_MBLOCKS(q->blocks));
        p->link = q->link;
        return p;
    }
    return q;
}

static void
free_mega_group (bdescr *mg)
{
    bdescr *bd, *prev;
    uint32_t node;

    // Find the right place in the free list.  free_mblock_list is
    // sorted by *address*, not by size as the free_list is.
    prev = NULL;
    node = mg->node;
    bd = free_mblock_list[node];
    while (bd && bd->start < mg->start) {
        prev = bd;
        bd = bd->link;
    }

    // coalesce backwards
    if (prev)
    {
        mg->link = prev->link;
        prev->link = mg;
        mg = coalesce_mblocks(prev);
    }
    else
    {
        mg->link = free_mblock_list[node];
        free_mblock_list[node] = mg;
    }
    // coalesce forwards
    coalesce_mblocks(mg);

    IF_DEBUG(sanity, checkFreeListSanity());
}


/* Note [Data races in freeGroup]
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * freeGroup commits a rather serious concurrency sin in its block coalescence
 * logic: When freeing a block it looks at bd->free of the previous/next block
 * to see whether it is allocated. However, the free'ing thread likely does not
 * own the previous/next block, nor do we make any attempt to synchronize with
 * the thread that *does* own it; this makes this access a data race.
 *
 * The original design argued that this was correct because `bd->free` will
 * only take a value of -1 when the block is free and thereby owned by the
 * storage manager. However, this is nevertheless unsafe under the C11 data
 * model, which guarantees no particular semantics for data races.
 *
 * We currently assume (and hope) we won't see torn values and consequently
 * we will never see `bd->free == -1` for an allocated block which we do not
 * own. However, this is all extremely dodgy.
 *
 * This is tracked as #18913.
 */

void
freeGroup(bdescr *p)
{
  StgWord ln;
  uint32_t node;

  // not true in multithreaded GC:
  // ASSERT_SM_LOCK();

  ASSERT(RELAXED_LOAD(&p->free) != (P_)-1);

#if defined(DEBUG)
  for (uint32_t i=0; i < p->blocks; i++) {
      p[i].flags = 0;
  }
#endif

  node = p->node;

  RELAXED_STORE(&p->free, (void *) -1);  /* indicates that this block is free */
  RELAXED_STORE(&p->gen, NULL);
  RELAXED_STORE(&p->gen_no, 0);
  /* fill the block group with garbage if sanity checking is on */
  IF_DEBUG(zero_on_gc, memset(p->start, 0xaa, (W_)p->blocks * BLOCK_SIZE));

  if (p->blocks == 0) barf("freeGroup: block size is zero");

  if (p->blocks >= BLOCKS_PER_MBLOCK)
  {
      StgWord mblocks;

      mblocks = BLOCKS_TO_MBLOCKS(p->blocks);
      // If this is an mgroup, make sure it has the right number of blocks
      ASSERT(p->blocks == MBLOCK_GROUP_BLOCKS(mblocks));

      recordFreedBlocks(node, mblocks * BLOCKS_PER_MBLOCK);

      free_mega_group(p);
      return;
  }

  recordFreedBlocks(node, p->blocks);

  // coalesce forwards
  {
      bdescr *next;
      next = p + p->blocks;

      // See Note [Data races in freeGroup].
      TSAN_ANNOTATE_BENIGN_RACE(&next->free, "freeGroup");
      if (next <= LAST_BDESCR(MBLOCK_ROUND_DOWN(p))
          && RELAXED_LOAD(&next->free) == (P_)-1)
      {
          p->blocks += next->blocks;
          ln = log_2(next->blocks);
          dbl_link_remove(next, &free_list[node][ln]);
          if (p->blocks == BLOCKS_PER_MBLOCK)
          {
              free_mega_group(p);
              return;
          }
          setup_tail(p);
      }
  }

  // coalesce backwards
  if (p != FIRST_BDESCR(MBLOCK_ROUND_DOWN(p)))
  {
      bdescr *prev;
      prev = p - 1;
      if (prev->blocks == 0) prev = prev->link; // find the head

      // See Note [Data races in freeGroup].
      TSAN_ANNOTATE_BENIGN_RACE(&prev->free, "freeGroup");
      if (RELAXED_LOAD(&prev->free) == (P_)-1)
      {
          ln = log_2(prev->blocks);
          dbl_link_remove(prev, &free_list[node][ln]);
          prev->blocks += p->blocks;
          if (prev->blocks >= BLOCKS_PER_MBLOCK)
          {
              free_mega_group(prev);
              return;
          }
          p = prev;
      }
  }

  setup_tail(p);
  free_list_insert(node,p);

  IF_DEBUG(sanity, checkFreeListSanity());
}

void
freeGroup_lock(bdescr *p)
{
    ACQUIRE_SM_LOCK;
    freeGroup(p);
    RELEASE_SM_LOCK;
}

void
freeChain(bdescr *bd)
{
  bdescr *next_bd;
  while (bd != NULL) {
    next_bd = bd->link;
    freeGroup(bd);
    bd = next_bd;
  }
}

void
freeChain_lock(bdescr *bd)
{
    ACQUIRE_SM_LOCK;
    freeChain(bd);
    RELEASE_SM_LOCK;
}

static void
initMBlock(void *mblock, uint32_t node)
{
    bdescr *bd;
    StgWord8 *block;

    /* the first few Bdescr's in a block are unused, so we don't want to
     * put them all on the free list.
     */
    block = FIRST_BLOCK(mblock);
    bd    = FIRST_BDESCR(mblock);

    /* Initialise the start field of each block descriptor
     */
    for (; block <= (StgWord8*)LAST_BLOCK(mblock); bd += 1,
             block += BLOCK_SIZE) {
        bd->start = (void*)block;
        bd->node = node;
    }
}

/* -----------------------------------------------------------------------------
   Stats / metrics
   -------------------------------------------------------------------------- */

W_
countBlocks(bdescr *bd)
{
    W_ n;
    for (n=0; bd != NULL; bd=bd->link) {
        n += bd->blocks;
    }
    return n;
}

// (*1) Just like countBlocks, except that we adjust the count for a
// megablock group so that it doesn't include the extra few blocks
// that would be taken up by block descriptors in the second and
// subsequent megablock.  This is so we can tally the count with the
// number of blocks allocated in the system, for memInventory().
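//
// A worked example, assuming the common case of 256 block-sized slots per
// megablock of which BLOCKS_PER_MBLOCK = 252 are usable: a 2-megablock
// group has bd->blocks == 252 + 256 == 508, and we subtract
// (256 - 252) * (508 / 256) == 4 * 1 == 4, giving 504 == 2 * 252.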
W_
countAllocdBlocks(bdescr *bd)
{
    W_ n;
    for (n=0; bd != NULL; bd=bd->link) {
        n += bd->blocks;

        // hack for megablock groups: see (*1) above
        if (bd->blocks > BLOCKS_PER_MBLOCK) {
            n -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
                * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
        }
    }
    return n;
}

// Returns the number of blocks which were able to be freed
uint32_t returnMemoryToOS(uint32_t n /* megablocks */)
{
    bdescr *bd;
    uint32_t node;
    StgWord size;
    uint32_t init_n;
    init_n = n;

    // ToDo: not fair, we free all the memory starting with node 0.
    for (node = 0; n > 0 && node < n_numa_nodes; node++) {
        bd = free_mblock_list[node];
        while ((n > 0) && (bd != NULL)) {
            size = BLOCKS_TO_MBLOCKS(bd->blocks);
            if (size > n) {
                StgWord newSize = size - n;
                char *freeAddr = MBLOCK_ROUND_DOWN(bd->start);
                freeAddr += newSize * MBLOCK_SIZE;
                bd->blocks = MBLOCK_GROUP_BLOCKS(newSize);
                freeMBlocks(freeAddr, n);
                n = 0;
            }
            else {
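                // this group is no bigger than what we still need to
                // free: release the whole group and move on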
                char *freeAddr = MBLOCK_ROUND_DOWN(bd->start);
                n -= size;
                bd = bd->link;
                freeMBlocks(freeAddr, size);
            }
        }
        free_mblock_list[node] = bd;
    }

    // Ask the OS to release any address space portion
    // that was associated with the just-released MBlocks.
    //
    // Historically we asked the OS directly (via
    // osReleaseFreeMemory()); now the MBlock layer might
    // have a reason to preserve the address space range,
    // so we go through it instead.
    releaseFreeMemory();

    IF_DEBUG(gc,
        if (n != 0) {
            debugBelch("Wanted to free %d more MBlocks than are freeable\n",
                       n);
        }
    );
    return (init_n - n);
}

/* -----------------------------------------------------------------------------
   Debugging
   -------------------------------------------------------------------------- */

#if defined(DEBUG)
static void
check_tail (bdescr *bd)
{
    bdescr *tail = tail_of(bd);

    if (tail != bd)
    {
        ASSERT(tail->blocks == 0);
        ASSERT(tail->free == 0);
        ASSERT(tail->link == bd);
    }
}

void
checkFreeListSanity(void)
{
    bdescr *bd, *prev;
    StgWord ln, min;
    uint32_t node;

    for (node = 0; node < n_numa_nodes; node++) {
        min = 1;
        for (ln = 0; ln < NUM_FREE_LISTS; ln++) {
            IF_DEBUG(block_alloc,
                     debugBelch("free block list [%" FMT_Word "]:\n", ln));

            prev = NULL;
            for (bd = free_list[node][ln]; bd != NULL; prev = bd, bd = bd->link)
            {
                IF_DEBUG(block_alloc,
                         debugBelch("group at %p, length %ld blocks\n",
                                    bd->start, (long)bd->blocks));
                ASSERT(bd->free == (P_)-1);
                ASSERT(bd->blocks > 0 && bd->blocks < BLOCKS_PER_MBLOCK);
                ASSERT(bd->blocks >= min && bd->blocks <= (min*2 - 1));
                ASSERT(bd->link != bd); // catch easy loops
                ASSERT(bd->node == node);

                check_tail(bd);

                if (prev)
                    ASSERT(bd->u.back == prev);
                else
                    ASSERT(bd->u.back == NULL);

                {
                    bdescr *next;
                    next = bd + bd->blocks;
                    if (next <= LAST_BDESCR(MBLOCK_ROUND_DOWN(bd)))
                    {
                        ASSERT(next->free != (P_)-1);
                    }
                }
            }
            min = min << 1;
        }

        prev = NULL;
        for (bd = free_mblock_list[node]; bd != NULL; prev = bd, bd = bd->link)
        {
            IF_DEBUG(block_alloc,
                     debugBelch("mega group at %p, length %ld blocks\n",
                                bd->start, (long)bd->blocks));

            ASSERT(bd->link != bd); // catch easy loops

            if (bd->link != NULL)
            {
                // make sure the list is sorted
                ASSERT(bd->start < bd->link->start);
            }

            ASSERT(bd->blocks >= BLOCKS_PER_MBLOCK);
            ASSERT(MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(bd->blocks))
                   == bd->blocks);

            // make sure we're fully coalesced
            if (bd->link != NULL)
            {
                ASSERT(MBLOCK_ROUND_DOWN(bd->link) !=
                       (StgWord8*)MBLOCK_ROUND_DOWN(bd) +
                       BLOCKS_TO_MBLOCKS(bd->blocks) * MBLOCK_SIZE);
            }
        }
    }
}

W_ /* BLOCKS */
countFreeList(void)
{
  bdescr *bd;
  W_ total_blocks = 0;
  StgWord ln;
  uint32_t node;

  for (node = 0; node < n_numa_nodes; node++) {
      for (ln=0; ln < NUM_FREE_LISTS; ln++) {
          for (bd = free_list[node][ln]; bd != NULL; bd = bd->link) {
              total_blocks += bd->blocks;
          }
      }
      for (bd = free_mblock_list[node]; bd != NULL; bd = bd->link) {
          total_blocks += BLOCKS_PER_MBLOCK * BLOCKS_TO_MBLOCKS(bd->blocks);
          // The caller of this function, memInventory(), expects to match
          // the total number of blocks in the system against mblocks *
          // BLOCKS_PER_MBLOCK, so we must subtract the space for the
          // block descriptors from *every* mblock.
      }
  }
  return total_blocks;
}

void
markBlocks (bdescr *bd)
{
    for (; bd != NULL; bd = bd->link) {
        bd->flags |= BF_KNOWN;
    }
}

void
reportUnmarkedBlocks (void)
{
    void *mblock;
    void *state;
    bdescr *bd;

    debugBelch("Unreachable blocks:\n");
    for (mblock = getFirstMBlock(&state); mblock != NULL;
         mblock = getNextMBlock(&state, mblock)) {
        for (bd = FIRST_BDESCR(mblock); bd <= LAST_BDESCR(mblock); ) {
            if (!(bd->flags & BF_KNOWN) && bd->free != (P_)-1) {
                debugBelch("  %p\n",bd);
            }
            if (bd->blocks >= BLOCKS_PER_MBLOCK) {
                mblock = (StgWord8*)mblock +
                    (BLOCKS_TO_MBLOCKS(bd->blocks) - 1) * MBLOCK_SIZE;
                break;
            } else {
                bd += bd->blocks;
            }
        }
    }
}

#endif