/*************************************************
*     Exim - an Internet mail transport agent    *
*************************************************/

/* Copyright (c) University of Cambridge 1995 - 2018 */
/* Copyright (c) The Exim maintainers 2019 - 2020 */
/* See the file NOTICE for conditions of use and distribution. */

/* Exim gets and frees all its store through these functions. In the original
implementation there was a lot of mallocing and freeing of small bits of store.
The philosophy has now changed to a scheme which includes the concept of
"stacking pools" of store. For the short-lived processes, there isn't any real
need to do any garbage collection, but the stack concept allows quick resetting
in places where this seems sensible.

Obviously the long-running processes (the daemon, the queue runner, and eximon)
must take care not to eat store.

The following different types of store are recognized:

. Long-lived, large blocks: This is implemented by retaining the original
  malloc/free functions, and it used for permanent working buffers and for
  getting blocks to cut up for the other types.

. Long-lived, small blocks: This is used for blocks that have to survive until
  the process exits. It is implemented as a stacking pool (POOL_PERM). This is
  functionally the same as store_malloc(), except that the store can't be
  freed, but I expect it to be more efficient for handling small blocks.

. Short-lived, short blocks: Most of the dynamic store falls into this
  category. It is implemented as a stacking pool (POOL_MAIN) which is reset
  after accepting a message when multiple messages are received by a single
  process. Resetting happens at some other times as well, usually fairly
  locally after some specific processing that needs working store.

. There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
  This means it can be freed when search_tidyup() is called to close down all
  the lookup caching.

. Orthogonal to the three pool types, there are two classes of memory: untainted
  and tainted.  The latter is used for values derived from untrusted input, and
  the string-expansion mechanism refuses to operate on such values (obviously,
  it can expand an untainted value to return a tainted result).  The classes
  are implemented by duplicating the three pool types.  Pool resets are requested
  against the untainted sibling and apply to both siblings.

  Only memory blocks requested for tainted use are regarded as tainted; anything
  else (including stack auto variables) is untainted.  Care is needed when coding
  to not copy untrusted data into untainted memory, as downstream taint-checks
  would be avoided.

  Intermediate layers (eg. the string functions) can test for taint, and use this
  for ensuring that results have the proper state.  For example, the
  string_vformat_trc() routine supporting the string_sprintf() interface will
  recopy a string being built into a tainted allocation if it meets a %s for a
  tainted argument.  Any intermediate-layer function that (can) return a new
  allocation should behave this way; returning a tainted result if any tainted
  content is used.  Intermediate-layer functions (eg. Ustrncpy) that modify
  existing allocations fail if tainted data is written into an untainted area.
  Users of functions that modify existing allocations should check if a tainted
  source and an untainted destination is used, and fail instead (sprintf() being
  the classic case).
*/
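
/* Illustrative sketch, not part of the build: typical caller usage of the
pools, via the usual store_*() wrapper macros.  The mark/reset pairing and
the taint flag on store_get() are the points of interest; the variable names
below are hypothetical.

    rmark reset_point = store_mark();          // covers both twin pools
    uschar * buf = store_get(len, TRUE);       // tainted: to hold smtp input
    uschar * scratch = store_get(64, FALSE);   // untainted working space
    ...
    reset_point = store_reset(reset_point);    // winds back both twins
*/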


#include "exim.h"
/* keep config.h before memcheck.h, for NVALGRIND */
#include "config.h"

#include <sys/mman.h>
#include "memcheck.h"


/* We need to know how to align blocks of data for general use. I'm not sure
how to get an alignment factor in general. In the current world, a value of 8
is probably right, and this is sizeof(double) on some systems and sizeof(void
*) on others, so take the larger of those. Since everything in this expression
is a constant, the compiler should optimize it to a simple constant wherever it
appears (I checked that gcc does do this). */

#define alignment \
  (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))

/* store_reset() will not free the following block if the last used block has
less than this much left in it. */

#define STOREPOOL_MIN_SIZE 256

/* Structure describing the beginning of each big block. */

typedef struct storeblock {
  struct storeblock *next;
  size_t length;
} storeblock;

/* Just in case we find ourselves on a system where the structure above has a
length that is not a multiple of the alignment, set up a macro for the padded
length. */

#define ALIGNED_SIZEOF_STOREBLOCK \
  (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)
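
/* (For example, were sizeof(storeblock) 12 with 8-byte alignment, the padded
size would be 16; on typical 64-bit systems it is 16 already.) */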

/* Size of block to get from malloc to carve up into smaller ones. This
must be a multiple of the alignment. We assume that 8192 is going to be
suitably aligned. */

#define STORE_BLOCK_SIZE (8192 - ALIGNED_SIZEOF_STOREBLOCK)

/* Variables holding data for the local pools of store. The current pool number
is held in store_pool, which is global so that it can be changed from outside.
Setting the initial length values to -1 forces a malloc for the first call,
even if the length is zero (which is used for getting a point to reset to). */

int store_pool = POOL_MAIN;

#define NPOOLS 6
static storeblock *chainbase[NPOOLS];
static storeblock *current_block[NPOOLS];
static void *next_yield[NPOOLS];
static int yield_length[NPOOLS] = { -1, -1, -1,  -1, -1, -1 };

/* pool_malloc holds the amount of memory used by the store pools; this goes up
and down as store is reset or released. nonpool_malloc is the total got by
malloc from other calls; this doesn't go down, because such store is freed
only by pointer and its size is not tracked at free time. */

static int pool_malloc;
static int nonpool_malloc;

/* These per-pool pointers are set by store_get() to its yield, and by
store_reset() to NULL. This enables string_cat() to optimize its store handling
for very long strings. That's why they are global. */

void *store_last_get[NPOOLS];

/* These are purely for stats-gathering */

static int nbytes[NPOOLS];	/* current bytes allocated */
static int maxbytes[NPOOLS];	/* max number reached */
static int nblocks[NPOOLS];	/* current number of blocks allocated */
static int maxblocks[NPOOLS];
static int n_nonpool_blocks;	/* current number of direct store_malloc() blocks */
static int max_nonpool_blocks;
static int max_pool_malloc;	/* max value for pool_malloc */
static int max_nonpool_malloc;	/* max value for nonpool_malloc */


#ifndef COMPILE_UTILITY
static const uschar * pooluse[NPOOLS] = {
[POOL_MAIN] =		US"main",
[POOL_PERM] =		US"perm",
[POOL_SEARCH] =		US"search",
[POOL_TAINT_MAIN] =	US"main",
[POOL_TAINT_PERM] =	US"perm",
[POOL_TAINT_SEARCH] =	US"search",
};
static const uschar * poolclass[NPOOLS] = {
[POOL_MAIN] =		US"untainted",
[POOL_PERM] =		US"untainted",
[POOL_SEARCH] =		US"untainted",
[POOL_TAINT_MAIN] =	US"tainted",
[POOL_TAINT_PERM] =	US"tainted",
[POOL_TAINT_SEARCH] =	US"tainted",
};
#endif


static void * internal_store_malloc(int, const char *, int);
static void   internal_store_free(void *, const char *, int linenumber);

/******************************************************************************/

/* Test if a pointer refers to tainted memory.

Slower version check, for use when platform intermixes malloc and mmap area
addresses. Test against the current-block of all tainted pools first, then all
blocks of all tainted pools.

Return: TRUE iff tainted
*/

BOOL
is_tainted_fn(const void * p)
{
storeblock * b;

for (int pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
  if ((b = current_block[pool]))
    {
    uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
    if (US p >= bc && US p < bc + b->length) return TRUE;
    }

for (int pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
  for (b = chainbase[pool]; b; b = b->next)
    {
    uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
    if (US p >= bc && US p < bc + b->length) return TRUE;
    }
return FALSE;
}


void
die_tainted(const uschar * msg, const uschar * func, int line)
{
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
	msg, func, line);
}



/*************************************************
*       Get a block from the current pool        *
*************************************************/

/* Running out of store is a total disaster. This function is called via the
macro store_get(). It passes back a block of store within the current big
block, getting a new one if necessary. The address is saved in
store_last_get[] for the pool concerned.

Arguments:
  size        amount wanted, bytes
  tainted     class: set to true for untrusted data (eg. from smtp input)
  func        function from which called
  linenumber  line number in source file

Returns:      pointer to store (panic on malloc failure)
*/

void *
store_get_3(int size, BOOL tainted, const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;

/* Round up the size to a multiple of the alignment. Although this looks a
messy statement, because "alignment" is a constant expression, the compiler can
do a reasonable job of optimizing, especially if the value of "alignment" is a
power of two. I checked this with -O2, and gcc did very well, compiling it to 4
instructions on a Sparc (alignment = 8). */

if (size % alignment != 0) size += alignment - (size % alignment);
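/* (With alignment 8, for example, a request for 13 bytes becomes 16.) */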

/* If there isn't room in the current block, get a new one. The minimum
size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
these functions are mostly called for small amounts of store. */

if (size > yield_length[pool])
  {
  int length = size <= STORE_BLOCK_SIZE ? STORE_BLOCK_SIZE : size;
  int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
  storeblock * newblock;

  /* Sometimes store_reset() may leave a block for us; check if we can use it */

  if (  (newblock = current_block[pool])
     && (newblock = newblock->next)
     && newblock->length < length
     )
    {
    /* Give up on this block, because it's too small */
    nblocks[pool]--;
    internal_store_free(newblock, func, linenumber);
    newblock = NULL;
    }

  /* If there was no free block, get a new one */

  if (!newblock)
    {
    if ((nbytes[pool] += mlength) > maxbytes[pool])
      maxbytes[pool] = nbytes[pool];
    if ((pool_malloc += mlength) > max_pool_malloc)	/* Used in pools */
      max_pool_malloc = pool_malloc;
    nonpool_malloc -= mlength;			/* Exclude from overall total */
    if (++nblocks[pool] > maxblocks[pool])
      maxblocks[pool] = nblocks[pool];

    newblock = internal_store_malloc(mlength, func, linenumber);
    newblock->next = NULL;
    newblock->length = length;

    if (!chainbase[pool])
      chainbase[pool] = newblock;
    else
      current_block[pool]->next = newblock;
    }

  current_block[pool] = newblock;
  yield_length[pool] = newblock->length;
  next_yield[pool] =
    (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK);
  (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]);
  }

/* There's (now) enough room in the current block; the yield is the next
pointer. */

store_last_get[pool] = next_yield[pool];

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
    store_last_get[pool], size, func, linenumber);
#endif  /* COMPILE_UTILITY */

(void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size);
/* Update next pointer and number of bytes left in the current block. */

next_yield[pool] = (void *)(CS next_yield[pool] + size);
yield_length[pool] -= size;
return store_last_get[pool];
}
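
/* Illustrative layout, not part of the build: a pool block after two gets.
next_yield[] and yield_length[] always describe the free tail of the current
block:

   current_block[pool]
   v
   [ storeblock hdr ][ get #1 ][ get #2 ][ ......free...... ]
                                         ^ next_yield[pool]
                                         <-yield_length[pool]->
*/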



/*************************************************
*       Get a block from the PERM pool           *
*************************************************/

/* This is a convenience function, useful when a single block is to be
obtained in the PERM pool, regardless of the current store_pool setting.

Arguments:
  size        amount wanted
  tainted     class: set to TRUE for untrusted data (eg. from smtp input)
  func        function from which called
  linenumber  line number in source file

Returns:      pointer to store (panic on malloc failure)
*/

void *
store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber)
{
void *yield;
int old_pool = store_pool;
store_pool = POOL_PERM;
yield = store_get_3(size, tainted, func, linenumber);
store_pool = old_pool;
return yield;
}
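
/* Illustrative sketch, not part of the build: store_get_perm() suits one-off
allocations that must survive pool resets, e.g. saving a copy of a string
(names hypothetical):

    uschar * saved = store_get_perm(Ustrlen(s) + 1, is_tainted(s));
    Ustrcpy(saved, s);
*/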



/*************************************************
*      Extend a block if it is at the top        *
*************************************************/

/* While reading strings of unknown length, it is often the case that the
string is being read into the block at the top of the stack. If it needs to be
extended, it is more efficient just to extend within the top block rather than
allocate a new block and then have to copy the data. This function is provided
for the use of string_cat(), but of course can be used elsewhere too.
The block itself is not expanded; only the top allocation from it.

Arguments:
  ptr        pointer to store block
  tainted    expected taint class of the block
  oldsize    current size of the block, as requested by user
  newsize    new size required
  func       function from which called
  linenumber line number in source file

Returns:     TRUE if the block is at the top of the stack and has been
             extended; FALSE if it isn't at the top of the stack, or cannot
             be extended
*/

BOOL
store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize,
   const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
int inc = newsize - oldsize;
int rounded_oldsize = oldsize;

/* Check that the block being extended was already of the required taint status;
refuse to extend if not. */

if (is_tainted(ptr) != tainted)
  return FALSE;

if (rounded_oldsize % alignment != 0)
  rounded_oldsize += alignment - (rounded_oldsize % alignment);

if (CS ptr + rounded_oldsize != CS (next_yield[pool]) ||
    inc > yield_length[pool] + rounded_oldsize - oldsize)
  return FALSE;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize,
    func, linenumber);
#endif  /* COMPILE_UTILITY */

if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
next_yield[pool] = CS ptr + newsize;
yield_length[pool] -= newsize - rounded_oldsize;
(void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
return TRUE;
}
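
/* Illustrative sketch, not part of the build: the grow-in-place pattern used
by string-building code.  Extension is free when the block is the newest
allocation in its pool; otherwise a larger block must be obtained and the
data copied (names hypothetical):

    if (!store_extend(buf, tainted, oldsize, newsize))
      buf = store_newblock(buf, tainted, newsize, oldsize);
*/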




/*************************************************
*    Back up to a previous point on the stack    *
*************************************************/

/* This function resets the next pointer, freeing any subsequent whole blocks
that are now unused. Call with a cookie obtained from store_mark() only; do
not call with a pointer returned by store_get().  Both the untainted and tainted
pools corresponding to store_pool are reset.

Arguments:
  r           place to back up to
  func        function from which called
  linenumber  line number in source file

Returns:      nothing
*/

static void
internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
{
storeblock * bb;
storeblock * b = current_block[pool];
char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
int newlength, count;
#ifndef COMPILE_UTILITY
int oldmalloc = pool_malloc;
#endif

/* Last store operation was not a get */

store_last_get[pool] = NULL;

/* See if the place is in the current block - as it often will be. Otherwise,
search for the block in which it lies. */

if (CS ptr < bc || CS ptr > bc + b->length)
  {
  for (b = chainbase[pool]; b; b = b->next)
    {
    bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS ptr >= bc && CS ptr <= bc + b->length) break;
    }
  if (!b)
    log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
      "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
  }

/* Back up, rounding to the alignment if necessary. When testing, flatten
the released memory. */

newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
if (debug_store)
  {
  assert_no_variables(ptr, newlength, func, linenumber);
  if (f.running_in_test_harness)
    {
    (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
    memset(ptr, 0xF0, newlength);
    }
  }
#endif
(void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
next_yield[pool] = CS ptr + (newlength % alignment);
count = yield_length[pool];
count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
current_block[pool] = b;

/* Free any subsequent blocks. However, do NOT free the first successor when
the current block has less than STOREPOOL_MIN_SIZE bytes left in it and that
successor has the default size: keeping one spare block avoids "flapping"
(repeatedly freeing and re-mallocing at this boundary). */

if (  yield_length[pool] < STOREPOOL_MIN_SIZE
   && b->next
   && b->next->length == STORE_BLOCK_SIZE)
  {
  b = b->next;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
			func, linenumber);
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
		b->length - ALIGNED_SIZEOF_STOREBLOCK);
  }

bb = b->next;
b->next = NULL;

while ((b = bb))
  {
  int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
			func, linenumber);
#endif
  bb = bb->next;
  nbytes[pool] -= siz;
  pool_malloc -= siz;
  nblocks[pool]--;
  internal_store_free(b, func, linenumber);
  }

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Rst %6p %5d %-14s %4d %d\n", pool, ptr,
    count + oldmalloc - pool_malloc,
    func, linenumber, pool_malloc);
#endif  /* COMPILE_UTILITY */
}


rmark
store_reset_3(rmark r, int pool, const char *func, int linenumber)
{
void ** ptr = r;

if (pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called for pool %d: %s %d\n", pool, func, linenumber);
if (!r)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called with bad mark: %s %d\n", func, linenumber);

internal_store_reset(*ptr, pool + POOL_TAINT_BASE, func, linenumber);
internal_store_reset(ptr,  pool,		   func, linenumber);
return NULL;
}



/* Free tail-end unused allocation.  This lets us allocate a big chunk
early, for cases when we only discover later how much was really needed.

Can be called with a value from store_get(), or an offset after such.  Only
the tainted or untainted pool that serviced the store_get() will be affected.

This is mostly a cut-down version of internal_store_reset().
XXX needs rationalising
*/

void
store_release_above_3(void *ptr, const char *func, int linenumber)
{
/* Search all pools' "current" blocks.  If it isn't one of those,
ignore it (it usually will be). */

for (int pool = 0; pool < nelem(current_block); pool++)
  {
  storeblock * b = current_block[pool];
  char * bc;
  int count, newlength;

  if (!b)
    continue;

  bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
  if (CS ptr < bc || CS ptr > bc + b->length)
    continue;

  /* Last store operation was not a get */

  store_last_get[pool] = NULL;

  /* Back up, rounding to the alignment if necessary. When testing, flatten
  the released memory. */

  newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
  if (debug_store)
    {
    assert_no_variables(ptr, newlength, func, linenumber);
    if (f.running_in_test_harness)
      {
      (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
      memset(ptr, 0xF0, newlength);
      }
    }
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
  next_yield[pool] = CS ptr + (newlength % alignment);
  count = yield_length[pool];
  count = (yield_length[pool] = newlength - (newlength % alignment)) - count;

  /* Cut out the debugging stuff for utilities, but stop picky compilers from
  giving warnings. */

#ifdef COMPILE_UTILITY
  func = func;
  linenumber = linenumber;
#else
  DEBUG(D_memory)
    debug_printf("---%d Rel %6p %5d %-14s %4d %d\n", pool, ptr, count,
      func, linenumber, pool_malloc);
#endif
  return;
  }
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  debug_printf("non-last memory release try: %s %d\n", func, linenumber);
#endif
}



rmark
store_mark_3(const char *func, int linenumber)
{
void ** p;

if (store_pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);

/* Stash a mark for the tainted-twin release, in the untainted twin. Return
a cookie (actually the address in the untainted pool) to the caller.
Reset uses the cookie to recover the tainted-pool mark, winds back the tainted
pool with it, and then winds back the untainted pool with the cookie itself. */

p = store_get_3(sizeof(void *), FALSE, func, linenumber);
*p = store_get_3(0, TRUE, func, linenumber);
return p;
}
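
/* Illustrative picture, not part of the build: the cookie is the address of
a pointer-sized cell in the untainted pool, and that cell holds the matching
reset point in the tainted twin, so a single cookie winds back both pools:

    untainted pool:  ...older data [ cell ] newer data...  <- reset to cell
                                      |
                                      v
    tainted pool:    ...older data [ mark ] newer data...  <- reset to mark
*/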




/************************************************
*             Release store                     *
************************************************/

/* This function checks that the pointer it is given is the first thing in a
block, and if so, releases that block.

Arguments:
  block       block of store to consider
  func        function from which called
  linenumber  line number in source file

Returns:      nothing
*/

static void
store_release_3(void * block, int pool, const char * func, int linenumber)
{
/* It will never be the first block, so no need to check that. */

for (storeblock * b = chainbase[pool]; b; b = b->next)
  {
  storeblock * bb = b->next;
  if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
    {
    int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
    b->next = bb->next;
    nbytes[pool] -= siz;
    pool_malloc -= siz;
    nblocks[pool]--;

    /* Cut out the debugging stuff for utilities, but stop picky compilers
    from giving warnings. */

#ifdef COMPILE_UTILITY
    func = func;
    linenumber = linenumber;
#else
    DEBUG(D_memory)
      debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
	linenumber, pool_malloc);

    if (f.running_in_test_harness)
      memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
#endif  /* COMPILE_UTILITY */

    free(bb);
    return;
    }
  }
}


/************************************************
*             Move store                        *
************************************************/

/* Allocate a new block big enough to expand to the given size and
copy the current data into it.  Free the old one if possible.

This function is specifically provided for use when reading very
long strings, e.g. header lines. When the string gets longer than a
complete block, it gets copied to a new block. It is helpful to free
the old block iff the previous copy of the string is at its start,
and therefore the only thing in it. Otherwise, for very long strings,
dead store can pile up somewhat disastrously. This function checks that
the pointer it is given is the first thing in a block, and that nothing
has been allocated since. If so, releases that block.

Arguments:
  block       current allocation
  tainted     taint class of the block
  newsize     requested new size
  len         number of bytes of existing data to copy across
  func        function from which called
  linenumber  line number in source file

Returns:	new location of data
*/

void *
store_newblock_3(void * block, BOOL tainted, int newsize, int len,
  const char * func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
BOOL release_ok = !tainted && store_last_get[pool] == block;
uschar * newtext;

#if !defined(MACRO_PREDEF) && !defined(COMPILE_UTILITY)
if (is_tainted(block) != tainted)
  die_tainted(US"store_newblock", CUS func, linenumber);
#endif

newtext = store_get(newsize, tainted);
memcpy(newtext, block, len);
if (release_ok) store_release_3(block, pool, func, linenumber);
return (void *)newtext;
}




/*************************************************
*                Malloc store                    *
*************************************************/

/* Running out of store is a total disaster for exim. Some malloc functions
do not run happily on very small sizes, nor do they document this fact. This
function is called via the macro store_malloc().

Arguments:
  size        amount of store wanted
  func        function from which called
  line	      line number in source file

Returns:      pointer to gotten store (panic on failure)
*/

static void *
internal_store_malloc(int size, const char *func, int line)
{
void * yield;

if (size < 16) size = 16;

if (!(yield = malloc((size_t)size)))
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: "
    "called from line %d in %s", size, line, func);

if ((nonpool_malloc += size) > max_nonpool_malloc)
  max_nonpool_malloc = nonpool_malloc;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func; line = line;
#else

/* If running in the test harness, fill the new store with a non-zero pattern,
so as to catch code that wrongly assumes malloc'd store is zeroed. */

if (f.running_in_test_harness)
  memset(yield, 0xF0, (size_t)size);
DEBUG(D_memory) debug_printf("--Malloc %6p %5d bytes\t%-14s %4d\tpool %5d  nonpool %5d\n",
  yield, size, func, line, pool_malloc, nonpool_malloc);
#endif  /* COMPILE_UTILITY */

return yield;
}

void *
store_malloc_3(int size, const char *func, int linenumber)
{
if (n_nonpool_blocks++ > max_nonpool_blocks)
  max_nonpool_blocks = n_nonpool_blocks;
return internal_store_malloc(size, func, linenumber);
}


/************************************************
*             Free store                        *
************************************************/

/* This function is called by the macro store_free().

Arguments:
  block       block of store to free
  func        function from which called
  linenumber  line number in source file

Returns:      nothing
*/

static void
internal_store_free(void * block, const char * func, int linenumber)
{
#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("----Free %6p %-20s %4d\n", block, func, linenumber);
#endif  /* COMPILE_UTILITY */
free(block);
}

void
store_free_3(void * block, const char * func, int linenumber)
{
n_nonpool_blocks--;
internal_store_free(block, func, linenumber);
}

/******************************************************************************/
/* Stats output on process exit */
void
store_exit(void)
{
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
 {
 debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
  (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
 debug_printf("----Exit npools  max: %3d kB\n", max_pool_malloc/1024);
 for (int i = 0; i < NPOOLS; i++)
  debug_printf("----Exit  pool %d max: %3d kB in %d blocks\t%s %s\n",
    i, maxbytes[i]/1024, maxblocks[i], poolclass[i], pooluse[i]);
 }
#endif
}
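
/* Illustrative output, not part of the build: with D_memory debugging set,
store_exit() emits lines of this shape (figures invented):

    ----Exit nonpool max:  12 kB in 4 blocks
    ----Exit npools  max:  96 kB
    ----Exit  pool 0 max:  64 kB in 9 blocks	untainted main
*/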

/* End of store.c */