// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/utils/allocation.h"

#include <stdlib.h>  // For free, malloc.

#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/base/sanitizer/lsan-page-allocator.h"
#include "src/base/vector.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
#include "src/init/vm-cage.h"
#include "src/utils/memcopy.h"

#if V8_LIBC_BIONIC
#include <malloc.h>
#endif

namespace v8 {
namespace internal {

namespace {

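// Allocates |size| bytes aligned to |alignment| using the platform's aligned
// allocation primitive. Returns nullptr on failure.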
void* AlignedAllocInternal(size_t size, size_t alignment) {
  void* ptr;
#if V8_OS_WIN
  ptr = _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
  // posix_memalign is not exposed in some Android versions, so we fall back to
  // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
  ptr = memalign(alignment, size);
#elif V8_OS_STARBOARD
  ptr = SbMemoryAllocateAligned(alignment, size);
#else
  if (posix_memalign(&ptr, alignment, size)) ptr = nullptr;
#endif
  return ptr;
}

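// Resolves the process-wide page allocator once: the embedder's allocator if
// the current platform provides one, otherwise a leaked default
// base::PageAllocator. Under LeakSanitizer, the chosen allocator is wrapped
// so that page allocations are visible to leak detection.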
class PageAllocatorInitializer {
 public:
  PageAllocatorInitializer() {
    page_allocator_ = V8::GetCurrentPlatform()->GetPageAllocator();
    if (page_allocator_ == nullptr) {
      static base::LeakyObject<base::PageAllocator> default_page_allocator;
      page_allocator_ = default_page_allocator.get();
    }
#if defined(LEAK_SANITIZER)
    static_assert(!V8_VIRTUAL_MEMORY_CAGE_BOOL, "Not currently supported");
    static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
        page_allocator_);
    page_allocator_ = lsan_allocator.get();
#endif
  }

  PageAllocator* page_allocator() const { return page_allocator_; }

#ifdef V8_VIRTUAL_MEMORY_CAGE
  PageAllocator* data_cage_page_allocator() const {
    return data_cage_page_allocator_;
  }
#endif

  void SetPageAllocatorForTesting(PageAllocator* allocator) {
    page_allocator_ = allocator;
  }

 private:
  PageAllocator* page_allocator_;
#ifdef V8_VIRTUAL_MEMORY_CAGE
  PageAllocator* data_cage_page_allocator_;
#endif
};

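// Lazily creates a single, intentionally leaked PageAllocatorInitializer for
// the whole process.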
DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
                                GetPageAllocatorInitializer)

// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;

}  // namespace

v8::PageAllocator* GetPlatformPageAllocator() {
  DCHECK_NOT_NULL(GetPageAllocatorInitializer()->page_allocator());
  return GetPageAllocatorInitializer()->page_allocator();
}

#ifdef V8_VIRTUAL_MEMORY_CAGE
// TODO(chromium:1218005) once we disallow disabling the cage, name this e.g.
// "GetPlatformDataPageAllocator", and set it to the PlatformPageAllocator when
// V8_VIRTUAL_MEMORY_CAGE is not defined. Then use that allocator whenever
// allocating ArrayBuffer backing stores inside v8.
v8::PageAllocator* GetPlatformDataCagePageAllocator() {
  if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
    return GetPlatformPageAllocator();
  } else {
    CHECK(GetProcessWideVirtualMemoryCage()->is_initialized());
    return GetProcessWideVirtualMemoryCage()->GetDataCagePageAllocator();
  }
}
#endif

v8::PageAllocator* SetPlatformPageAllocatorForTesting(
    v8::PageAllocator* new_page_allocator) {
  v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
  GetPageAllocatorInitializer()->SetPageAllocatorForTesting(new_page_allocator);
  return old_page_allocator;
}

void* Malloced::operator new(size_t size) {
  void* result = AllocWithRetry(size);
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
  }
  return result;
}

void Malloced::operator delete(void* p) { base::Free(p); }

char* StrDup(const char* str) {
  size_t length = strlen(str);
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

char* StrNDup(const char* str, size_t n) {
  size_t length = strlen(str);
  if (n < length) length = n;
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

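// Tries base::Malloc up to kAllocationTries times, signaling critical memory
// pressure between attempts. Returns nullptr if all attempts fail; callers
// decide whether that is fatal.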
void* AllocWithRetry(size_t size) {
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = base::Malloc(size);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size)) break;
  }
  return result;
}

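// The aligned counterpart of AllocWithRetry. Unlike AllocWithRetry,
// persistent failure here is fatal.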
void* AlignedAlloc(size_t size, size_t alignment) {
  DCHECK_LE(alignof(void*), alignment);
  DCHECK(base::bits::IsPowerOfTwo(alignment));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = AlignedAllocInternal(size, alignment);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size + alignment)) break;
  }
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
  }
  return result;
}

void AlignedFree(void* ptr) {
#if V8_OS_WIN
  _aligned_free(ptr);
#elif V8_LIBC_BIONIC
  // Using free is not correct in general, but for V8_LIBC_BIONIC it is.
  base::Free(ptr);
#elif V8_OS_STARBOARD
  SbMemoryFreeAligned(ptr);
#else
  base::Free(ptr);
#endif
}

size_t AllocatePageSize() {
  return GetPlatformPageAllocator()->AllocatePageSize();
}

size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }

void SetRandomMmapSeed(int64_t seed) {
  GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
}

void* GetRandomMmapAddr() {
  return GetPlatformPageAllocator()->GetRandomMmapAddr();
}

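// Reserves pages from |page_allocator|, optionally randomizing the placement
// hint, and retries under memory pressure like the malloc-based paths above.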
void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
                    size_t alignment, PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_EQ(hint, AlignedAddress(hint, alignment));
  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
  if (FLAG_randomize_all_allocations) {
    hint = page_allocator->GetRandomMmapAddr();
  }
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = page_allocator->AllocatePages(hint, size, alignment, access);
    if (result != nullptr) break;
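    // Report the worst-case footprint of the failed request: the size plus
    // alignment padding, minus the one page of alignment that any allocation
    // gets for free.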
    size_t request_size = size + alignment - page_allocator->AllocatePageSize();
    if (!OnCriticalMemoryPressure(request_size)) break;
  }
  return result;
}

bool FreePages(v8::PageAllocator* page_allocator, void* address,
               const size_t size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
  return page_allocator->FreePages(address, size);
}

bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
                  size_t new_size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_LT(new_size, size);
  DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
  return page_allocator->ReleasePages(address, size, new_size);
}

bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
                    size_t size, PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  return page_allocator->SetPermissions(address, size, access);
}

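// Notifies the embedder of memory pressure, preferring the length-taking
// overload when the embedder implements it. Always returns true, so the
// retry loops above proceed to their next attempt.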
bool OnCriticalMemoryPressure(size_t length) {
  // TODO(bbudge) Rework retry logic once embedders implement the more
  // informative overload.
  if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
  }
  return true;
}

VirtualMemory::VirtualMemory() = default;

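// Reserves an address space region of |size| bytes (rounded up to the
// allocation page size), initially inaccessible. Pages must be made usable
// via SetPermissions. On failure, the object is left unreserved instead of
// crashing.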
VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
                             void* hint, size_t alignment, JitPermission jit)
    : page_allocator_(page_allocator) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
  size_t page_size = page_allocator_->AllocatePageSize();
  alignment = RoundUp(alignment, page_size);
  PageAllocator::Permission permissions =
      jit == kMapAsJittable ? PageAllocator::kNoAccessWillJitLater
                            : PageAllocator::kNoAccess;
  Address address = reinterpret_cast<Address>(AllocatePages(
      page_allocator_, hint, RoundUp(size, page_size), alignment, permissions));
  if (address != kNullAddress) {
    DCHECK(IsAligned(address, alignment));
    region_ = base::AddressRegion(address, size);
  }
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    Free();
  }
}

void VirtualMemory::Reset() {
  page_allocator_ = nullptr;
  region_ = base::AddressRegion();
}

bool VirtualMemory::SetPermissions(Address address, size_t size,
                                   PageAllocator::Permission access) {
  CHECK(InVM(address, size));
  bool result =
      v8::internal::SetPermissions(page_allocator_, address, size, access);
  DCHECK(result);
  return result;
}

size_t VirtualMemory::Release(Address free_start) {
  DCHECK(IsReserved());
  DCHECK(IsAligned(free_start, page_allocator_->CommitPageSize()));
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.

  const size_t old_size = region_.size();
  const size_t free_size = old_size - (free_start - region_.begin());
  CHECK(InVM(free_start, free_size));
  region_.set_size(old_size - free_size);
  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
                     old_size, region_.size()));
  return free_size;
}

void VirtualMemory::Free() {
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;
  Reset();
  // FreePages expects the size to be aligned to the allocation granularity;
  // however, ReleasePages may leave it aligned only to the commit
  // granularity. Align it here.
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
}

void VirtualMemory::FreeReadOnly() {
  DCHECK(IsReserved());
  // The only difference from Free is that this does not call Reset, which
  // would write to the VirtualMemory object.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;

  // FreePages expects the size to be aligned to the allocation granularity;
  // however, ReleasePages may leave it aligned only to the commit
  // granularity. Align it here.
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
}

VirtualMemoryCage::VirtualMemoryCage() = default;

VirtualMemoryCage::~VirtualMemoryCage() { Free(); }

VirtualMemoryCage::VirtualMemoryCage(VirtualMemoryCage&& other) V8_NOEXCEPT {
  *this = std::move(other);
}

VirtualMemoryCage& VirtualMemoryCage::operator=(VirtualMemoryCage&& other)
    V8_NOEXCEPT {
  page_allocator_ = std::move(other.page_allocator_);
  reservation_ = std::move(other.reservation_);
  return *this;
}

namespace {
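// Returns the lowest cage start address >= |reservation_start| for which the
// cage base (start + base_bias_size) is aligned to params.base_alignment.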
inline Address VirtualMemoryCageStart(
    Address reservation_start,
    const VirtualMemoryCage::ReservationParams& params) {
  return RoundUp(reservation_start + params.base_bias_size,
                 params.base_alignment) -
         params.base_bias_size;
}
}  // namespace

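// Initializes the cage's reservation in one of three ways: adopt an
// externally provided reservation, make a plain reservation when any base
// alignment is acceptable, or overreserve twice the required size and carve
// an aligned sub-region out of it (re-reserving exactly where possible).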
bool VirtualMemoryCage::InitReservation(
    const ReservationParams& params, base::AddressRegion existing_reservation) {
  DCHECK(!reservation_.IsReserved());

  const size_t allocate_page_size = params.page_allocator->AllocatePageSize();
  CHECK(IsAligned(params.reservation_size, allocate_page_size));
  CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
        (IsAligned(params.base_alignment, allocate_page_size) &&
         IsAligned(params.base_bias_size, allocate_page_size)));
  CHECK_LE(params.base_bias_size, params.reservation_size);

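  // Pick a hint such that, if the OS honors it, the resulting cage base
  // (hint + base_bias_size) is aligned and near the requested start.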
  Address hint = RoundDown(params.requested_start_hint,
                           RoundUp(params.base_alignment, allocate_page_size)) -
                 RoundUp(params.base_bias_size, allocate_page_size);

  if (!existing_reservation.is_empty()) {
    CHECK_EQ(existing_reservation.size(), params.reservation_size);
    CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
          IsAligned(existing_reservation.begin(), params.base_alignment));
    reservation_ =
        VirtualMemory(params.page_allocator, existing_reservation.begin(),
                      existing_reservation.size());
    base_ = reservation_.address() + params.base_bias_size;
    reservation_is_owned_ = false;
  } else if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
    // When the base does not need to be aligned, the virtual memory
    // reservation can fail only due to OOM.
    VirtualMemory reservation(params.page_allocator, params.reservation_size,
                              reinterpret_cast<void*>(hint));
    if (!reservation.IsReserved()) return false;

    reservation_ = std::move(reservation);
    base_ = reservation_.address() + params.base_bias_size;
    CHECK_EQ(reservation_.size(), params.reservation_size);
  } else {
    // Otherwise, we need to try harder by first overreserving, in the hope
    // of finding a correctly aligned address within the larger reservation.
    const int kMaxAttempts = 4;
    for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
      // Reserve a region of twice the size so that there is an aligned address
      // within it that's usable as the cage base.
      VirtualMemory padded_reservation(params.page_allocator,
                                       params.reservation_size * 2,
                                       reinterpret_cast<void*>(hint));
      if (!padded_reservation.IsReserved()) return false;

      // Find a properly aligned sub-region inside the reservation.
      Address address =
          VirtualMemoryCageStart(padded_reservation.address(), params);
      CHECK(padded_reservation.InVM(address, params.reservation_size));

#if defined(V8_OS_FUCHSIA)
      // Fuchsia does not respect the given hints, so as a workaround we use
      // the overreserved address space region instead of trying to
      // re-reserve a sub-region.
      bool overreserve = true;
#else
      // For the last attempt, use the overreserved region to avoid an OOM
      // crash. This can happen when many isolates are created in parallel
      // and race to reserve the regions.
      bool overreserve = (attempt == kMaxAttempts - 1);
#endif

      if (overreserve) {
        if (padded_reservation.InVM(address, params.reservation_size)) {
          reservation_ = std::move(padded_reservation);
          base_ = address + params.base_bias_size;
          break;
        }
      } else {
        // Now free the padded reservation and immediately try to reserve an
        // exact region at the aligned address. This dance is necessary
        // because the reservation address requirement is more complex than a
        // simple alignment, and not all operating systems support freeing
        // parts of reserved address space regions.
        padded_reservation.Free();

        VirtualMemory reservation(params.page_allocator,
                                  params.reservation_size,
                                  reinterpret_cast<void*>(address));
        if (!reservation.IsReserved()) return false;

        // The reservation could still land somewhere else, but we can accept
        // it if it has the required alignment.
        Address aligned_address =
            VirtualMemoryCageStart(reservation.address(), params);
        if (reservation.address() == aligned_address) {
          reservation_ = std::move(reservation);
          base_ = aligned_address + params.base_bias_size;
          CHECK_EQ(reservation_.size(), params.reservation_size);
          break;
        }
      }
    }
  }
  CHECK_NE(base_, kNullAddress);
  CHECK(IsAligned(base_, params.base_alignment));

  const Address allocatable_base = RoundUp(base_, params.page_size);
  const size_t allocatable_size =
      RoundDown(params.reservation_size - (allocatable_base - base_) -
                    params.base_bias_size,
                params.page_size);
  page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
      params.page_allocator, allocatable_base, allocatable_size,
      params.page_size);
  return true;
}

void VirtualMemoryCage::Free() {
  if (IsReserved()) {
    base_ = kNullAddress;
    page_allocator_.reset();
    if (reservation_is_owned_) {
      reservation_.Free();
    } else {
      // Reservation is owned by the Platform.
      DCHECK(V8_VIRTUAL_MEMORY_CAGE_BOOL);
      reservation_.Reset();
    }
  }
}

}  // namespace internal
}  // namespace v8