// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bounded-page-allocator.h"

namespace v8 {
namespace base {

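// BoundedPageAllocator glues together the two collaborators visible in this
// file: a RegionAllocator that does the bookkeeping of which sub-ranges of
// the bounded [start, start + size) reservation are in use, and an
// underlying v8::PageAllocator that performs the actual permission changes
// and decommits, with mutex_ guarding the region bookkeeping.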
BoundedPageAllocator::BoundedPageAllocator(
    v8::PageAllocator* page_allocator, Address start, size_t size,
    size_t allocate_page_size, PageInitializationMode page_initialization_mode)
    : allocate_page_size_(allocate_page_size),
      commit_page_size_(page_allocator->CommitPageSize()),
      page_allocator_(page_allocator),
      region_allocator_(start, size, allocate_page_size_),
      page_initialization_mode_(page_initialization_mode) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
  DCHECK(IsAligned(allocate_page_size_, commit_page_size_));
}
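
// A minimal construction sketch. |platform_page_allocator|,
// |reservation_base|, and |kReservationSize| are hypothetical stand-ins for
// a real platform allocator and a previously reserved address range; the
// 64 KiB allocate page size is only an example and, per the DCHECKs above,
// must be a multiple of both the underlying AllocatePageSize() and
// CommitPageSize():
//
//   BoundedPageAllocator bpa(
//       platform_page_allocator, reservation_base, kReservationSize,
//       64 * 1024,
//       PageInitializationMode::kAllocatedPagesCanBeUninitialized);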

BoundedPageAllocator::Address BoundedPageAllocator::begin() const {
  return region_allocator_.begin();
}

size_t BoundedPageAllocator::size() const { return region_allocator_.size(); }

void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
                                          size_t alignment,
                                          PageAllocator::Permission access) {
  MutexGuard guard(&mutex_);
  DCHECK(IsAligned(alignment, region_allocator_.page_size()));
  DCHECK(IsAligned(alignment, allocate_page_size_));

  Address address = RegionAllocator::kAllocationFailure;

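  // First, try to satisfy the request at the caller-provided hint; it is
  // honored only if it is suitably aligned and the whole requested range
  // lies inside the managed region.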
  Address hint_address = reinterpret_cast<Address>(hint);
  if (hint_address && IsAligned(hint_address, alignment) &&
      region_allocator_.contains(hint_address, size)) {
    if (region_allocator_.AllocateRegionAt(hint_address, size)) {
      address = hint_address;
    }
  }

  if (address == RegionAllocator::kAllocationFailure) {
    if (alignment <= allocate_page_size_) {
      // TODO(ishell): Consider using randomized version here.
      address = region_allocator_.AllocateRegion(size);
    } else {
      address = region_allocator_.AllocateAlignedRegion(size, alignment);
    }
  }

  if (address == RegionAllocator::kAllocationFailure) {
    return nullptr;
  }

  void* ptr = reinterpret_cast<void*>(address);
  if (!page_allocator_->SetPermissions(ptr, size, access)) {
    // This most likely means that we ran out of memory.
    CHECK_EQ(region_allocator_.FreeRegion(address), size);
    return nullptr;
  }

  return ptr;
}
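
// Example call (a sketch, reusing the hypothetical |bpa| from above): a
// 128 KiB read/write allocation aligned to the 64 KiB allocate page size
// would be
//
//   void* p = bpa.AllocatePages(nullptr, 128 * 1024, 64 * 1024,
//                               PageAllocator::kReadWrite);
//
// A null hint skips the AllocateRegionAt() attempt and lets the
// RegionAllocator pick a free region itself.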

bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
                                           PageAllocator::Permission access) {
  DCHECK(IsAligned(address, allocate_page_size_));
  DCHECK(IsAligned(size, allocate_page_size_));

  {
    MutexGuard guard(&mutex_);
    DCHECK(region_allocator_.contains(address, size));

    if (!region_allocator_.AllocateRegionAt(address, size)) {
      return false;
    }
  }

  void* ptr = reinterpret_cast<void*>(address);
  if (!page_allocator_->SetPermissions(ptr, size, access)) {
    // This most likely means that we ran out of memory.
    CHECK_EQ(region_allocator_.FreeRegion(address), size);
    return false;
  }

  return true;
}
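
// Unlike AllocatePages(), AllocatePagesAt() serves callers that need pages
// at one specific address: it returns false rather than falling back to a
// different region. On SetPermissions() failure the just-claimed region is
// handed back to the region allocator before reporting failure.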

bool BoundedPageAllocator::ReserveForSharedMemoryMapping(void* ptr,
                                                         size_t size) {
  Address address = reinterpret_cast<Address>(ptr);
  DCHECK(IsAligned(address, allocate_page_size_));
  DCHECK(IsAligned(size, commit_page_size_));

  {
    MutexGuard guard(&mutex_);
    DCHECK(region_allocator_.contains(address, size));

    // The region allocator works at allocate-page granularity rather than
    // commit-page granularity, so just over-allocate here; any extra space
    // couldn't be used anyway.
    size_t region_size = RoundUp(size, allocate_page_size_);
    if (!region_allocator_.AllocateRegionAt(
            address, region_size, RegionAllocator::RegionState::kExcluded)) {
      return false;
    }
  }

  CHECK(page_allocator_->SetPermissions(ptr, size,
                                        PageAllocator::Permission::kNoAccess));
  return true;
}
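
// Note that the region above is claimed in the kExcluded state rather than
// the default allocated state; presumably this marks it as outside regular
// allocation bookkeeping while the shared memory mapping owns it (an
// assumption; see region-allocator.h for the authoritative semantics).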

bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
  MutexGuard guard(&mutex_);

  Address address = reinterpret_cast<Address>(raw_address);
  CHECK_EQ(size, region_allocator_.FreeRegion(address));
  if (page_initialization_mode_ ==
      PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
    // When we are required to return zero-initialized pages, we decommit the
    // pages here, which will cause any wired pages to be removed by the OS.
    CHECK(page_allocator_->DecommitPages(raw_address, size));
  } else {
    DCHECK_EQ(page_initialization_mode_,
              PageInitializationMode::kAllocatedPagesCanBeUninitialized);
    CHECK(page_allocator_->SetPermissions(raw_address, size,
                                          PageAllocator::kNoAccess));
  }
  return true;
}
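
// The two branches above mirror the PageInitializationMode: when allocated
// pages must come back zero-initialized, freed pages are decommitted so the
// OS supplies zeroed pages on reuse; otherwise it is enough to revoke
// access with SetPermissions(kNoAccess), with no guarantee about the old
// contents.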

bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
                                        size_t new_size) {
  Address address = reinterpret_cast<Address>(raw_address);
  DCHECK(IsAligned(address, allocate_page_size_));

  DCHECK_LT(new_size, size);
  DCHECK(IsAligned(size - new_size, commit_page_size_));

  // This must be held until the page permissions are updated.
  MutexGuard guard(&mutex_);

  // Check whether this release frees up any whole allocatable pages.
  size_t allocated_size = RoundUp(size, allocate_page_size_);
  size_t new_allocated_size = RoundUp(new_size, allocate_page_size_);

#ifdef DEBUG
  {
    // There must be an allocated region at the given |address|, of a size
    // not smaller than |size|.
    DCHECK_EQ(allocated_size, region_allocator_.CheckRegion(address));
  }
#endif

  if (new_allocated_size < allocated_size) {
    region_allocator_.TrimRegion(address, new_allocated_size);
  }

  // Keep the region in the "used" state; just uncommit some of its pages.
  Address free_address = address + new_size;
  size_t free_size = size - new_size;
  if (page_initialization_mode_ ==
      PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
    // See comment in FreePages().
    CHECK(page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
                                         free_size));
  } else {
    DCHECK_EQ(page_initialization_mode_,
              PageInitializationMode::kAllocatedPagesCanBeUninitialized);
    CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
                                          free_size, PageAllocator::kNoAccess));
  }
  return true;
}
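
// A worked example with hypothetical sizes (allocate_page_size_ = 64 KiB,
// commit_page_size_ = 4 KiB): releasing size = 128 KiB down to
// new_size = 68 KiB gives allocated_size = 128 KiB and
// new_allocated_size = RoundUp(68 KiB, 64 KiB) = 128 KiB, so the region is
// not trimmed, but the 60 KiB tail [address + 68 KiB, address + 128 KiB)
// is still decommitted or protected. Releasing down to new_size = 64 KiB
// would additionally trim the region to a single 64 KiB allocatable page.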

bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
                                          PageAllocator::Permission access) {
  DCHECK(IsAligned(reinterpret_cast<Address>(address), commit_page_size_));
  DCHECK(IsAligned(size, commit_page_size_));
  DCHECK(region_allocator_.contains(reinterpret_cast<Address>(address), size));
  return page_allocator_->SetPermissions(address, size, access);
}

bool BoundedPageAllocator::DiscardSystemPages(void* address, size_t size) {
  return page_allocator_->DiscardSystemPages(address, size);
}

bool BoundedPageAllocator::DecommitPages(void* address, size_t size) {
  return page_allocator_->DecommitPages(address, size);
}
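
// DiscardSystemPages() and DecommitPages() forward straight to the
// underlying page allocator: neither changes which regions are considered
// allocated, so no RegionAllocator bookkeeping (and thus no mutex_) is
// required.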

}  // namespace base
}  // namespace v8