path: root/deps/v8/src/heap/heap-allocator.cc

// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/heap-allocator.h"

#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-allocator-inl.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"

namespace v8 {
namespace internal {

class Heap;

HeapAllocator::HeapAllocator(Heap* heap) : heap_(heap) {}

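// Caches raw pointers to every regular space of the owning heap, plus the
// shared-space allocator and the shared large-object space, so allocation
// paths can look them up without going through Heap each time.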
void HeapAllocator::Setup() {
  for (int i = FIRST_SPACE; i <= LAST_SPACE; ++i) {
    spaces_[i] = heap_->space(i);
  }

  shared_old_allocator_ = heap_->shared_space_allocator_.get();
  shared_lo_space_ = heap_->shared_lo_allocation_space();
}

void HeapAllocator::SetReadOnlySpace(ReadOnlySpace* read_only_space) {
  read_only_space_ = read_only_space;
}

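// Handles allocations that exceed the regular object size limit by routing
// them to the large-object space matching the requested AllocationType.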
AllocationResult HeapAllocator::AllocateRawLargeInternal(
    int size_in_bytes, AllocationType allocation, AllocationOrigin origin,
    AllocationAlignment alignment) {
  DCHECK_GT(size_in_bytes, heap_->MaxRegularHeapObjectSize(allocation));
  switch (allocation) {
    case AllocationType::kYoung:
      return new_lo_space()->AllocateRaw(size_in_bytes);
    case AllocationType::kOld:
      return lo_space()->AllocateRaw(size_in_bytes);
    case AllocationType::kCode:
      return code_lo_space()->AllocateRaw(size_in_bytes);
    case AllocationType::kSharedOld:
      return shared_lo_space()->AllocateRawBackground(
          heap_->main_thread_local_heap(), size_in_bytes);
    case AllocationType::kMap:
    case AllocationType::kReadOnly:
    case AllocationType::kSharedMap:
      UNREACHABLE();
  }
}

namespace {

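// Picks the space to collect when an allocation of the given type fails:
// young allocations request a minor GC via NEW_SPACE, everything else a full
// GC via OLD_SPACE.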
constexpr AllocationSpace AllocationTypeToGCSpace(AllocationType type) {
  switch (type) {
    case AllocationType::kYoung:
      return NEW_SPACE;
    case AllocationType::kOld:
    case AllocationType::kCode:
    case AllocationType::kMap:
      // OLD_SPACE indicates full GC.
      return OLD_SPACE;
    case AllocationType::kReadOnly:
    case AllocationType::kSharedMap:
    case AllocationType::kSharedOld:
      UNREACHABLE();
  }
}

}  // namespace

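// Slow path that retries a failed allocation after up to two garbage
// collections (shared-heap GCs for shared allocation types) before returning
// the failure to the caller.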
AllocationResult HeapAllocator::AllocateRawWithLightRetrySlowPath(
    int size, AllocationType allocation, AllocationOrigin origin,
    AllocationAlignment alignment) {
  AllocationResult result = AllocateRaw(size, allocation, origin, alignment);
  if (!result.IsFailure()) {
    return result;
  }

  // Two GCs before returning failure.
  for (int i = 0; i < 2; i++) {
    if (IsSharedAllocationType(allocation)) {
      heap_->CollectGarbageShared(heap_->main_thread_local_heap(),
                                  GarbageCollectionReason::kAllocationFailure);
    } else {
      AllocationSpace space_to_gc = AllocationTypeToGCSpace(allocation);
      heap_->CollectGarbage(space_to_gc,
                            GarbageCollectionReason::kAllocationFailure);
    }
    result = AllocateRaw(size, allocation, origin, alignment);
    if (!result.IsFailure()) {
      return result;
    }
  }
  return result;
}

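// Final slow path: after the light retries fail, performs a last-resort GC
// and retries once more under AlwaysAllocateScope; if that still fails, the
// process is terminated with a fatal out-of-memory error.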
AllocationResult HeapAllocator::AllocateRawWithRetryOrFailSlowPath(
    int size, AllocationType allocation, AllocationOrigin origin,
    AllocationAlignment alignment) {
  AllocationResult result =
      AllocateRawWithLightRetrySlowPath(size, allocation, origin, alignment);
  if (!result.IsFailure()) return result;

  if (IsSharedAllocationType(allocation)) {
    heap_->CollectGarbageShared(heap_->main_thread_local_heap(),
                                GarbageCollectionReason::kLastResort);

    // We need always_allocate() to be true on both the client isolate and the
    // shared space isolate, as it is checked on both code paths.
    AlwaysAllocateScope shared_scope(
        heap_->isolate()->shared_space_isolate()->heap());
    AlwaysAllocateScope client_scope(heap_);
    result = AllocateRaw(size, allocation, origin, alignment);
  } else {
    heap_->CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);

    AlwaysAllocateScope scope(heap_);
    result = AllocateRaw(size, allocation, origin, alignment);
  }

  if (!result.IsFailure()) {
    return result;
  }

  V8::FatalProcessOutOfMemory(heap_->isolate(), "CALL_AND_RETRY_LAST",
                              V8::kHeapOOM);
}

#ifdef DEBUG

void HeapAllocator::IncrementObjectCounters() {
  heap_->isolate()->counters()->objs_since_last_full()->Increment();
  heap_->isolate()->counters()->objs_since_last_young()->Increment();
}

#endif  // DEBUG

#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
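// With V8_ENABLE_ALLOCATION_TIMEOUT enabled, allocations count down a timeout
// and force a garbage collection once it reaches zero; this is a testing and
// fuzzing aid driven by v8_flags.gc_interval and v8_flags.random_gc_interval.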
// static
void HeapAllocator::InitializeOncePerProcess() {
  SetAllocationGcInterval(v8_flags.gc_interval);
}

// static
void HeapAllocator::SetAllocationGcInterval(int allocation_gc_interval) {
  allocation_gc_interval_.store(allocation_gc_interval,
                                std::memory_order_relaxed);
}

// static
std::atomic<int> HeapAllocator::allocation_gc_interval_{-1};

void HeapAllocator::SetAllocationTimeout(int allocation_timeout) {
  // See `allocation_timeout_` for a description. Negative values are mapped to
  // 0 to avoid underflow, since allocations also decrement this value.
  allocation_timeout_ = std::max(0, allocation_timeout);
}

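// Recomputes allocation_timeout_, either from the fuzzer RNG when
// v8_flags.random_gc_interval is set, or otherwise from the process-wide
// allocation_gc_interval_.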
void HeapAllocator::UpdateAllocationTimeout() {
  if (v8_flags.random_gc_interval > 0) {
    const int new_timeout = allocation_timeout_ <= 0
                                ? heap_->isolate()->fuzzer_rng()->NextInt(
                                      v8_flags.random_gc_interval + 1)
                                : allocation_timeout_;
    // Reset the allocation timeout, but make sure to allow at least a few
    // allocations after a collection. The reason is that we have many
    // allocation sequences that assume a garbage collection frees up enough
    // memory for the subsequent allocation attempts to go through.
    constexpr int kFewAllocationsHeadroom = 6;
    allocation_timeout_ = std::max(kFewAllocationsHeadroom, new_timeout);
    return;
  }

  int interval = allocation_gc_interval_.load(std::memory_order_relaxed);
  if (interval >= 0) {
    allocation_timeout_ = interval;
  }
}

#endif  // V8_ENABLE_ALLOCATION_TIMEOUT

}  // namespace internal
}  // namespace v8