path: root/deps/v8/src/snapshot/deserializer-allocator.cc
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/deserializer-allocator.h"

#include "src/heap/heap-inl.h"  // crbug.com/v8/8499

namespace v8 {
namespace internal {

// We know the space requirements before deserialization and can
// pre-allocate the reserved space. During deserialization, all we need
// to do is bump up the allocation pointer for each space within its
// reserved space. This is also used for fixing back references.
// We may have to split the pre-allocation for a space into several
// chunks because it would not fit onto a single page. We do not have to
// keep track of when to move to the next chunk; an opcode in the
// snapshot signals this.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offsets for back references, we
// reference large objects by index.
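// For example, a back reference into a pre-allocated space is encoded as a
// (chunk index, chunk offset) pair and resolves to
// reservations_[space][chunk_index].start + chunk_offset (see GetObject()
// below); a back reference to a large object is simply its position in
// deserialized_large_objects_.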
Address DeserializerAllocator::AllocateRaw(SnapshotSpace space, int size) {
  const int space_number = static_cast<int>(space);
  if (space == SnapshotSpace::kLargeObject) {
    AlwaysAllocateScope scope(heap_);
    // Note that we currently do not support deserialization of large code
    // objects.
    LargeObjectSpace* lo_space = heap_->lo_space();
    AllocationResult result = lo_space->AllocateRaw(size);
    HeapObject obj = result.ToObjectChecked();
    deserialized_large_objects_.push_back(obj);
    return obj.address();
  } else if (space == SnapshotSpace::kMap) {
    DCHECK_EQ(Map::kSize, size);
    return allocated_maps_[next_map_index_++];
  } else {
    DCHECK(IsPreAllocatedSpace(space));
    Address address = high_water_[space_number];
    DCHECK_NE(address, kNullAddress);
    high_water_[space_number] += size;
#ifdef DEBUG
    // Assert that the current reserved chunk is still big enough.
    const Heap::Reservation& reservation = reservations_[space_number];
    int chunk_index = current_chunk_[space_number];
    DCHECK_LE(high_water_[space_number], reservation[chunk_index].end);
#endif
    if (space == SnapshotSpace::kCode)
      MemoryChunk::FromAddress(address)
          ->GetCodeObjectRegistry()
          ->RegisterNewlyAllocatedCodeObject(address);
    return address;
  }
}

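// Allocates |size| bytes in |space|. If an alignment other than kWordAligned
// is pending, over-allocates and pads with a filler object so that the
// returned object is properly aligned.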
Address DeserializerAllocator::Allocate(SnapshotSpace space, int size) {
  Address address;
  HeapObject obj;

  if (next_alignment_ != kWordAligned) {
    const int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
    address = AllocateRaw(space, reserved);
    obj = HeapObject::FromAddress(address);
    // If one of the following assertions fails, then we are deserializing an
    // aligned object when the filler maps have not been deserialized yet.
    // We require filler maps as padding to align the object.
    DCHECK(ReadOnlyRoots(heap_).free_space_map().IsMap());
    DCHECK(ReadOnlyRoots(heap_).one_pointer_filler_map().IsMap());
    DCHECK(ReadOnlyRoots(heap_).two_pointer_filler_map().IsMap());
    obj = heap_->AlignWithFiller(obj, size, reserved, next_alignment_);
    address = obj.address();
    next_alignment_ = kWordAligned;
    return address;
  } else {
    return AllocateRaw(space, size);
  }
}

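// Advances the allocation pointer of |space| to the next reserved chunk once
// the current chunk is exhausted. Triggered by an opcode in the snapshot
// byte stream.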
void DeserializerAllocator::MoveToNextChunk(SnapshotSpace space) {
  DCHECK(IsPreAllocatedSpace(space));
  const int space_number = static_cast<int>(space);
  uint32_t chunk_index = current_chunk_[space_number];
  const Heap::Reservation& reservation = reservations_[space_number];
  // Make sure the current chunk is indeed exhausted.
  CHECK_EQ(reservation[chunk_index].end, high_water_[space_number]);
  // Move to next reserved chunk.
  chunk_index = ++current_chunk_[space_number];
  CHECK_LT(chunk_index, reservation.size());
  high_water_[space_number] = reservation[chunk_index].start;
}

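// Resolves a back reference to a pre-allocated map by its index.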
HeapObject DeserializerAllocator::GetMap(uint32_t index) {
  DCHECK_LT(index, next_map_index_);
  return HeapObject::FromAddress(allocated_maps_[index]);
}

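// Resolves a back reference to a previously deserialized large object by its
// index in deserialized_large_objects_.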
HeapObject DeserializerAllocator::GetLargeObject(uint32_t index) {
  DCHECK_LT(index, deserialized_large_objects_.size());
  return deserialized_large_objects_[index];
}

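// Resolves a back reference into a pre-allocated space from its chunk index
// and the offset within that chunk, skipping any alignment filler that was
// inserted in front of the object.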
HeapObject DeserializerAllocator::GetObject(SnapshotSpace space,
                                            uint32_t chunk_index,
                                            uint32_t chunk_offset) {
  DCHECK(IsPreAllocatedSpace(space));
  const int space_number = static_cast<int>(space);
  DCHECK_LE(chunk_index, current_chunk_[space_number]);
  Address address =
      reservations_[space_number][chunk_index].start + chunk_offset;
  if (next_alignment_ != kWordAligned) {
    int padding = Heap::GetFillToAlign(address, next_alignment_);
    next_alignment_ = kWordAligned;
    DCHECK(padding == 0 || HeapObject::FromAddress(address).IsFiller());
    address += padding;
  }
  return HeapObject::FromAddress(address);
}

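// Splits the flat reservation list from the snapshot into per-space chunk
// lists; a reservation marked is_last() closes the current space.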
void DeserializerAllocator::DecodeReservation(
    const std::vector<SerializedData::Reservation>& res) {
  DCHECK_EQ(0, reservations_[0].size());
  int current_space = 0;
  for (auto& r : res) {
    reservations_[current_space].push_back(
        {r.chunk_size(), kNullAddress, kNullAddress});
    if (r.is_last()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}

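// Asks the heap to reserve the decoded chunks and to pre-allocate the maps,
// then points each pre-allocated space's high water mark at the start of its
// first chunk.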
bool DeserializerAllocator::ReserveSpace() {
#ifdef DEBUG
  for (int i = 0; i < kNumberOfSpaces; ++i) {
    DCHECK_GT(reservations_[i].size(), 0);
  }
#endif  // DEBUG
  DCHECK(allocated_maps_.empty());
  // TODO(v8:7464): Allocate using the off-heap ReadOnlySpace here once
  // implemented.
  if (!heap_->ReserveSpace(reservations_, &allocated_maps_)) {
    return false;
  }
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    high_water_[i] = reservations_[i][0].start;
  }
  return true;
}

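// Returns true if every reserved chunk has been filled to its end and every
// pre-allocated map has been handed out.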
bool DeserializerAllocator::ReservationsAreFullyUsed() const {
  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
    const uint32_t chunk_index = current_chunk_[space];
    if (reservations_[space].size() != chunk_index + 1) {
      return false;
    }
    if (reservations_[space][chunk_index].end != high_water_[space]) {
      return false;
    }
  }
  return (allocated_maps_.size() == next_map_index_);
}

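// Notifies the heap of the deserialized regions, large objects, and maps so
// that they can be accounted for under black allocation.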
void DeserializerAllocator::RegisterDeserializedObjectsForBlackAllocation() {
  heap_->RegisterDeserializedObjectsForBlackAllocation(
      reservations_, deserialized_large_objects_, allocated_maps_);
}

}  // namespace internal
}  // namespace v8