path: root/chromium/v8/src/snapshot/default-serializer-allocator.cc
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/default-serializer-allocator.h"

#include "src/heap/heap-inl.h"
#include "src/snapshot/serializer.h"
#include "src/snapshot/snapshot-source-sink.h"

namespace v8 {
namespace internal {

DefaultSerializerAllocator::DefaultSerializerAllocator(
    Serializer<DefaultSerializerAllocator>* serializer)
    : serializer_(serializer) {
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    pending_chunk_[i] = 0;
  }
}

SerializerReference DefaultSerializerAllocator::Allocate(AllocationSpace space,
                                                         uint32_t size) {
  DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
  DCHECK(size > 0 && size <= MaxChunkSizeInSpace(space));

  // Maps are allocated through AllocateMap.
  DCHECK_NE(MAP_SPACE, space);

  uint32_t new_chunk_size = pending_chunk_[space] + size;
  if (new_chunk_size > MaxChunkSizeInSpace(space)) {
    // The new chunk size would not fit onto a single page. Complete the
    // current chunk and start a new one.
    serializer_->PutNextChunk(space);
    completed_chunks_[space].push_back(pending_chunk_[space]);
    pending_chunk_[space] = 0;
    new_chunk_size = size;
  }
  uint32_t offset = pending_chunk_[space];
  pending_chunk_[space] = new_chunk_size;
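  // The back reference records the space, the index of the still-open chunk
  // (i.e. the number of chunks completed so far in this space), and the
  // offset within that chunk.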
  return SerializerReference::BackReference(
      space, static_cast<uint32_t>(completed_chunks_[space].size()), offset);
}
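
// Illustrative sketch (not part of the build): assuming a hypothetical max
// chunk size of 16 bytes in some space, three allocations behave as follows:
//   Allocate(space, 8) -> chunk 0, offset 0  (pending chunk now 8 bytes)
//   Allocate(space, 8) -> chunk 0, offset 8  (pending chunk now 16 bytes)
//   Allocate(space, 4) -> chunk 1, offset 0  (chunk 0 completed at 16 bytes)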

SerializerReference DefaultSerializerAllocator::AllocateMap() {
  // Maps are allocated one-by-one when deserializing.
  return SerializerReference::MapReference(num_maps_++);
}

SerializerReference DefaultSerializerAllocator::AllocateLargeObject(
    uint32_t size) {
  // Large objects are allocated one-by-one when deserializing. We do not
  // have to keep track of multiple chunks.
  large_objects_total_size_ += size;
  return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
}

SerializerReference DefaultSerializerAllocator::AllocateOffHeapBackingStore() {
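  // Backing-store indices handed out here are never zero: the running index
  // starts above zero, which the DCHECK below asserts.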
  DCHECK_NE(0, seen_backing_stores_index_);
  return SerializerReference::OffHeapBackingStoreReference(
      seen_backing_stores_index_++);
}

#ifdef DEBUG
bool DefaultSerializerAllocator::BackReferenceIsAlreadyAllocated(
    SerializerReference reference) const {
  DCHECK(reference.is_back_reference());
  AllocationSpace space = reference.space();
  if (space == LO_SPACE) {
    return reference.large_object_index() < seen_large_objects_index_;
  } else if (space == MAP_SPACE) {
    return reference.map_index() < num_maps_;
  } else if (space == RO_SPACE &&
             serializer_->isolate()->heap()->deserialization_complete()) {
    // Once the isolate itself has been deserialized, BackReferences for
    // RO_SPACE objects are created without ever allocating, so any such
    // reference is considered valid.
    return true;
  } else {
    size_t chunk_index = reference.chunk_index();
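    // A valid reference points either into the still-open chunk (whose index
    // equals the number of completed chunks) or into a completed chunk, at an
    // offset within that chunk's recorded size.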
    if (chunk_index == completed_chunks_[space].size()) {
      return reference.chunk_offset() < pending_chunk_[space];
    } else {
      return chunk_index < completed_chunks_[space].size() &&
             reference.chunk_offset() < completed_chunks_[space][chunk_index];
    }
  }
}
#endif

std::vector<SerializedData::Reservation>
DefaultSerializerAllocator::EncodeReservations() const {
  std::vector<SerializedData::Reservation> out;

  for (int i = FIRST_SPACE; i < kNumberOfPreallocatedSpaces; i++) {
    for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
      out.emplace_back(completed_chunks_[i][j]);
    }

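    // Also emit the still-pending chunk if it is non-empty, or if the space
    // has no completed chunks at all: every space must contribute at least
    // one reservation for the mark_as_last() call below to tag.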
    if (pending_chunk_[i] > 0 || completed_chunks_[i].size() == 0) {
      out.emplace_back(pending_chunk_[i]);
    }
    out.back().mark_as_last();
  }

  STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
  out.emplace_back(num_maps_ * Map::kSize);
  out.back().mark_as_last();

  STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
  out.emplace_back(large_objects_total_size_);
  out.back().mark_as_last();

  return out;
}
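
// Illustrative sketch (not part of the build): assuming just two preallocated
// spaces, the first with completed chunks {16, 16} and a pending chunk of
// 8 bytes, the second with a single completed chunk {32}, the encoded stream
// would contain
//   16, 16, 8*, 32*, num_maps_ * Map::kSize*, large_objects_total_size_*
// where * marks the reservation flagged by mark_as_last().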

void DefaultSerializerAllocator::OutputStatistics() {
  DCHECK(FLAG_serialization_statistics);

  PrintF("  Spaces (bytes):\n");

  for (int space = FIRST_SPACE; space < kNumberOfSpaces; space++) {
    PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
  }
  PrintF("\n");

  for (int space = FIRST_SPACE; space < kNumberOfPreallocatedSpaces; space++) {
    size_t s = pending_chunk_[space];
    for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
    PrintF("%16" PRIuS, s);
  }

  STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
  PrintF("%16d", num_maps_ * Map::kSize);

  STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
  PrintF("%16d\n", large_objects_total_size_);
}

// static
uint32_t DefaultSerializerAllocator::MaxChunkSizeInSpace(int space) {
  DCHECK(0 <= space && space < kNumberOfPreallocatedSpaces);

  return static_cast<uint32_t>(
      MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space)));
}

}  // namespace internal
}  // namespace v8