// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_INL_H_
#define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_INL_H_

#include "include/cppgc/allocation.h"
#include "include/cppgc/gc-info.h"
#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/heap-object-header.h"

namespace cppgc {
namespace internal {

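// A HeapObjectHeader is allocated directly in front of the object payload it
// describes, which is why the header can be recovered from a payload address
// by subtracting sizeof(HeapObjectHeader), as done below.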
// static
HeapObjectHeader& HeapObjectHeader::FromPayload(void* payload) {
  return *reinterpret_cast<HeapObjectHeader*>(static_cast<Address>(payload) -
                                              sizeof(HeapObjectHeader));
}

// static
const HeapObjectHeader& HeapObjectHeader::FromPayload(const void* payload) {
  return *reinterpret_cast<const HeapObjectHeader*>(
      static_cast<ConstAddress>(payload) - sizeof(HeapObjectHeader));
}

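// Bit layout as used by the accessors below:
//   encoded_high_: GCInfoIndexField and FullyConstructedField (a cleared
//                  fully-constructed bit means "still in construction").
//   encoded_low_:  the encoded object size and MarkBitField.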
HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
#if defined(V8_TARGET_ARCH_64_BIT)
  USE(padding_);
#endif  // defined(V8_TARGET_ARCH_64_BIT)
  DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
  DCHECK_EQ(0u, size & kAllocationMask);
  DCHECK_GE(kMaxSize, size);
  encoded_high_ = GCInfoIndexField::encode(gc_info_index);
  encoded_low_ = EncodeSize(size);
  DCHECK(IsInConstruction());
#ifdef DEBUG
  CheckApiConstants();
#endif  // DEBUG
}
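
// Usage sketch (hypothetical call site, for illustration only): an allocator
// would placement-new the header in front of the object storage and flip the
// fully-constructed bit once the object's constructor has finished:
//   auto* header = new (memory) HeapObjectHeader(size, gc_info_index);
//   void* payload = header->Payload();  // object storage starts here
//   // ... construct the object in |payload|, then:
//   header->MarkAsFullyConstructed();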

Address HeapObjectHeader::Payload() const {
  return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
         sizeof(HeapObjectHeader);
}

template <HeapObjectHeader::AccessMode mode>
GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
  const uint16_t encoded =
      LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
  return GCInfoIndexField::decode(encoded);
}

template <HeapObjectHeader::AccessMode mode>
size_t HeapObjectHeader::GetSize() const {
  // The size is immutable while either marking or sweeping is running, so a
  // relaxed load (if mode == AccessMode::kAtomic) is sufficient.
  uint16_t encoded_low_value =
      LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
  const size_t size = DecodeSize(encoded_low_value);
  return size;
}

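// Note: EncodeSize(size) is OR-ed into encoded_low_ rather than stored with a
// mask, so stale size bits are not cleared; callers presumably only use this
// while the encoded size is still zero. The DCHECK below guards against
// mutating encoded_low_ once the object has been marked.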
void HeapObjectHeader::SetSize(size_t size) {
  DCHECK(!IsMarked());
  encoded_low_ |= EncodeSize(size);
}

template <HeapObjectHeader::AccessMode mode>
bool HeapObjectHeader::IsLargeObject() const {
  return GetSize<mode>() == kLargeObjectSizeInHeader;
}

template <HeapObjectHeader::AccessMode mode>
bool HeapObjectHeader::IsInConstruction() const {
  const uint16_t encoded =
      LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
  return !FullyConstructedField::decode(encoded);
}

void HeapObjectHeader::MarkAsFullyConstructed() {
  MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(Payload());
}

template <HeapObjectHeader::AccessMode mode>
bool HeapObjectHeader::IsMarked() const {
  const uint16_t encoded =
      LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
  return MarkBitField::decode(encoded);
}

template <HeapObjectHeader::AccessMode mode>
void HeapObjectHeader::Unmark() {
  DCHECK(IsMarked<mode>());
  StoreEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>(
      MarkBitField::encode(false), MarkBitField::kMask);
}

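// Attempts to set the mark bit with a single compare-and-swap. Returns true
// iff this call transitioned the object from unmarked to marked; returns
// false if the bit was already set, or if a concurrent marker won the race.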
bool HeapObjectHeader::TryMarkAtomic() {
  auto* atomic_encoded = v8::base::AsAtomicPtr(&encoded_low_);
  uint16_t old_value = atomic_encoded->load(std::memory_order_relaxed);
  const uint16_t new_value = old_value | MarkBitField::encode(true);
  if (new_value == old_value) {
    return false;
  }
  return atomic_encoded->compare_exchange_strong(old_value, new_value,
                                                 std::memory_order_relaxed);
}

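// Loads one 16-bit half of the header. Atomic accesses use the given
// memory_order; non-atomic accesses compile down to plain loads.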
template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
          std::memory_order memory_order>
uint16_t HeapObjectHeader::LoadEncoded() const {
  const uint16_t& half =
      part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
  if (mode == AccessMode::kNonAtomic) return half;
  return v8::base::AsAtomicPtr(&half)->load(memory_order);
}

template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
          std::memory_order memory_order>
void HeapObjectHeader::StoreEncoded(uint16_t bits, uint16_t mask) {
  // Caveat: Not all changes to HeapObjectHeader's bitfields go through
  // StoreEncoded. The following have their own implementations and need to be
  // kept in sync:
  // - HeapObjectHeader::TryMarkAtomic
  // - MarkObjectAsFullyConstructed (API)
  DCHECK_EQ(0u, bits & ~mask);
  uint16_t& half = part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
  if (mode == AccessMode::kNonAtomic) {
    half = (half & ~mask) | bits;
    return;
  }
  // No CAS loop is performed here, under the assumption that no two bits
  // sharing the same encoded half are mutated concurrently.
  auto* atomic_encoded = v8::base::AsAtomicPtr(&half);
  uint16_t value = atomic_encoded->load(std::memory_order_relaxed);
  value = (value & ~mask) | bits;
  atomic_encoded->store(value, memory_order);
}

}  // namespace internal
}  // namespace cppgc

#endif  // V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_INL_H_