// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_INL_H_
#define V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_INL_H_

#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/js-atomics-synchronization.h"
#include "src/objects/objects-inl.h"

// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"

namespace v8 {
namespace internal {

#include "torque-generated/src/objects/js-atomics-synchronization-tq-inl.inc"

TQ_OBJECT_CONSTRUCTORS_IMPL(JSSynchronizationPrimitive)

// Returns a typed pointer to the in-object state word so callers can operate
// on it atomically.
std::atomic<JSSynchronizationPrimitive::StateT>*
JSSynchronizationPrimitive::AtomicStatePtr() {
  StateT* state_ptr = reinterpret_cast<StateT*>(field_address(kStateOffset));
  DCHECK(IsAligned(reinterpret_cast<uintptr_t>(state_ptr), sizeof(StateT)));
  return base::AsAtomicPtr(state_ptr);
}

TQ_OBJECT_CONSTRUCTORS_IMPL(JSAtomicsMutex)

CAST_ACCESSOR(JSAtomicsMutex)

// RAII guard that acquires the mutex in its constructor, blocking the current
// thread if necessary, and releases it in its destructor.
JSAtomicsMutex::LockGuard::LockGuard(Isolate* isolate,
                                     Handle<JSAtomicsMutex> mutex)
    : isolate_(isolate), mutex_(mutex) {
  JSAtomicsMutex::Lock(isolate, mutex);
}

JSAtomicsMutex::LockGuard::~LockGuard() { mutex_->Unlock(isolate_); }

// RAII guard that attempts a non-blocking acquisition in its constructor and
// releases the mutex in its destructor only if that acquisition succeeded.
JSAtomicsMutex::TryLockGuard::TryLockGuard(Isolate* isolate,
                                           Handle<JSAtomicsMutex> mutex)
    : isolate_(isolate), mutex_(mutex), locked_(mutex->TryLock()) {}

JSAtomicsMutex::TryLockGuard::~TryLockGuard() {
  if (locked_) mutex_->Unlock(isolate_);
}
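
// Illustrative sketch only, not part of V8's API (hypothetical helper name;
// assumes LockGuard is accessible to the caller): how the RAII guards above
// are intended to be used. The constructor blocks until the mutex is
// acquired and the destructor unlocks it, even on early return.
inline void ExampleWithMutexHeld(Isolate* isolate,
                                 Handle<JSAtomicsMutex> mutex) {
  JSAtomicsMutex::LockGuard guard(isolate, mutex);
  // The mutex is held for the rest of this scope and released when `guard`
  // is destroyed.
}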

// static
void JSAtomicsMutex::Lock(Isolate* requester, Handle<JSAtomicsMutex> mutex) {
  DisallowGarbageCollection no_gc;
  // First try to lock an uncontended mutex, which should be the common case. If
  // this fails, then go to the slow path to possibly put the current thread to
  // sleep.
  //
  // The fast path is done using a weak CAS which may fail spuriously on
  // architectures with load-link/store-conditional instructions.
  std::atomic<StateT>* state = mutex->AtomicStatePtr();
  StateT expected = kUnlocked;
  if (V8_UNLIKELY(!state->compare_exchange_weak(expected, kLockedUncontended,
                                                std::memory_order_acquire,
                                                std::memory_order_relaxed))) {
    LockSlowPath(requester, mutex, state);
  }
  mutex->SetCurrentThreadAsOwner();
}
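
// Illustrative sketch only (hypothetical names, plain std::atomic): the same
// fast-path / slow-path split as Lock above. A weak CAS is enough on the
// fast path because a spurious failure merely diverts into the slow path,
// which retries; it cannot leave the lock in an inconsistent state. V8's
// real slow path may park the thread instead of spinning as done here.
inline void ExampleSpinLock(std::atomic<uint32_t>* state) {
  constexpr uint32_t kExampleUnlocked = 0;
  constexpr uint32_t kExampleLocked = 1;
  // Fast path: a single weak CAS attempt on the presumably uncontended lock.
  uint32_t expected = kExampleUnlocked;
  if (state->compare_exchange_weak(expected, kExampleLocked,
                                   std::memory_order_acquire,
                                   std::memory_order_relaxed)) {
    return;
  }
  // Slow-path stand-in: keep retrying until the lock is acquired.
  do {
    expected = kExampleUnlocked;
  } while (!state->compare_exchange_weak(expected, kExampleLocked,
                                         std::memory_order_acquire,
                                         std::memory_order_relaxed));
}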

// Non-blocking counterpart of Lock: a single strong CAS from kUnlocked to
// kLockedUncontended, returning false without waiting if the mutex is held.
bool JSAtomicsMutex::TryLock() {
  DisallowGarbageCollection no_gc;
  StateT expected = kUnlocked;
  if (V8_LIKELY(AtomicStatePtr()->compare_exchange_strong(
          expected, kLockedUncontended, std::memory_order_acquire,
          std::memory_order_relaxed))) {
    SetCurrentThreadAsOwner();
    return true;
  }
  return false;
}

void JSAtomicsMutex::Unlock(Isolate* requester) {
  DisallowGarbageCollection no_gc;
  // First try to unlock an uncontended mutex, which should be the common
  // case. If this fails, then go to the slow path to wake a waiting thread.
  //
  // In contrast to Lock, the fast path is done using a strong CAS which does
  // not fail spuriously. This simplifies the slow path by guaranteeing that
  // there is at least one waiter to be notified.
  DCHECK(IsCurrentThreadOwner());
  ClearOwnerThread();
  std::atomic<StateT>* state = AtomicStatePtr();
  StateT expected = kLockedUncontended;
  if (V8_LIKELY(state->compare_exchange_strong(expected, kUnlocked,
                                               std::memory_order_release,
                                               std::memory_order_relaxed))) {
    return;
  }
  UnlockSlowPath(requester, state);
}
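
// Illustrative sketch only (hypothetical names): why the unlock fast path
// wants a strong CAS. If this strong CAS fails, the state cannot be
// "locked, uncontended", so the slow path may assume contention is recorded
// in the state and hence that there is a waiter to notify. A weak CAS could
// fail spuriously even in the uncontended case, invalidating that assumption.
inline bool ExampleUnlockFastPath(std::atomic<uint32_t>* state) {
  constexpr uint32_t kExampleUnlocked = 0;
  constexpr uint32_t kExampleLocked = 1;
  uint32_t expected = kExampleLocked;
  // Returns true if the uncontended fast path released the lock; false means
  // the caller must take a slow path that notifies a waiter.
  return state->compare_exchange_strong(expected, kExampleUnlocked,
                                        std::memory_order_release,
                                        std::memory_order_relaxed);
}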

// Returns whether the mutex is currently held by some thread, not necessarily
// the calling one.
bool JSAtomicsMutex::IsHeld() {
  return AtomicStatePtr()->load(std::memory_order_relaxed) & kIsLockedBit;
}

// The owner thread id is set by the acquiring thread and cleared by that same
// thread before the release CAS in Unlock; all accesses use relaxed ordering.
bool JSAtomicsMutex::IsCurrentThreadOwner() {
  bool result = AtomicOwnerThreadIdPtr()->load(std::memory_order_relaxed) ==
                ThreadId::Current().ToInteger();
  DCHECK_IMPLIES(result, IsHeld());
  return result;
}

void JSAtomicsMutex::SetCurrentThreadAsOwner() {
  AtomicOwnerThreadIdPtr()->store(ThreadId::Current().ToInteger(),
                                  std::memory_order_relaxed);
}

void JSAtomicsMutex::ClearOwnerThread() {
  AtomicOwnerThreadIdPtr()->store(ThreadId::Invalid().ToInteger(),
                                  std::memory_order_relaxed);
}

std::atomic<int32_t>* JSAtomicsMutex::AtomicOwnerThreadIdPtr() {
  int32_t* owner_thread_id_ptr =
      reinterpret_cast<int32_t*>(field_address(kOwnerThreadIdOffset));
  return base::AsAtomicPtr(owner_thread_id_ptr);
}

TQ_OBJECT_CONSTRUCTORS_IMPL(JSAtomicsCondition)

CAST_ACCESSOR(JSAtomicsCondition)

}  // namespace internal
}  // namespace v8

#include "src/objects/object-macros-undef.h"

#endif  // V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_INL_H_