author    Greg Farnum <greg@inktank.com>    2013-10-22 15:07:31 -0700
committer Greg Farnum <greg@inktank.com>    2013-10-22 15:07:31 -0700
commit    4e58411014b6c2b31e23091cd1499767fed667d3 (patch)
tree      e11d5f3a36bca5f14f091a912fab0c45c42d6873
parent    1d047b67b3f6415d0ca42e745130b850dd28f9c4 (diff)
download  ceph-4e58411014b6c2b31e23091cd1499767fed667d3.tar.gz
ReplicatedPG: add a cookie to get_backfill_read() to identify requester
This way we can tell if we're getting clashing lock requesters, or if the repeat is appropriate.

Signed-off-by: Greg Farnum <greg@inktank.com>
-rw-r--r--  src/osd/ReplicatedPG.cc  11
-rw-r--r--  src/osd/ReplicatedPG.h   32
2 files changed, 28 insertions(+), 15 deletions(-)
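
The gist of the change, as a minimal standalone sketch (hypothetical simplified types, not the real RWTracker/ReplicatedPG code): get_backfill_read() now records a per-object cookie naming the requester, so a repeated request from the same caller passes the assert while two different callers clashing on one object trip it.

// Minimal sketch of the requester-cookie idea this commit adds.
// BackfillLockTracker and the std::string object keys are stand-ins
// for RWTracker and hobject_t; only the cookie bookkeeping is shown.
#include <cassert>
#include <iostream>
#include <map>
#include <string>

struct BackfillLockTracker {
  enum Requester {            // mirrors the cookies added in ReplicatedPG.h
    NONE = 0,
    BACKEND_INTERFACE,
    PREP_PUSHES,
    BACKFILL,
    PRIMARY,
    MISSING
  };

  std::map<std::string, int> waiting;  // object -> requester cookie

  bool get_backfill_read(const std::string &obj, int requester) {
    int &cookie = waiting[obj];
    // Either nobody holds the backfill read yet, or the same requester
    // is repeating; two *different* requesters on one object is a bug.
    assert(cookie == NONE || cookie == requester);
    cookie = requester;
    return true;  // the real code would also try to take the read lock here
  }

  void drop_backfill_read(const std::string &obj) {
    waiting.erase(obj);  // clear the cookie so a later requester may lock
  }
};

int main() {
  BackfillLockTracker t;
  t.get_backfill_read("obj1", BackfillLockTracker::BACKFILL);
  t.get_backfill_read("obj1", BackfillLockTracker::BACKFILL);  // repeat: OK
  t.drop_backfill_read("obj1");
  t.get_backfill_read("obj1", BackfillLockTracker::PRIMARY);   // new holder: OK
  // t.get_backfill_read("obj1", BackfillLockTracker::MISSING); // would assert
  std::cout << "cookie checks passed\n";
  return 0;
}

Dropping the lock clears the cookie again, which matches the diff below where backfill_waiting_on_read is reset to 0 once the read reference is put.
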
diff --git a/src/osd/ReplicatedPG.cc b/src/osd/ReplicatedPG.cc
index a24ebb945ff..dd9eee341f3 100644
--- a/src/osd/ReplicatedPG.cc
+++ b/src/osd/ReplicatedPG.cc
@@ -281,7 +281,7 @@ void ReplicatedPG::wait_for_missing_object(const hobject_t& soid, OpRequestRef o
else {
dout(7) << "missing " << soid << " v " << v << ", recovering." << dendl;
// nobody can have locks on a missing object, so we must be uncontended
- assert(rw_manager.get_backfill_read(soid));
+ assert(rw_manager.get_backfill_read(soid, RWTracker::MISSING));
PGBackend::RecoveryHandle *h = pgbackend->open_recovery_op();
recover_missing(soid, v, cct->_conf->osd_client_op_priority, h);
pgbackend->run_recovery_op(h, cct->_conf->osd_client_op_priority);
@@ -7653,7 +7653,7 @@ int ReplicatedPG::recover_primary(int max, ThreadPool::TPHandle &handle)
soid = p->second;
}
- if (!rw_manager.get_backfill_read(soid)) {
+ if (!rw_manager.get_backfill_read(soid, RWTracker::PRIMARY)) {
if (!started)
++started; // just lie; this won't impact anything except debug output
break;
@@ -7823,7 +7823,8 @@ int ReplicatedPG::prep_object_replica_pushes(
* In almost all cases, therefore, this lock should be uncontended.
*/
obc->ondisk_read_lock();
- assert(rw_manager.get_backfill_read(soid)); // yep, "backfill" read
+ // yep, we're taking a "backfill" read
+ assert(rw_manager.get_backfill_read(soid, RWTracker::PREP_PUSHES));
pgbackend->recover_object(
soid,
ObjectContextRef(),
@@ -8022,7 +8023,7 @@ int ReplicatedPG::recover_backfill(
} else if (pbi.begin == backfill_info.begin) {
eversion_t& obj_v = backfill_info.objects.begin()->second;
if (pbi.objects.begin()->second != obj_v) {
- if (rw_manager.get_backfill_read(backfill_info.begin)) {
+ if (rw_manager.get_backfill_read(backfill_info.begin, RWTracker::BACKFILL)) {
dout(20) << " replacing peer " << pbi.begin << " with local "
<< obj_v << dendl;
to_push[pbi.begin] = make_pair(obj_v, pbi.objects.begin()->second);
@@ -8046,7 +8047,7 @@ int ReplicatedPG::recover_backfill(
backfill_info.pop_front();
pbi.pop_front();
} else {
- if (rw_manager.get_backfill_read(backfill_info.begin)) {
+ if (rw_manager.get_backfill_read(backfill_info.begin, RWTracker::BACKFILL)) {
dout(20) << " pushing local " << backfill_info.begin << " "
<< backfill_info.objects.begin()->second
<< " to peer osd." << backfill_target << dendl;
diff --git a/src/osd/ReplicatedPG.h b/src/osd/ReplicatedPG.h
index b8aca328304..3e2986117eb 100644
--- a/src/osd/ReplicatedPG.h
+++ b/src/osd/ReplicatedPG.h
@@ -486,10 +486,14 @@ protected:
uint64_t count; /// number of readers or writers
list<OpRequestRef> waiters; /// ops waiting on state change
- /// if set, restart backfill when we can get a read lock
- bool backfill_waiting_on_read;
-
- ObjState() : state(NONE), count(0), backfill_waiting_on_read(false) {}
+ /**
+ * if set, restart backfill when we can get a read lock
+ * The (non-zero) value indicates what requested the lock
+ * so we can be sure it's not getting double-taken
+ */
+ int backfill_waiting_on_read;
+
+ ObjState() : state(NONE), count(0), backfill_waiting_on_read(0) {}
bool get_read(OpRequestRef op) {
if (get_read_lock()) {
return true;
@@ -593,10 +597,18 @@ protected:
obj_state.erase(hoid);
}
}
- bool get_backfill_read(const hobject_t &hoid) {
+ enum {
+ BACKEND_INTERFACE = 1,
+ PREP_PUSHES,
+ BACKFILL,
+ PRIMARY,
+ MISSING
+ };
+ bool get_backfill_read(const hobject_t &hoid, int lock_requester) {
ObjState& obj_locker = obj_state[hoid];
- assert(!obj_locker.backfill_waiting_on_read);
- obj_locker.backfill_waiting_on_read = true;
+ assert(!obj_locker.backfill_waiting_on_read ||
+ obj_locker.backfill_waiting_on_read == lock_requester);
+ obj_locker.backfill_waiting_on_read = lock_requester;
if (obj_locker.get_read_lock()) {
return true;
} // else
@@ -605,12 +617,12 @@ protected:
void drop_backfill_read(const hobject_t &hoid, list<OpRequestRef> *ls) {
map<hobject_t, ObjState>::iterator i = obj_state.find(hoid);
ObjState& obj_locker = i->second;
- assert(obj_locker.backfill_waiting_on_read = true);
+ assert(obj_locker.backfill_waiting_on_read);
obj_locker.put_read(ls);
if (obj_locker.empty())
obj_state.erase(i);
else
- obj_locker.backfill_waiting_on_read = false;
+ obj_locker.backfill_waiting_on_read = 0;
}
} rw_manager;
@@ -645,7 +657,7 @@ protected:
*/
void get_object_recovery_locks(const hobject_t& obj) {
assert(!is_primary());
- rw_manager.get_backfill_read(obj);
+ rw_manager.get_backfill_read(obj, RWTracker::BACKEND_INTERFACE);
}
void drop_object_recovery_locks(const hobject_t& obj) {