author    Greg Farnum <greg@inktank.com>  2013-10-22 14:30:35 -0700
committer Greg Farnum <greg@inktank.com>  2013-10-22 14:30:35 -0700
commit    12de51f9356e6b605265f47cf4486c0b9d5c41bc (patch)
tree      972eeaf84a1a5cebd1989c503b9c4caaaabfe932
parent    edeb6826ec1b9b737a2963822c48cd1c73985049 (diff)
download  ceph-12de51f9356e6b605265f47cf4486c0b9d5c41bc.tar.gz
ReplicatedPG: RWTracker: always set backfill_waiting_on_read=true
Setting it to true even when we aren't actually waiting doesn't break
anything else, and lets us assert 1) that we're the only one grabbing
the recovery lock, and 2) that we have actually taken a recovery lock
when dropping one. (We aren't always doing so now, so this should
simplify debugging.)

Signed-off-by: Greg Farnum <greg@inktank.com>
 src/osd/ReplicatedPG.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/src/osd/ReplicatedPG.h b/src/osd/ReplicatedPG.h
index 6f0c5c539e3..7290b35e4e6 100644
--- a/src/osd/ReplicatedPG.h
+++ b/src/osd/ReplicatedPG.h
@@ -595,15 +595,17 @@ protected:
}
bool get_backfill_read(const hobject_t &hoid) {
ObjState& obj_locker = obj_state[hoid];
+ assert(!obj_locker.backfill_waiting_on_read);
+ obj_locker.backfill_waiting_on_read = true;
if (obj_locker.get_read_lock()) {
return true;
} // else
- obj_locker.backfill_waiting_on_read = true;
return false;
}
void drop_backfill_read(const hobject_t &hoid, list<OpRequestRef> *ls) {
map<hobject_t, ObjState>::iterator i = obj_state.find(hoid);
ObjState& obj_locker = i->second;
+ assert(obj_locker.backfill_waiting_on_read);
obj_locker.put_read(ls);
if (obj_locker.empty())
obj_state.erase(i);
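
The sketch below illustrates the invariant the commit message describes: the
backfill flag is set before any attempt to take the read lock, so the drop path
can assert it and catch unbalanced drops. This is a minimal standalone example,
not the Ceph implementation: it uses a simplified ObjState, std::string keys in
place of hobject_t, and it drops the requeue list that put_read() takes in the
real code.

    // Standalone sketch of the flag/assert pattern (simplified, not Ceph code).
    #include <cassert>
    #include <map>
    #include <string>

    struct ObjState {                      // stand-in for the real ObjState
      int readers = 0;
      bool write_locked = false;
      bool backfill_waiting_on_read = false;

      bool get_read_lock() {               // fails only if a writer holds the object
        if (write_locked) return false;
        ++readers;
        return true;
      }
      void put_read() { --readers; }
      bool empty() const { return readers == 0 && !write_locked; }
    };

    struct RWTracker {                     // stand-in for the RWTracker in ReplicatedPG
      std::map<std::string, ObjState> obj_state;   // keyed by name instead of hobject_t

      bool get_backfill_read(const std::string &hoid) {
        ObjState &obj_locker = obj_state[hoid];
        assert(!obj_locker.backfill_waiting_on_read);  // only one backfill read at a time
        obj_locker.backfill_waiting_on_read = true;    // set even if the lock is granted now
        return obj_locker.get_read_lock();
      }

      void drop_backfill_read(const std::string &hoid) {
        auto i = obj_state.find(hoid);
        assert(i != obj_state.end());
        ObjState &obj_locker = i->second;
        assert(obj_locker.backfill_waiting_on_read);   // we must hold the lock we are dropping
        obj_locker.backfill_waiting_on_read = false;
        obj_locker.put_read();
        if (obj_locker.empty())
          obj_state.erase(i);
      }
    };

    int main() {
      RWTracker t;
      bool got = t.get_backfill_read("obj"); // lock granted, flag set regardless
      assert(got);
      t.drop_backfill_read("obj");           // assert passes because the flag was set
      return 0;
    }

Setting the flag unconditionally before attempting the lock is what makes both
asserts valid whether the read lock is granted immediately or the caller ends up
waiting for it.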