summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--lib/device/bcache-utils.c60
-rw-r--r--lib/device/bcache.c425
-rw-r--r--lib/device/bcache.h66
-rw-r--r--lib/device/dev-cache.c2
-rw-r--r--lib/device/dev-io.c25
-rw-r--r--lib/device/device.h6
-rw-r--r--lib/format_text/format-text.c6
-rw-r--r--lib/label/label.c164
-rw-r--r--test/unit/Makefile1
-rw-r--r--test/unit/bcache_t.c544
-rw-r--r--test/unit/bcache_utils_t.c83
-rw-r--r--test/unit/io_engine_t.c213
-rw-r--r--test/unit/units.h2
13 files changed, 1012 insertions, 585 deletions
diff --git a/lib/device/bcache-utils.c b/lib/device/bcache-utils.c
index 25535547c..70d583948 100644
--- a/lib/device/bcache-utils.c
+++ b/lib/device/bcache-utils.c
@@ -39,32 +39,32 @@ static uint64_t _min(uint64_t lhs, uint64_t rhs)
//----------------------------------------------------------------
-void bcache_prefetch_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
+void bcache_prefetch_bytes(struct bcache *cache, struct bcache_dev *dev, uint64_t start, size_t len)
{
block_address bb, be;
byte_range_to_block_range(cache, start, len, &bb, &be);
while (bb < be) {
- bcache_prefetch(cache, fd, bb);
+ bcache_prefetch(cache, dev, bb);
bb++;
}
}
//----------------------------------------------------------------
-bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data)
+bool bcache_read_bytes(struct bcache *cache, struct bcache_dev *dev, uint64_t start, size_t len, void *data)
{
struct block *b;
block_address bb, be;
uint64_t block_size = bcache_block_sectors(cache) << SECTOR_SHIFT;
uint64_t block_offset = start % block_size;
- bcache_prefetch_bytes(cache, fd, start, len);
+ bcache_prefetch_bytes(cache, dev, start, len);
byte_range_to_block_range(cache, start, len, &bb, &be);
for (; bb != be; bb++) {
- if (!bcache_get(cache, fd, bb, 0, &b))
+ if (!bcache_get(cache, dev, bb, 0, &b))
return false;
size_t blen = _min(block_size - block_offset, len);
@@ -86,8 +86,8 @@ bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len,
struct updater;
-typedef bool (*partial_update_fn)(struct updater *u, int fd, block_address bb, uint64_t offset, size_t len);
-typedef bool (*whole_update_fn)(struct updater *u, int fd, block_address bb, block_address be);
+typedef bool (*partial_update_fn)(struct updater *u, struct bcache_dev *dev, block_address bb, uint64_t offset, size_t len);
+typedef bool (*whole_update_fn)(struct updater *u, struct bcache_dev *dev, block_address bb, block_address be);
struct updater {
struct bcache *cache;
@@ -96,7 +96,7 @@ struct updater {
void *data;
};
-static bool _update_bytes(struct updater *u, int fd, uint64_t start, size_t len)
+static bool _update_bytes(struct updater *u, struct bcache_dev *dev, uint64_t start, size_t len)
{
struct bcache *cache = u->cache;
block_address bb, be;
@@ -109,12 +109,12 @@ static bool _update_bytes(struct updater *u, int fd, uint64_t start, size_t len)
// If the last block is partial, we will require a read, so let's
// prefetch it.
if ((start + len) % block_size)
- bcache_prefetch(cache, fd, (start + len) / block_size);
+ bcache_prefetch(cache, dev, (start + len) / block_size);
// First block may be partial
if (block_offset) {
size_t blen = _min(block_size - block_offset, len);
- if (!u->partial_fn(u, fd, bb, block_offset, blen))
+ if (!u->partial_fn(u, dev, bb, block_offset, blen))
return false;
len -= blen;
@@ -126,7 +126,7 @@ static bool _update_bytes(struct updater *u, int fd, uint64_t start, size_t len)
// Now we write out a set of whole blocks
nr_whole = len / block_size;
- if (!u->whole_fn(u, fd, bb, bb + nr_whole))
+ if (!u->whole_fn(u, dev, bb, bb + nr_whole))
return false;
bb += nr_whole;
@@ -136,17 +136,17 @@ static bool _update_bytes(struct updater *u, int fd, uint64_t start, size_t len)
return true;
// Finally we write a partial end block
- return u->partial_fn(u, fd, bb, 0, len);
+ return u->partial_fn(u, dev, bb, 0, len);
}
//----------------------------------------------------------------
-static bool _write_partial(struct updater *u, int fd, block_address bb,
+static bool _write_partial(struct updater *u, struct bcache_dev *dev, block_address bb,
uint64_t offset, size_t len)
{
struct block *b;
- if (!bcache_get(u->cache, fd, bb, GF_DIRTY, &b))
+ if (!bcache_get(u->cache, dev, bb, GF_DIRTY, &b))
return false;
memcpy(((unsigned char *) b->data) + offset, u->data, len);
@@ -156,7 +156,7 @@ static bool _write_partial(struct updater *u, int fd, block_address bb,
return true;
}
-static bool _write_whole(struct updater *u, int fd, block_address bb, block_address be)
+static bool _write_whole(struct updater *u, struct bcache_dev *dev, block_address bb, block_address be)
{
struct block *b;
uint64_t block_size = bcache_block_sectors(u->cache) << SECTOR_SHIFT;
@@ -164,7 +164,7 @@ static bool _write_whole(struct updater *u, int fd, block_address bb, block_addr
for (; bb != be; bb++) {
// We don't need to read the block since we are overwriting
// it completely.
- if (!bcache_get(u->cache, fd, bb, GF_ZERO, &b))
+ if (!bcache_get(u->cache, dev, bb, GF_ZERO, &b))
return false;
memcpy(b->data, u->data, block_size);
u->data = ((unsigned char *) u->data) + block_size;
@@ -174,7 +174,7 @@ static bool _write_whole(struct updater *u, int fd, block_address bb, block_addr
return true;
}
-bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data)
+bool bcache_write_bytes(struct bcache *cache, struct bcache_dev *dev, uint64_t start, size_t len, void *data)
{
struct updater u;
@@ -183,16 +183,16 @@ bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len
u.whole_fn = _write_whole;
u.data = data;
- return _update_bytes(&u, fd, start, len);
+ return _update_bytes(&u, dev, start, len);
}
//----------------------------------------------------------------
-static bool _zero_partial(struct updater *u, int fd, block_address bb, uint64_t offset, size_t len)
+static bool _zero_partial(struct updater *u, struct bcache_dev *dev, block_address bb, uint64_t offset, size_t len)
{
struct block *b;
- if (!bcache_get(u->cache, fd, bb, GF_DIRTY, &b))
+ if (!bcache_get(u->cache, dev, bb, GF_DIRTY, &b))
return false;
memset(((unsigned char *) b->data) + offset, 0, len);
@@ -201,12 +201,12 @@ static bool _zero_partial(struct updater *u, int fd, block_address bb, uint64_t
return true;
}
-static bool _zero_whole(struct updater *u, int fd, block_address bb, block_address be)
+static bool _zero_whole(struct updater *u, struct bcache_dev *dev, block_address bb, block_address be)
{
struct block *b;
for (; bb != be; bb++) {
- if (!bcache_get(u->cache, fd, bb, GF_ZERO, &b))
+ if (!bcache_get(u->cache, dev, bb, GF_ZERO, &b))
return false;
bcache_put(b);
}
@@ -214,7 +214,7 @@ static bool _zero_whole(struct updater *u, int fd, block_address bb, block_addre
return true;
}
-bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
+bool bcache_zero_bytes(struct bcache *cache, struct bcache_dev *dev, uint64_t start, size_t len)
{
struct updater u;
@@ -223,17 +223,17 @@ bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
u.whole_fn = _zero_whole;
u.data = NULL;
- return _update_bytes(&u, fd, start, len);
+ return _update_bytes(&u, dev, start, len);
}
//----------------------------------------------------------------
-static bool _set_partial(struct updater *u, int fd, block_address bb, uint64_t offset, size_t len)
+static bool _set_partial(struct updater *u, struct bcache_dev *dev, block_address bb, uint64_t offset, size_t len)
{
struct block *b;
uint8_t val = *((uint8_t *) u->data);
- if (!bcache_get(u->cache, fd, bb, GF_DIRTY, &b))
+ if (!bcache_get(u->cache, dev, bb, GF_DIRTY, &b))
return false;
memset(((unsigned char *) b->data) + offset, val, len);
@@ -242,14 +242,14 @@ static bool _set_partial(struct updater *u, int fd, block_address bb, uint64_t o
return true;
}
-static bool _set_whole(struct updater *u, int fd, block_address bb, block_address be)
+static bool _set_whole(struct updater *u, struct bcache_dev *dev, block_address bb, block_address be)
{
struct block *b;
uint8_t val = *((uint8_t *) u->data);
uint64_t len = bcache_block_sectors(u->cache) * 512;
for (; bb != be; bb++) {
- if (!bcache_get(u->cache, fd, bb, GF_ZERO, &b))
+ if (!bcache_get(u->cache, dev, bb, GF_ZERO, &b))
return false;
memset((unsigned char *) b->data, val, len);
bcache_put(b);
@@ -258,7 +258,7 @@ static bool _set_whole(struct updater *u, int fd, block_address bb, block_addres
return true;
}
-bool bcache_set_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, uint8_t val)
+bool bcache_set_bytes(struct bcache *cache, struct bcache_dev *dev, uint64_t start, size_t len, uint8_t val)
{
struct updater u;
@@ -267,6 +267,6 @@ bool bcache_set_bytes(struct bcache *cache, int fd, uint64_t start, size_t len,
u.whole_fn = _set_whole;
u.data = &val;
- return _update_bytes(&u, fd, start, len);
+ return _update_bytes(&u, dev, start, len);
}
diff --git a/lib/device/bcache.c b/lib/device/bcache.c
index f315b2a75..f891e560f 100644
--- a/lib/device/bcache.c
+++ b/lib/device/bcache.c
@@ -136,6 +136,7 @@ struct async_engine {
io_context_t aio_context;
struct cb_set *cbs;
unsigned page_mask;
+ bool use_o_direct;
};
static struct async_engine *_to_async(struct io_engine *e)
@@ -158,6 +159,48 @@ static void _async_destroy(struct io_engine *ioe)
free(e);
}
+// Used by both the async and sync engines
+static int _open_common(const char *path, int os_flags)
+{
+ int fd;
+
+ os_flags |= O_NOATIME;
+
+ fd = open(path, os_flags);
+ if (fd < 0) {
+ if ((errno == EBUSY) && (os_flags & O_EXCL))
+ log_error("Can't open %s exclusively. Mounted filesystem?", path);
+ else
+ log_error("Couldn't open %s, errno = %d", path, errno);
+ }
+
+ return fd;
+}
+
+static int _async_open(struct io_engine *ioe, const char *path, unsigned flags)
+{
+ struct async_engine *e = _to_async(ioe);
+ int os_flags = 0;
+
+ if (e->use_o_direct)
+ os_flags |= O_DIRECT;
+
+ if (flags & EF_READ_ONLY)
+ os_flags |= O_RDONLY;
+ else
+ os_flags |= O_RDWR;
+
+ if (flags & EF_EXCL)
+ os_flags |= O_EXCL;
+
+ return _open_common(path, os_flags);
+}
+
+static void _async_close(struct io_engine *e, int fd)
+{
+ close(fd);
+}
+
static bool _async_issue(struct io_engine *ioe, enum dir d, int fd,
sector_t sb, sector_t se, void *data, void *context)
{
@@ -249,7 +292,7 @@ static unsigned _async_max_io(struct io_engine *e)
return MAX_IO;
}
-struct io_engine *create_async_io_engine(void)
+struct io_engine *create_async_io_engine(bool use_o_direct)
{
int r;
struct async_engine *e = malloc(sizeof(*e));
@@ -258,6 +301,8 @@ struct io_engine *create_async_io_engine(void)
return NULL;
e->e.destroy = _async_destroy;
+ e->e.open = _async_open;
+ e->e.close = _async_close;
e->e.issue = _async_issue;
e->e.wait = _async_wait;
e->e.max_io = _async_max_io;
@@ -278,6 +323,7 @@ struct io_engine *create_async_io_engine(void)
}
e->page_mask = sysconf(_SC_PAGESIZE) - 1;
+ e->use_o_direct = use_o_direct;
return &e->e;
}
@@ -285,65 +331,85 @@ struct io_engine *create_async_io_engine(void)
//----------------------------------------------------------------
struct sync_io {
- struct dm_list list;
+ struct dm_list list;
void *context;
};
struct sync_engine {
struct io_engine e;
struct dm_list complete;
+ bool use_o_direct;
};
static struct sync_engine *_to_sync(struct io_engine *e)
{
- return container_of(e, struct sync_engine, e);
+ return container_of(e, struct sync_engine, e);
}
static void _sync_destroy(struct io_engine *ioe)
{
- struct sync_engine *e = _to_sync(ioe);
- free(e);
+ struct sync_engine *e = _to_sync(ioe);
+ free(e);
+}
+
+static int _sync_open(struct io_engine *ioe, const char *path, unsigned flags)
+{
+ struct sync_engine *e = _to_sync(ioe);
+ int os_flags = 0;
+
+ if (e->use_o_direct)
+ os_flags |= O_DIRECT;
+
+ if (flags & EF_READ_ONLY)
+ os_flags |= O_RDONLY;
+ else
+ os_flags |= O_RDWR;
+
+ if (flags & EF_EXCL)
+ os_flags |= O_EXCL;
+
+ return _open_common(path, os_flags);
}
static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
- sector_t sb, sector_t se, void *data, void *context)
+ sector_t sb, sector_t se, void *data, void *context)
{
- int r;
- uint64_t len = (se - sb) * 512, where;
+ int r;
+ uint64_t len = (se - sb) * 512, where;
struct sync_engine *e = _to_sync(ioe);
struct sync_io *io = malloc(sizeof(*io));
if (!io) {
log_warn("unable to allocate sync_io");
- return false;
+ return false;
}
where = sb * 512;
r = lseek(fd, where, SEEK_SET);
if (r < 0) {
- log_warn("unable to seek to position %llu", (unsigned long long) where);
- return false;
+ log_warn("unable to seek to position %llu", (unsigned long long) where);
+ return false;
}
while (len) {
- do {
- if (d == DIR_READ)
- r = read(fd, data, len);
- else
- r = write(fd, data, len);
+ do {
+ if (d == DIR_READ)
+ r = read(fd, data, len);
+ else
+ r = write(fd, data, len);
- } while ((r < 0) && ((r == EINTR) || (r == EAGAIN)));
+ } while ((r < 0) && ((r == EINTR) || (r == EAGAIN)));
- if (r < 0) {
- log_warn("io failed %d", r);
- return false;
- }
+ if (r < 0) {
+ log_warn("io failed %d", r);
+ return false;
+ }
- len -= r;
+ len -= r;
}
if (len) {
- log_warn("short io %u bytes remaining", (unsigned) len);
- return false;
+ log_warn("short io %u bytes remaining", (unsigned) len);
+ return false;
}
@@ -355,7 +421,7 @@ static bool _sync_issue(struct io_engine *ioe, enum dir d, int fd,
static bool _sync_wait(struct io_engine *ioe, io_complete_fn fn)
{
- struct sync_io *io, *tmp;
+ struct sync_io *io, *tmp;
struct sync_engine *e = _to_sync(ioe);
dm_list_iterate_items_safe(io, tmp, &e->complete) {
@@ -369,23 +435,26 @@ static bool _sync_wait(struct io_engine *ioe, io_complete_fn fn)
static unsigned _sync_max_io(struct io_engine *e)
{
- return 1;
+ return 1;
}
-struct io_engine *create_sync_io_engine(void)
+struct io_engine *create_sync_io_engine(bool use_o_direct)
{
struct sync_engine *e = malloc(sizeof(*e));
if (!e)
- return NULL;
+ return NULL;
- e->e.destroy = _sync_destroy;
- e->e.issue = _sync_issue;
- e->e.wait = _sync_wait;
- e->e.max_io = _sync_max_io;
+ e->e.destroy = _sync_destroy;
+ e->e.open = _sync_open;
+ e->e.close = _async_close;
+ e->e.issue = _sync_issue;
+ e->e.wait = _sync_wait;
+ e->e.max_io = _sync_max_io;
+ e->use_o_direct = use_o_direct;
- dm_list_init(&e->complete);
- return &e->e;
+ dm_list_init(&e->complete);
+ return &e->e;
}
//----------------------------------------------------------------
@@ -430,6 +499,20 @@ enum block_flags {
BF_DIRTY = (1 << 1),
};
+struct bcache_dev {
+ // The unit tests are relying on fd being the first element.
+ int fd;
+
+ struct bcache *cache;
+ char *path;
+ unsigned flags;
+
+ // The reference counts tracks users that are holding the dev, plus
+ // all the blocks on that device that are currently in the cache.
+ unsigned holders;
+ unsigned blocks;
+};
+
struct bcache {
sector_t block_sectors;
uint64_t nr_data_blocks;
@@ -465,10 +548,135 @@ struct bcache {
unsigned write_hits;
unsigned write_misses;
unsigned prefetches;
+
+ struct radix_tree *dev_tree;
};
//----------------------------------------------------------------
+static void _free_dev(struct bcache *cache, struct bcache_dev *dev)
+{
+ cache->engine->close(cache->engine, dev->fd);
+ free(dev->path);
+ free(dev);
+}
+
+static void _dev_dtr(void *context, union radix_value v)
+{
+ _free_dev(context, v.ptr);
+}
+
+static void _inc_holders(struct bcache_dev *dev)
+{
+ dev->holders++;
+}
+
+static void _inc_blocks(struct bcache_dev *dev)
+{
+ dev->blocks++;
+}
+
+static void _dev_maybe_close(struct bcache_dev *dev)
+{
+ if (dev->holders || dev->blocks)
+ return;
+
+ if (!radix_tree_remove(dev->cache->dev_tree,
+ (uint8_t *) dev->path,
+ (uint8_t *) dev->path + strlen(dev->path)))
+ log_error("couldn't remove bcache dev: %s", dev->path);
+}
+
+static void _dec_holders(struct bcache_dev *dev)
+{
+ if (!dev->holders)
+ log_error("internal error: holders refcount already at zero (%s)", dev->path);
+ else {
+ dev->holders--;
+ _dev_maybe_close(dev);
+ }
+}
+
+static void _dec_blocks(struct bcache_dev *dev)
+{
+ if (!dev->blocks)
+ log_error("internal error: blocks refcount already at zero (%s)", dev->path);
+ else {
+ dev->blocks--;
+ _dev_maybe_close(dev);
+ }
+}
+
+static bool _eflags(unsigned flags, unsigned flag)
+{
+ return flags & flag;
+}
+
+struct bcache_dev *bcache_get_dev(struct bcache *cache, const char *path, unsigned flags)
+{
+ union radix_value v;
+ struct bcache_dev *dev = NULL;
+
+ if (radix_tree_lookup(cache->dev_tree, (uint8_t *) path, (uint8_t *) (path + strlen(path)), &v)) {
+ dev = v.ptr;
+ _inc_holders(dev);
+
+ if (_eflags(flags, EF_EXCL) && !_eflags(dev->flags, EF_EXCL)) {
+ if (dev->holders != 1) {
+ log_error("you can't update a bcache dev to exclusive with a concurrent holder (%s)",
+ dev->path);
+ _dec_holders(dev);
+ return NULL;
+ }
+
+ bcache_invalidate_dev(cache, dev);
+ _dec_holders(dev);
+ return bcache_get_dev(cache, path, flags);
+ }
+
+ } else {
+ dev = malloc(sizeof(*dev));
+ dev->fd = cache->engine->open(cache->engine, path, flags);
+ if (dev->fd < 0) {
+ log_error("couldn't open bcache_dev(%s)", path);
+ free(dev);
+ return NULL;
+ }
+
+ dev->path = strdup(path);
+ if (!dev->path) {
+ log_error("couldn't copy path when getting new device (%s)", path);
+ cache->engine->close(cache->engine, dev->fd);
+ free(dev);
+ return NULL;
+ }
+ dev->flags = flags;
+
+ dev->cache = cache;
+ dev->holders = 1;
+ dev->blocks = 0;
+
+
+ v.ptr = dev;
+ if (!radix_tree_insert(cache->dev_tree, (uint8_t *) path, (uint8_t *) (path + strlen(path)), v)) {
+ log_error("couldn't insert device into radix tree: %s", path);
+ cache->engine->close(cache->engine, dev->fd);
+ free(dev->path);
+ free(dev);
+ return NULL;
+ }
+ }
+
+ return dev;
+}
+
+void bcache_put_dev(struct bcache_dev *dev)
+{
+ _dec_holders(dev);
+}
+
+//----------------------------------------------------------------
+
struct key_parts {
uint32_t fd;
uint64_t b;
@@ -476,7 +684,7 @@ struct key_parts {
union key {
struct key_parts parts;
- uint8_t bytes[12];
+ uint8_t bytes[12];
};
static struct block *_block_lookup(struct bcache *cache, int fd, uint64_t i)
@@ -495,22 +703,22 @@ static struct block *_block_lookup(struct bcache *cache, int fd, uint64_t i)
static bool _block_insert(struct block *b)
{
- union key k;
- union radix_value v;
+ union key k;
+ union radix_value v;
- k.parts.fd = b->fd;
- k.parts.b = b->index;
- v.ptr = b;
+ k.parts.fd = b->dev->fd;
+ k.parts.b = b->index;
+ v.ptr = b;
return radix_tree_insert(b->cache->rtree, k.bytes, k.bytes + sizeof(k.bytes), v);
}
static void _block_remove(struct block *b)
{
- union key k;
+ union key k;
- k.parts.fd = b->fd;
- k.parts.b = b->index;
+ k.parts.fd = b->dev->fd;
+ k.parts.b = b->index;
radix_tree_remove(b->cache->rtree, k.bytes, k.bytes + sizeof(k.bytes));
}
@@ -649,7 +857,7 @@ static void _issue_low_level(struct block *b, enum dir d)
dm_list_move(&cache->io_pending, &b->list);
- if (!cache->engine->issue(cache->engine, d, b->fd, sb, se, b->data, b)) {
+ if (!cache->engine->issue(cache->engine, d, b->dev->fd, sb, se, b->data, b)) {
/* FIXME: if io_submit() set an errno, return that instead of EIO? */
_complete_io(b, -EIO);
return;
@@ -725,7 +933,7 @@ static struct block *_find_unused_clean_block(struct bcache *cache)
return NULL;
}
-static struct block *_new_block(struct bcache *cache, int fd, block_address i, bool can_wait)
+static struct block *_new_block(struct bcache *cache, struct bcache_dev *dev, block_address i, bool can_wait)
{
struct block *b;
@@ -739,7 +947,7 @@ static struct block *_new_block(struct bcache *cache, int fd, block_address i, b
_wait_io(cache);
} else {
log_error("bcache no new blocks for fd %d index %u",
- fd, (uint32_t) i);
+ dev->fd, (uint32_t) i);
return NULL;
}
}
@@ -749,13 +957,14 @@ static struct block *_new_block(struct bcache *cache, int fd, block_address i, b
dm_list_init(&b->list);
dm_list_init(&b->hash);
b->flags = 0;
- b->fd = fd;
+ _inc_blocks(dev);
+ b->dev = dev;
b->index = i;
b->ref_count = 0;
b->error = 0;
if (!_block_insert(b)) {
- log_error("bcache unable to insert block in radix tree (OOM?)");
+ log_error("bcache unable to insert block in radix tree (OOM?)");
_free_block(b);
return NULL;
}
@@ -795,10 +1004,10 @@ static void _miss(struct bcache *cache, unsigned flags)
}
static struct block *_lookup_or_read_block(struct bcache *cache,
- int fd, block_address i,
+ struct bcache_dev *dev, block_address i,
unsigned flags)
{
- struct block *b = _block_lookup(cache, fd, i);
+ struct block *b = _block_lookup(cache, dev->fd, i);
if (b) {
// FIXME: this is insufficient. We need to also catch a read
@@ -823,7 +1032,7 @@ static struct block *_lookup_or_read_block(struct bcache *cache,
} else {
_miss(cache, flags);
- b = _new_block(cache, fd, i, true);
+ b = _new_block(cache, dev, i, true);
if (b) {
if (flags & GF_ZERO)
_zero_block(b);
@@ -902,7 +1111,7 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks,
dm_list_init(&cache->clean);
dm_list_init(&cache->io_pending);
- cache->rtree = radix_tree_create(NULL, NULL);
+ cache->rtree = radix_tree_create(NULL, NULL);
if (!cache->rtree) {
cache->engine->destroy(cache->engine);
free(cache);
@@ -923,22 +1132,71 @@ struct bcache *bcache_create(sector_t block_sectors, unsigned nr_cache_blocks,
return NULL;
}
+ cache->dev_tree = radix_tree_create(_dev_dtr, cache);
+ if (!cache->dev_tree) {
+ _exit_free_list(cache);
+ cache->engine->destroy(cache->engine);
+ radix_tree_destroy(cache->rtree);
+ free(cache);
+ return NULL;
+ }
+
return cache;
}
+//----------------------------------------------------------------
+
+struct dev_iterator {
+ bool chastised;
+ struct radix_tree_iterator it;
+};
+
+static bool _check_dev(struct radix_tree_iterator *it,
+ uint8_t *kb, uint8_t *ke, union radix_value v)
+{
+ struct dev_iterator *dit = container_of(it, struct dev_iterator, it);
+ struct bcache_dev *dev = v.ptr;
+
+ if (dev->holders) {
+ if (!dit->chastised) {
+ log_warn("Destroying a bcache whilst devices are still held:");
+ dit->chastised = true;
+ }
+
+ log_warn(" %s", dev->path);
+ }
+
+ return true;
+}
+
+static void _check_for_holders(struct bcache *cache)
+{
+ struct dev_iterator dit;
+
+ dit.chastised = false;
+ dit.it.visit = _check_dev;
+ radix_tree_iterate(cache->dev_tree, NULL, NULL, &dit.it);
+}
+
void bcache_destroy(struct bcache *cache)
{
if (cache->nr_locked)
log_warn("some blocks are still locked");
+ _check_for_holders(cache);
+
bcache_flush(cache);
_wait_all(cache);
+
_exit_free_list(cache);
radix_tree_destroy(cache->rtree);
+ radix_tree_destroy(cache->dev_tree);
cache->engine->destroy(cache->engine);
free(cache);
}
+//----------------------------------------------------------------
+
sector_t bcache_block_sectors(struct bcache *cache)
{
return cache->block_sectors;
@@ -954,13 +1212,13 @@ unsigned bcache_max_prefetches(struct bcache *cache)
return cache->max_io;
}
-void bcache_prefetch(struct bcache *cache, int fd, block_address i)
+void bcache_prefetch(struct bcache *cache, struct bcache_dev *dev, block_address i)
{
- struct block *b = _block_lookup(cache, fd, i);
+ struct block *b = _block_lookup(cache, dev->fd, i);
if (!b) {
if (cache->nr_io_pending < cache->max_io) {
- b = _new_block(cache, fd, i, false);
+ b = _new_block(cache, dev, i, false);
if (b) {
cache->prefetches++;
_issue_read(b);
@@ -975,15 +1233,16 @@ static void _recycle_block(struct bcache *cache, struct block *b)
{
_unlink_block(b);
_block_remove(b);
+ _dec_blocks(b->dev);
_free_block(b);
}
-bool bcache_get(struct bcache *cache, int fd, block_address i,
+bool bcache_get(struct bcache *cache, struct bcache_dev *dev, block_address i,
unsigned flags, struct block **result)
{
struct block *b;
- b = _lookup_or_read_block(cache, fd, i, flags);
+ b = _lookup_or_read_block(cache, dev, i, flags);
if (b) {
if (b->error) {
if (b->io_dir == DIR_READ) {
@@ -1005,7 +1264,7 @@ bool bcache_get(struct bcache *cache, int fd, block_address i,
*result = NULL;
- log_error("bcache failed to get block %u fd %d", (uint32_t) i, fd);
+ log_error("bcache failed to get block %u fd %d", (uint32_t) i, dev->fd);
return false;
}
@@ -1069,7 +1328,7 @@ static bool _invalidate_block(struct bcache *cache, struct block *b)
if (b->ref_count) {
log_warn("bcache_invalidate: block (%d, %llu) still held",
- b->fd, (unsigned long long) b->index);
+ b->dev->fd, (unsigned long long) b->index);
return false;
}
@@ -1078,7 +1337,7 @@ static bool _invalidate_block(struct bcache *cache, struct block *b)
_wait_specific(b);
if (b->error)
- return false;
+ return false;
}
_recycle_block(cache, b);
@@ -1086,9 +1345,9 @@ static bool _invalidate_block(struct bcache *cache, struct block *b)
return true;
}
-bool bcache_invalidate(struct bcache *cache, int fd, block_address i)
+bool bcache_invalidate(struct bcache *cache, struct bcache_dev *dev, block_address i)
{
- return _invalidate_block(cache, _block_lookup(cache, fd, i));
+ return _invalidate_block(cache, _block_lookup(cache, dev->fd, i));
}
//----------------------------------------------------------------
@@ -1099,37 +1358,38 @@ struct invalidate_iterator {
};
static bool _writeback_v(struct radix_tree_iterator *it,
- uint8_t *kb, uint8_t *ke, union radix_value v)
+ uint8_t *kb, uint8_t *ke, union radix_value v)
{
struct block *b = v.ptr;
if (_test_flags(b, BF_DIRTY))
- _issue_write(b);
+ _issue_write(b);
- return true;
+ return true;
}
static bool _invalidate_v(struct radix_tree_iterator *it,
- uint8_t *kb, uint8_t *ke, union radix_value v)
+ uint8_t *kb, uint8_t *ke, union radix_value v)
{
struct block *b = v.ptr;
- struct invalidate_iterator *iit = container_of(it, struct invalidate_iterator, it);
+ struct invalidate_iterator *iit = container_of(it, struct invalidate_iterator, it);
if (b->error || _test_flags(b, BF_DIRTY)) {
- log_warn("bcache_invalidate: block (%d, %llu) still dirty",
- b->fd, (unsigned long long) b->index);
- iit->success = false;
- return true;
+ log_warn("bcache_invalidate: block (%d, %llu) still dirty",
+ b->dev->fd, (unsigned long long) b->index);
+ iit->success = false;
+ return true;
}
if (b->ref_count) {
log_warn("bcache_invalidate: block (%d, %llu) still held",
- b->fd, (unsigned long long) b->index);
+ b->dev->fd, (unsigned long long) b->index);
iit->success = false;
return true;
}
_unlink_block(b);
+ _dec_blocks(b->dev);
_free_block(b);
// We can't remove the block from the radix tree yet because
@@ -1137,12 +1397,12 @@ static bool _invalidate_v(struct radix_tree_iterator *it,
return true;
}
-bool bcache_invalidate_fd(struct bcache *cache, int fd)
+bool bcache_invalidate_dev(struct bcache *cache, struct bcache_dev *dev)
{
- union key k;
+ union key k;
struct invalidate_iterator it;
- k.parts.fd = fd;
+ k.parts.fd = dev->fd;
it.it.visit = _writeback_v;
radix_tree_iterate(cache->rtree, k.bytes, k.bytes + sizeof(k.parts.fd), &it.it);
@@ -1156,5 +1416,20 @@ bool bcache_invalidate_fd(struct bcache *cache, int fd)
return it.success;
}
+bool bcache_is_well_formed(struct bcache *cache)
+{
+ if (!radix_tree_is_well_formed(cache->rtree)) {
+ log_error("block tree is badly formed");
+ return false;
+ }
+
+ if (!radix_tree_is_well_formed(cache->dev_tree)) {
+ log_error("dev tree is badly formed");
+ return false;
+ }
+
+ return true;
+}
+
//----------------------------------------------------------------
diff --git a/lib/device/bcache.h b/lib/device/bcache.h
index cb36c3088..168ab3140 100644
--- a/lib/device/bcache.h
+++ b/lib/device/bcache.h
@@ -18,8 +18,8 @@
#include "device_mapper/all.h"
#include <linux/fs.h>
-#include <stdint.h>
#include <stdbool.h>
+#include <stdint.h>
/*----------------------------------------------------------------*/
@@ -39,23 +39,33 @@ typedef uint64_t sector_t;
typedef void io_complete_fn(void *context, int io_error);
+enum {
+ EF_READ_ONLY = 1,
+ EF_EXCL = 2
+};
+
struct io_engine {
void (*destroy)(struct io_engine *e);
+
+ int (*open)(struct io_engine *e, const char *path, unsigned flags);
+ void (*close)(struct io_engine *e, int fd);
+
+ unsigned (*max_io)(struct io_engine *e);
bool (*issue)(struct io_engine *e, enum dir d, int fd,
sector_t sb, sector_t se, void *data, void *context);
bool (*wait)(struct io_engine *e, io_complete_fn fn);
- unsigned (*max_io)(struct io_engine *e);
};
-struct io_engine *create_async_io_engine(void);
-struct io_engine *create_sync_io_engine(void);
+struct io_engine *create_async_io_engine(bool use_o_direct);
+struct io_engine *create_sync_io_engine(bool use_o_direct);
/*----------------------------------------------------------------*/
struct bcache;
+struct bcache_dev;
struct block {
/* clients may only access these three fields */
- int fd;
+ struct bcache_dev *dev;
uint64_t index;
void *data;
@@ -76,6 +86,22 @@ struct bcache *bcache_create(sector_t block_size, unsigned nr_cache_blocks,
struct io_engine *engine);
void bcache_destroy(struct bcache *cache);
+// IMPORTANT: It is up to the caller to normalise the device path. bcache does
+// not detect if two relative path refer to the same file, or if 2 device nodes
+// refer to the same underlying dev.
+
+// There may be more than one holder of a device at a time. But since we cannot
+// promote a dev from being opened non-exclusive to exclusive, there are some
+// restrictions:
+//
+// - You may have concurrent non-exclusive holders.
+// - You may have concurrent exclusive holders.
+// - You may not have mixed holders.
+// - If blocks are in the cache that were acquired by a non exclusive holder,
+// they will all be invalidated if a device is opened exclusively.
+struct bcache_dev *bcache_get_dev(struct bcache *cache, const char *path, unsigned flags);
+void bcache_put_dev(struct bcache_dev *dev);
+
enum bcache_get_flags {
/*
* The block will be zeroed before get_block returns it. This
@@ -101,10 +127,10 @@ unsigned bcache_max_prefetches(struct bcache *cache);
* something like this:
*
* dm_list_iterate_items (dev, &devices)
- * bcache_prefetch(cache, dev->fd, block);
+ * bcache_prefetch(cache, dev, block);
*
* dm_list_iterate_items (dev, &devices) {
- * if (!bcache_get(cache, dev->fd, block, &b))
+ * if (!bcache_get(cache, dev, block, &b))
* fail();
*
* process_block(b);
@@ -114,12 +140,12 @@ unsigned bcache_max_prefetches(struct bcache *cache);
* they complete. But we're talking a very small difference, and it's worth it
* to keep callbacks out of this interface.
*/
-void bcache_prefetch(struct bcache *cache, int fd, block_address index);
+void bcache_prefetch(struct bcache *cache, struct bcache_dev *dev, block_address index);
/*
* Returns true on success.
*/
-bool bcache_get(struct bcache *cache, int fd, block_address index,
+bool bcache_get(struct bcache *cache, struct bcache_dev *dev, block_address index,
unsigned flags, struct block **result);
void bcache_put(struct block *b);
@@ -128,6 +154,7 @@ void bcache_put(struct block *b);
* (return false), if any unlocked dirty data cannot be written back.
*/
bool bcache_flush(struct bcache *cache);
+bool bcache_flush_dev(struct bcache *cache, struct bcache_dev *dev);
/*
* Removes a block from the cache.
@@ -137,26 +164,23 @@ bool bcache_flush(struct bcache *cache);
*
* If the block is currently held false will be returned.
*/
-bool bcache_invalidate(struct bcache *cache, int fd, block_address index);
-
-/*
- * Invalidates all blocks on the given descriptor. Call this before closing
- * the descriptor to make sure everything is written back.
- */
-bool bcache_invalidate_fd(struct bcache *cache, int fd);
+bool bcache_invalidate(struct bcache *cache, struct bcache_dev *dev, block_address index);
+bool bcache_invalidate_dev(struct bcache *cache, struct bcache_dev *dev);
+// For debug only
+bool bcache_is_well_formed(struct bcache *cache);
//----------------------------------------------------------------
// The next four functions are utilities written in terms of the above api.
// Prefetches the blocks neccessary to satisfy a byte range.
-void bcache_prefetch_bytes(struct bcache *cache, int fd, uint64_t start, size_t len);
+void bcache_prefetch_bytes(struct bcache *cache, struct bcache_dev *dev, uint64_t start, size_t len);
// Reads, writes and zeroes bytes. Returns false if errors occur.
-bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data);
-bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data);
-bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len);
-bool bcache_set_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, uint8_t val);
+bool bcache_read_bytes(struct bcache *cache, struct bcache_dev *dev, uint64_t start, size_t len, void *data);
+bool bcache_write_bytes(struct bcache *cache, struct bcache_dev *dev, uint64_t start, size_t len, void *data);
+bool bcache_zero_bytes(struct bcache *cache, struct bcache_dev *dev, uint64_t start, size_t len);
+bool bcache_set_bytes(struct bcache *cache, struct bcache_dev *dev, uint64_t start, size_t len, uint8_t val);
//----------------------------------------------------------------
diff --git a/lib/device/dev-cache.c b/lib/device/dev-cache.c
index 59e00ae9a..d7d6dd22a 100644
--- a/lib/device/dev-cache.c
+++ b/lib/device/dev-cache.c
@@ -1395,6 +1395,8 @@ const char *dev_name_confirmed(struct device *dev, int quiet)
log_debug_devs("Path %s no longer valid for device(%d,%d)",
name, (int) MAJOR(dev->dev),
(int) MINOR(dev->dev));
+
+
else
log_warn("Path %s no longer valid for device(%d,%d)",
name, (int) MAJOR(dev->dev),
diff --git a/lib/device/dev-io.c b/lib/device/dev-io.c
index 43c2c2d27..06ec724ca 100644
--- a/lib/device/dev-io.c
+++ b/lib/device/dev-io.c
@@ -327,9 +327,12 @@ static int _dev_get_size_file(struct device *dev, uint64_t *size)
static int _dev_get_size_dev(struct device *dev, uint64_t *size)
{
- const char *name = dev_name(dev);
- int fd = dev->bcache_fd;
- int do_close = 0;
+ const char *name;
+ int fd;
+
+ name = dev_name_confirmed(dev, 0);
+ if (!name)
+ return 0;
if (dev->size_seqno == _dev_size_seqno) {
log_very_verbose("%s: using cached size %" PRIu64 " sectors",
@@ -338,17 +341,13 @@ static int _dev_get_size_dev(struct device *dev, uint64_t *size)
return 1;
}
- if (fd <= 0) {
- if (!dev_open_readonly(dev))
- return_0;
- fd = dev_fd(dev);
- do_close = 1;
- }
+ fd = open(name, O_RDONLY);
+	if (fd < 0)
+ return 0;
if (ioctl(fd, BLKGETSIZE64, size) < 0) {
log_sys_error("ioctl BLKGETSIZE64", name);
- if (do_close && !dev_close_immediate(dev))
- log_sys_error("close", name);
+ close(fd);
return 0;
}
@@ -358,9 +357,7 @@ static int _dev_get_size_dev(struct device *dev, uint64_t *size)
log_very_verbose("%s: size is %" PRIu64 " sectors", name, *size);
- if (do_close && !dev_close_immediate(dev))
- log_sys_error("close", name);
-
+ close(fd);
return 1;
}
diff --git a/lib/device/device.h b/lib/device/device.h
index e879dbb2e..0898e9343 100644
--- a/lib/device/device.h
+++ b/lib/device/device.h
@@ -31,8 +31,8 @@
#define DEV_USED_FOR_LV 0x00000100 /* Is device used for an LV */
#define DEV_ASSUMED_FOR_LV 0x00000200 /* Is device assumed for an LV */
#define DEV_NOT_O_NOATIME 0x00000400 /* Don't use O_NOATIME */
-#define DEV_IN_BCACHE 0x00000800 /* dev fd is open and used in bcache */
-#define DEV_BCACHE_EXCL 0x00001000 /* bcache_fd should be open EXCL */
+#define DEV_IN_BCACHE 0x00000800 /* dev fd is open and used in bcache */
+#define DEV_BCACHE_EXCL 0x00001000 /* bcache_fd should be open EXCL */
#define DEV_FILTER_AFTER_SCAN 0x00002000 /* apply filter after bcache has data */
#define DEV_FILTER_OUT_SCAN 0x00004000 /* filtered out during label scan */
#define DEV_BCACHE_WRITE 0x00008000 /* bcache_fd is open with RDWR */
@@ -64,11 +64,11 @@ struct device {
/* private */
int fd;
+ struct bcache_dev *bdev; // FIXME: temporary
int open_count;
int phys_block_size;
int block_size;
int read_ahead;
- int bcache_fd;
uint32_t flags;
unsigned size_seqno;
uint64_t size;
diff --git a/lib/format_text/format-text.c b/lib/format_text/format-text.c
index a1fce86bf..b26e7cf55 100644
--- a/lib/format_text/format-text.c
+++ b/lib/format_text/format-text.c
@@ -391,7 +391,7 @@ static int _raw_write_mda_header(const struct format_type *fmt,
sizeof(mdah->checksum_xl)));
if (!dev_write_bytes(dev, start_byte, MDA_HEADER_SIZE, mdah)) {
- log_error("Failed to write mda header to %s fd %d", dev_name(dev), dev->bcache_fd);
+ log_error("Failed to write mda header to %s", dev_name(dev));
return 0;
}
@@ -654,7 +654,7 @@ static int _vg_write_raw(struct format_instance *fid, struct volume_group *vg,
if (!dev_write_bytes(mdac->area.dev, mdac->area.start + mdac->rlocn.offset,
(size_t) (mdac->rlocn.size - new_wrap),
fidtc->raw_metadata_buf)) {
- log_error("Failed to write metadata to %s fd %d", dev_name(mdac->area.dev), mdac->area.dev->bcache_fd);
+ log_error("Failed to write metadata to %s", dev_name(mdac->area.dev));
goto out;
}
@@ -667,7 +667,7 @@ static int _vg_write_raw(struct format_instance *fid, struct volume_group *vg,
if (!dev_write_bytes(mdac->area.dev, mdac->area.start + MDA_HEADER_SIZE,
(size_t) new_wrap,
fidtc->raw_metadata_buf + mdac->rlocn.size - new_wrap)) {
- log_error("Failed to write metadata wrap to %s fd %d", dev_name(mdac->area.dev), mdac->area.dev->bcache_fd);
+ log_error("Failed to write metadata wrap to %s", dev_name(mdac->area.dev));
goto out;
}
}
diff --git a/lib/label/label.c b/lib/label/label.c
index d2c868564..e4b9228d4 100644
--- a/lib/label/label.c
+++ b/lib/label/label.c
@@ -247,9 +247,7 @@ struct bcache *scan_bcache;
static bool _in_bcache(struct device *dev)
{
- if (!dev)
- return NULL;
- return (dev->flags & DEV_IN_BCACHE) ? true : false;
+ return dev && (dev->flags & DEV_IN_BCACHE);
}
static struct labeller *_find_lvm_header(struct device *dev,
@@ -435,23 +433,22 @@ static int _scan_dev_open(struct device *dev)
const char *name;
struct stat sbuf;
int retried = 0;
- int flags = 0;
- int fd;
+ unsigned flags = 0;
+ struct bcache_dev *bdev;
if (!dev)
return 0;
if (dev->flags & DEV_IN_BCACHE) {
- /* Shouldn't happen */
- log_error("Device open %s has DEV_IN_BCACHE already set", dev_name(dev));
- dev->flags &= ~DEV_IN_BCACHE;
+ /* Shouldn't happen */
+ log_error("Device open %s has DEV_IN_BCACHE already set", dev_name(dev));
+ dev->flags &= ~DEV_IN_BCACHE;
}
- if (dev->bcache_fd > 0) {
- /* Shouldn't happen */
- log_error("Device open %s already open with fd %d",
- dev_name(dev), dev->bcache_fd);
- return 0;
+	if (dev->bdev) {
+ /* Shouldn't happen */
+ log_error("Device open %s already open", dev_name(dev));
+ return 0;
}
/*
@@ -467,9 +464,6 @@ static int _scan_dev_open(struct device *dev)
name_sl = dm_list_item(name_list, struct dm_str_list);
name = name_sl->str;
- flags |= O_DIRECT;
- flags |= O_NOATIME;
-
/*
* FIXME: udev is a train wreck when we open RDWR and close, so we
* need to only use RDWR when we actually need to write, and use
@@ -477,63 +471,56 @@ static int _scan_dev_open(struct device *dev)
* just open with RDWR by default.
*/
- if (dev->flags & DEV_BCACHE_EXCL) {
- flags |= O_EXCL;
- flags |= O_RDWR;
- } else if (dev->flags & DEV_BCACHE_WRITE) {
- flags |= O_RDWR;
- } else {
- flags |= O_RDONLY;
- }
+ if (dev->flags & DEV_BCACHE_EXCL)
+ flags |= EF_EXCL;
-retry_open:
+ else if (!(dev->flags & DEV_BCACHE_WRITE))
+ flags |= EF_READ_ONLY;
- fd = open(name, flags, 0777);
+retry_open:
- if (fd < 0) {
- if ((errno == EBUSY) && (flags & O_EXCL)) {
- log_error("Can't open %s exclusively. Mounted filesystem?",
- dev_name(dev));
- } else {
- int major, minor;
+ bdev = bcache_get_dev(scan_bcache, name, flags);
+ if (!bdev) {
+ int major, minor;
- /*
- * Shouldn't happen, if it does, print stat info to help figure
- * out what's wrong.
- */
+ /*
+ * Shouldn't happen, if it does, print stat info to help figure
+ * out what's wrong.
+ */
- major = (int)MAJOR(dev->dev);
- minor = (int)MINOR(dev->dev);
+ major = (int)MAJOR(dev->dev);
+ minor = (int)MINOR(dev->dev);
- log_error("Device open %s %d:%d failed errno %d", name, major, minor, errno);
+ log_error("Device open %s %d:%d failed errno %d", name, major, minor, errno);
- if (stat(name, &sbuf)) {
- log_debug_devs("Device open %s %d:%d stat failed errno %d",
- name, major, minor, errno);
- } else if (sbuf.st_rdev != dev->dev) {
- log_debug_devs("Device open %s %d:%d stat %d:%d does not match.",
- name, major, minor,
- (int)MAJOR(sbuf.st_rdev), (int)MINOR(sbuf.st_rdev));
- }
+ if (stat(name, &sbuf)) {
+ log_debug_devs("Device open %s %d:%d stat failed errno %d",
+ name, major, minor, errno);
+ } else if (sbuf.st_rdev != dev->dev) {
+ log_debug_devs("Device open %s %d:%d stat %d:%d does not match.",
+ name, major, minor,
+ (int)MAJOR(sbuf.st_rdev), (int)MINOR(sbuf.st_rdev));
+ }
- if (!retried) {
- /*
- * FIXME: remove this, the theory for this retry is that
- * there may be a udev race that we can sometimes mask by
- * retrying. This is here until we can figure out if it's
- * needed and if so fix the real problem.
- */
- usleep(5000);
- log_debug_devs("Device open %s retry", dev_name(dev));
- retried = 1;
- goto retry_open;
- }
+ if (!retried) {
+ /*
+ * FIXME: remove this, the theory for this retry is that
+ * there may be a udev race that we can sometimes mask by
+ * retrying. This is here until we can figure out if it's
+ * needed and if so fix the real problem.
+ */
+ usleep(5000);
+ log_debug_devs("Device open %s retry", dev_name(dev));
+ retried = 1;
+ goto retry_open;
}
+
return 0;
}
dev->flags |= DEV_IN_BCACHE;
- dev->bcache_fd = fd;
+ dev->bdev = bdev;
+
return 1;
}
@@ -545,14 +532,13 @@ static int _scan_dev_close(struct device *dev)
dev->flags &= ~DEV_IN_BCACHE;
dev->flags &= ~DEV_BCACHE_EXCL;
- if (dev->bcache_fd < 0) {
+	if (!dev->bdev) {
log_error("scan_dev_close %s already closed", dev_name(dev));
return 0;
}
- if (close(dev->bcache_fd))
- log_warn("close %s errno %d", dev_name(dev), errno);
- dev->bcache_fd = -1;
+ bcache_put_dev(dev->bdev);
+ dev->bdev = NULL;
return 1;
}
@@ -648,7 +634,7 @@ static int _scan_list(struct cmd_context *cmd, struct dev_filter *f,
}
}
- bcache_prefetch(scan_bcache, devl->dev->bcache_fd, 0);
+ bcache_prefetch(scan_bcache, devl->dev->bdev, 0);
rem_prefetches--;
submit_count++;
@@ -665,18 +651,18 @@ static int _scan_list(struct cmd_context *cmd, struct dev_filter *f,
scan_failed = 0;
is_lvm_device = 0;
- if (!bcache_get(scan_bcache, devl->dev->bcache_fd, 0, 0, &bb)) {
+ if (!bcache_get(scan_bcache, devl->dev->bdev, 0, 0, &bb)) {
log_debug_devs("Scan failed to read %s error %d.", dev_name(devl->dev), error);
scan_failed = 1;
scan_read_errors++;
scan_failed_count++;
lvmcache_del_dev(devl->dev);
} else {
- log_debug_devs("Processing data from device %s %d:%d fd %d block %p",
+ log_debug_devs("Processing data from device %s %d:%d block %p",
dev_name(devl->dev),
(int)MAJOR(devl->dev->dev),
(int)MINOR(devl->dev->dev),
- devl->dev->bcache_fd, bb);
+ bb);
ret = _process_block(cmd, f, devl->dev, bb, 0, 0, &is_lvm_device);
@@ -699,7 +685,7 @@ static int _scan_list(struct cmd_context *cmd, struct dev_filter *f,
* drop it from bcache.
*/
if (scan_failed || !is_lvm_device) {
- bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
+ bcache_invalidate_dev(scan_bcache, devl->dev->bdev);
_scan_dev_close(devl->dev);
}
@@ -801,7 +787,7 @@ static int _setup_bcache(int cache_blocks)
if (cache_blocks > MAX_BCACHE_BLOCKS)
cache_blocks = MAX_BCACHE_BLOCKS;
- if (!(ioe = create_async_io_engine())) {
+ if (!(ioe = create_async_io_engine(true))) {
log_error("Failed to create bcache io engine.");
return 0;
}
@@ -856,7 +842,7 @@ int label_scan(struct cmd_context *cmd)
* so this will usually not be true.
*/
if (_in_bcache(dev)) {
- bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+ bcache_invalidate_dev(scan_bcache, dev->bdev);
_scan_dev_close(dev);
}
@@ -914,7 +900,7 @@ int label_scan_devs(struct cmd_context *cmd, struct dev_filter *f, struct dm_lis
dm_list_iterate_items(devl, devs) {
if (_in_bcache(devl->dev)) {
- bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
+ bcache_invalidate_dev(scan_bcache, devl->dev->bdev);
_scan_dev_close(devl->dev);
}
}
@@ -933,7 +919,7 @@ int label_scan_devs_excl(struct dm_list *devs)
dm_list_iterate_items(devl, devs) {
if (_in_bcache(devl->dev)) {
- bcache_invalidate_fd(scan_bcache, devl->dev->bcache_fd);
+ bcache_invalidate_dev(scan_bcache, devl->dev->bdev);
_scan_dev_close(devl->dev);
}
/*
@@ -953,7 +939,7 @@ int label_scan_devs_excl(struct dm_list *devs)
void label_scan_invalidate(struct device *dev)
{
if (_in_bcache(dev)) {
- bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+ bcache_invalidate_dev(scan_bcache, dev->bdev);
_scan_dev_close(dev);
}
}
@@ -1033,7 +1019,7 @@ int label_read(struct device *dev)
dm_list_add(&one_dev, &devl->list);
if (_in_bcache(dev)) {
- bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+ bcache_invalidate_dev(scan_bcache, dev->bdev);
_scan_dev_close(dev);
}
@@ -1067,9 +1053,9 @@ int label_read_sector(struct device *dev, uint64_t read_sector)
label_scan_open(dev);
- bcache_prefetch(scan_bcache, dev->bcache_fd, block_num);
+ bcache_prefetch(scan_bcache, dev->bdev, block_num);
- if (!bcache_get(scan_bcache, dev->bcache_fd, block_num, 0, &bb)) {
+ if (!bcache_get(scan_bcache, dev->bdev, block_num, 0, &bb)) {
log_error("Scan failed to read %s at %llu",
dev_name(dev), (unsigned long long)block_num);
ret = 0;
@@ -1139,7 +1125,7 @@ int label_scan_open_excl(struct device *dev)
if (_in_bcache(dev) && !(dev->flags & DEV_BCACHE_EXCL)) {
/* FIXME: avoid tossing out bcache blocks just to replace fd. */
log_debug("Close and reopen excl %s", dev_name(dev));
- bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+ bcache_invalidate_dev(scan_bcache, dev->bdev);
_scan_dev_close(dev);
}
dev->flags |= DEV_BCACHE_EXCL;
@@ -1155,7 +1141,7 @@ bool dev_read_bytes(struct device *dev, uint64_t start, size_t len, void *data)
return false;
}
- if (dev->bcache_fd <= 0) {
+	if (!dev->bdev) {
/* This is not often needed, perhaps only with lvmetad. */
if (!label_scan_open(dev)) {
log_error("Error opening device %s for reading at %llu length %u.",
@@ -1164,7 +1150,7 @@ bool dev_read_bytes(struct device *dev, uint64_t start, size_t len, void *data)
}
}
- if (!bcache_read_bytes(scan_bcache, dev->bcache_fd, start, len, data)) {
+ if (!bcache_read_bytes(scan_bcache, dev->bdev, start, len, data)) {
log_error("Error reading device %s at %llu length %u.",
dev_name(dev), (unsigned long long)start, (uint32_t)len);
label_scan_invalidate(dev);
@@ -1188,14 +1174,14 @@ bool dev_write_bytes(struct device *dev, uint64_t start, size_t len, void *data)
if (!(dev->flags & DEV_BCACHE_WRITE)) {
/* FIXME: avoid tossing out bcache blocks just to replace fd. */
log_debug("Close and reopen to write %s", dev_name(dev));
- bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+ bcache_invalidate_dev(scan_bcache, dev->bdev);
_scan_dev_close(dev);
dev->flags |= DEV_BCACHE_WRITE;
label_scan_open(dev);
}
- if (dev->bcache_fd <= 0) {
+ if (!dev->bdev) {
/* This is not often needed, perhaps only with lvmetad. */
dev->flags |= DEV_BCACHE_WRITE;
if (!label_scan_open(dev)) {
@@ -1205,7 +1191,7 @@ bool dev_write_bytes(struct device *dev, uint64_t start, size_t len, void *data)
}
}
- if (!bcache_write_bytes(scan_bcache, dev->bcache_fd, start, len, data)) {
+ if (!bcache_write_bytes(scan_bcache, dev->bdev, start, len, data)) {
log_error("Error writing device %s at %llu length %u.",
dev_name(dev), (unsigned long long)start, (uint32_t)len);
label_scan_invalidate(dev);
@@ -1234,14 +1220,14 @@ bool dev_write_zeros(struct device *dev, uint64_t start, size_t len)
if (!(dev->flags & DEV_BCACHE_WRITE)) {
/* FIXME: avoid tossing out bcache blocks just to replace fd. */
log_debug("Close and reopen to write %s", dev_name(dev));
- bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+ bcache_invalidate_dev(scan_bcache, dev->bdev);
_scan_dev_close(dev);
dev->flags |= DEV_BCACHE_WRITE;
label_scan_open(dev);
}
- if (dev->bcache_fd <= 0) {
+ if (!dev->bdev) {
/* This is not often needed, perhaps only with lvmetad. */
dev->flags |= DEV_BCACHE_WRITE;
if (!label_scan_open(dev)) {
@@ -1251,7 +1237,7 @@ bool dev_write_zeros(struct device *dev, uint64_t start, size_t len)
}
}
- if (!bcache_zero_bytes(scan_bcache, dev->bcache_fd, start, len)) {
+ if (!bcache_zero_bytes(scan_bcache, dev->bdev, start, len)) {
log_error("Error writing device %s at %llu length %u.",
dev_name(dev), (unsigned long long)start, (uint32_t)len);
label_scan_invalidate(dev);
@@ -1280,14 +1266,14 @@ bool dev_set_bytes(struct device *dev, uint64_t start, size_t len, uint8_t val)
if (!(dev->flags & DEV_BCACHE_WRITE)) {
/* FIXME: avoid tossing out bcache blocks just to replace fd. */
log_debug("Close and reopen to write %s", dev_name(dev));
- bcache_invalidate_fd(scan_bcache, dev->bcache_fd);
+ bcache_invalidate_dev(scan_bcache, dev->bdev);
_scan_dev_close(dev);
dev->flags |= DEV_BCACHE_WRITE;
label_scan_open(dev);
}
- if (dev->bcache_fd <= 0) {
+ if (!dev->bdev) {
/* This is not often needed, perhaps only with lvmetad. */
dev->flags |= DEV_BCACHE_WRITE;
if (!label_scan_open(dev)) {
@@ -1297,7 +1283,7 @@ bool dev_set_bytes(struct device *dev, uint64_t start, size_t len, uint8_t val)
}
}
- if (!bcache_set_bytes(scan_bcache, dev->bcache_fd, start, len, val)) {
+ if (!bcache_set_bytes(scan_bcache, dev->bdev, start, len, val)) {
log_error("Error writing device %s at %llu length %u.",
dev_name(dev), (unsigned long long)start, (uint32_t)len);
label_scan_invalidate(dev);
diff --git a/test/unit/Makefile b/test/unit/Makefile
index 9155c4763..d586b0045 100644
--- a/test/unit/Makefile
+++ b/test/unit/Makefile
@@ -20,7 +20,6 @@ UNIT_SOURCE=\
test/unit/config_t.c \
test/unit/dmlist_t.c \
test/unit/dmstatus_t.c \
- test/unit/io_engine_t.c \
test/unit/radix_tree_t.c \
test/unit/matcher_t.c \
test/unit/framework.c \
diff --git a/test/unit/bcache_t.c b/test/unit/bcache_t.c
index 5e0700572..74a0bdf6e 100644
--- a/test/unit/bcache_t.c
+++ b/test/unit/bcache_t.c
@@ -23,6 +23,13 @@
#define SHOW_MOCK_CALLS 0
+//----------------------------------------------------------------
+// We're assuming the file descriptor is the first element of the
+// bcache_dev.
+struct bcache_dev {
+ int fd;
+};
+
/*----------------------------------------------------------------
* Mock engine
*--------------------------------------------------------------*/
@@ -32,10 +39,13 @@ struct mock_engine {
struct dm_list issued_io;
unsigned max_io;
sector_t block_size;
+ int last_fd;
};
enum method {
E_DESTROY,
+ E_OPEN,
+ E_CLOSE,
E_ISSUE,
E_WAIT,
E_MAX_IO
@@ -47,10 +57,11 @@ struct mock_call {
bool match_args;
enum dir d;
- int fd;
+ struct bcache_dev *dev;
block_address b;
bool issue_r;
bool wait_r;
+ unsigned engine_flags;
};
struct mock_io {
@@ -68,6 +79,10 @@ static const char *_show_method(enum method m)
switch (m) {
case E_DESTROY:
return "destroy()";
+ case E_OPEN:
+ return "open()";
+ case E_CLOSE:
+ return "close()";
case E_ISSUE:
return "issue()";
case E_WAIT:
@@ -87,13 +102,13 @@ static void _expect(struct mock_engine *e, enum method m)
dm_list_add(&e->expected_calls, &mc->list);
}
-static void _expect_read(struct mock_engine *e, int fd, block_address b)
+static void _expect_read(struct mock_engine *e, struct bcache_dev *dev, block_address b)
{
struct mock_call *mc = malloc(sizeof(*mc));
mc->m = E_ISSUE;
mc->match_args = true;
mc->d = DIR_READ;
- mc->fd = fd;
+ mc->dev = dev;
mc->b = b;
mc->issue_r = true;
mc->wait_r = true;
@@ -110,71 +125,80 @@ static void _expect_read_any(struct mock_engine *e)
dm_list_add(&e->expected_calls, &mc->list);
}
-static void _expect_write(struct mock_engine *e, int fd, block_address b)
+static void _expect_write(struct mock_engine *e, struct bcache_dev *dev, block_address b)
{
struct mock_call *mc = malloc(sizeof(*mc));
mc->m = E_ISSUE;
mc->match_args = true;
mc->d = DIR_WRITE;
- mc->fd = fd;
+ mc->dev = dev;
mc->b = b;
mc->issue_r = true;
mc->wait_r = true;
dm_list_add(&e->expected_calls, &mc->list);
}
-static void _expect_read_bad_issue(struct mock_engine *e, int fd, block_address b)
+static void _expect_read_bad_issue(struct mock_engine *e, struct bcache_dev *dev, block_address b)
{
struct mock_call *mc = malloc(sizeof(*mc));
mc->m = E_ISSUE;
mc->match_args = true;
mc->d = DIR_READ;
- mc->fd = fd;
+ mc->dev = dev;
mc->b = b;
mc->issue_r = false;
mc->wait_r = true;
dm_list_add(&e->expected_calls, &mc->list);
}
-static void _expect_write_bad_issue(struct mock_engine *e, int fd, block_address b)
+static void _expect_write_bad_issue(struct mock_engine *e, struct bcache_dev *dev, block_address b)
{
struct mock_call *mc = malloc(sizeof(*mc));
mc->m = E_ISSUE;
mc->match_args = true;
mc->d = DIR_WRITE;
- mc->fd = fd;
+ mc->dev = dev;
mc->b = b;
mc->issue_r = false;
mc->wait_r = true;
dm_list_add(&e->expected_calls, &mc->list);
}
-static void _expect_read_bad_wait(struct mock_engine *e, int fd, block_address b)
+static void _expect_read_bad_wait(struct mock_engine *e, struct bcache_dev *dev, block_address b)
{
struct mock_call *mc = malloc(sizeof(*mc));
mc->m = E_ISSUE;
mc->match_args = true;
mc->d = DIR_READ;
- mc->fd = fd;
+ mc->dev = dev;
mc->b = b;
mc->issue_r = true;
mc->wait_r = false;
dm_list_add(&e->expected_calls, &mc->list);
}
-static void _expect_write_bad_wait(struct mock_engine *e, int fd, block_address b)
+static void _expect_write_bad_wait(struct mock_engine *e, struct bcache_dev *dev, block_address b)
{
struct mock_call *mc = malloc(sizeof(*mc));
mc->m = E_ISSUE;
mc->match_args = true;
mc->d = DIR_WRITE;
- mc->fd = fd;
+ mc->dev = dev;
mc->b = b;
mc->issue_r = true;
mc->wait_r = false;
dm_list_add(&e->expected_calls, &mc->list);
}
+static void _expect_open(struct mock_engine *e, unsigned eflags)
+{
+ struct mock_call *mc = malloc(sizeof(*mc));
+ mc->m = E_OPEN;
+ mc->match_args = true;
+ mc->engine_flags = eflags;
+ dm_list_add(&e->expected_calls, &mc->list);
+}
+
static struct mock_call *_match_pop(struct mock_engine *e, enum method m)
{
@@ -228,6 +252,27 @@ static void _mock_destroy(struct io_engine *e)
free(_to_mock(e));
}
+static int _mock_open(struct io_engine *e, const char *path, unsigned flags)
+{
+ struct mock_engine *me = _to_mock(e);
+ struct mock_call *mc;
+
+ mc = _match_pop(me, E_OPEN);
+ if (mc->match_args)
+ T_ASSERT_EQUAL(mc->engine_flags, flags);
+ free(mc);
+ return me->last_fd++;
+}
+
+static void _mock_close(struct io_engine *e, int fd)
+{
+ struct mock_engine *me = _to_mock(e);
+ struct mock_call *mc;
+
+ mc = _match_pop(me, E_CLOSE);
+ free(mc);
+}
+
static bool _mock_issue(struct io_engine *e, enum dir d, int fd,
sector_t sb, sector_t se, void *data, void *context)
{
@@ -239,7 +284,7 @@ static bool _mock_issue(struct io_engine *e, enum dir d, int fd,
mc = _match_pop(me, E_ISSUE);
if (mc->match_args) {
T_ASSERT(d == mc->d);
- T_ASSERT(fd == mc->fd);
+ T_ASSERT(fd == mc->dev->fd);
T_ASSERT(sb == mc->b * me->block_size);
T_ASSERT(se == (mc->b + 1) * me->block_size);
}
@@ -294,6 +339,8 @@ static struct mock_engine *_mock_create(unsigned max_io, sector_t block_size)
struct mock_engine *m = malloc(sizeof(*m));
m->e.destroy = _mock_destroy;
+ m->e.open = _mock_open;
+ m->e.close = _mock_close;
m->e.issue = _mock_issue;
m->e.wait = _mock_wait;
m->e.max_io = _mock_max_io;
@@ -302,6 +349,7 @@ static struct mock_engine *_mock_create(unsigned max_io, sector_t block_size)
m->block_size = block_size;
dm_list_init(&m->expected_calls);
dm_list_init(&m->issued_io);
+ m->last_fd = 2;
return m;
}
@@ -420,184 +468,240 @@ static void test_get_triggers_read(void *context)
{
struct fixture *f = context;
- int fd = 17; // arbitrary key
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct block *b;
- _expect_read(f->me, fd, 0);
+ _expect(f->me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+
+ T_ASSERT(dev);
+ _expect_read(f->me, dev, 0);
_expect(f->me, E_WAIT);
- T_ASSERT(bcache_get(f->cache, fd, 0, 0, &b));
+ T_ASSERT(bcache_get(f->cache, dev, 0, 0, &b));
bcache_put(b);
- _expect_read(f->me, fd, 1);
+ _expect_read(f->me, dev, 1);
_expect(f->me, E_WAIT);
- T_ASSERT(bcache_get(f->cache, fd, 1, GF_DIRTY, &b));
- _expect_write(f->me, fd, 1);
+ T_ASSERT(bcache_get(f->cache, dev, 1, GF_DIRTY, &b));
+ _expect_write(f->me, dev, 1);
_expect(f->me, E_WAIT);
bcache_put(b);
+
+ _expect(f->me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_repeated_reads_are_cached(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
- int fd = 17; // arbitrary key
unsigned i;
struct block *b;
- _expect_read(f->me, fd, 0);
+ _expect(f->me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+ _expect_read(f->me, dev, 0);
_expect(f->me, E_WAIT);
for (i = 0; i < 100; i++) {
- T_ASSERT(bcache_get(f->cache, fd, 0, 0, &b));
+ T_ASSERT(bcache_get(f->cache, dev, 0, 0, &b));
bcache_put(b);
}
+ _expect(f->me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_block_gets_evicted_with_many_reads(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
const unsigned nr_cache_blocks = 16;
- int fd = 17; // arbitrary key
unsigned i;
struct block *b;
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+
+ fprintf(stderr, "1\n");
for (i = 0; i < nr_cache_blocks; i++) {
- _expect_read(me, fd, i);
+ _expect_read(me, dev, i);
_expect(me, E_WAIT);
- T_ASSERT(bcache_get(cache, fd, i, 0, &b));
+ T_ASSERT(bcache_get(cache, dev, i, 0, &b));
bcache_put(b);
}
+ fprintf(stderr, "2\n");
// Not enough cache blocks to hold this one
- _expect_read(me, fd, nr_cache_blocks);
+ _expect_read(me, dev, nr_cache_blocks);
_expect(me, E_WAIT);
- T_ASSERT(bcache_get(cache, fd, nr_cache_blocks, 0, &b));
+ T_ASSERT(bcache_get(cache, dev, nr_cache_blocks, 0, &b));
bcache_put(b);
+ fprintf(stderr, "3\n");
// Now if we run through we should find one block has been
// evicted. We go backwards because the oldest is normally
// evicted first.
_expect_read_any(me);
_expect(me, E_WAIT);
for (i = nr_cache_blocks; i; i--) {
- T_ASSERT(bcache_get(cache, fd, i - 1, 0, &b));
+ T_ASSERT(bcache_get(cache, dev, i - 1, 0, &b));
bcache_put(b);
+ T_ASSERT(bcache_is_well_formed(cache));
}
+
+ fprintf(stderr, "4\n");
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_prefetch_issues_a_read(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
const unsigned nr_cache_blocks = 16;
- int fd = 17; // arbitrary key
unsigned i;
struct block *b;
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+
for (i = 0; i < nr_cache_blocks; i++) {
// prefetch should not wait
- _expect_read(me, fd, i);
- bcache_prefetch(cache, fd, i);
+ _expect_read(me, dev, i);
+ bcache_prefetch(cache, dev, i);
}
_no_outstanding_expectations(me);
for (i = 0; i < nr_cache_blocks; i++) {
_expect(me, E_WAIT);
- T_ASSERT(bcache_get(cache, fd, i, 0, &b));
+ T_ASSERT(bcache_get(cache, dev, i, 0, &b));
bcache_put(b);
}
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_too_many_prefetches_does_not_trigger_a_wait(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
const unsigned nr_cache_blocks = 16;
- int fd = 17; // arbitrary key
unsigned i;
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
for (i = 0; i < 10 * nr_cache_blocks; i++) {
// prefetch should not wait
if (i < nr_cache_blocks)
- _expect_read(me, fd, i);
- bcache_prefetch(cache, fd, i);
+ _expect_read(me, dev, i);
+ bcache_prefetch(cache, dev, i);
}
// Destroy will wait for any in flight IO triggered by prefetches.
for (i = 0; i < nr_cache_blocks; i++)
_expect(me, E_WAIT);
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_dirty_data_gets_written_back(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
- int fd = 17; // arbitrary key
struct block *b;
+ _expect(f->me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+
// Expect the read
- _expect_read(me, fd, 0);
+ _expect_read(me, dev, 0);
_expect(me, E_WAIT);
- T_ASSERT(bcache_get(cache, fd, 0, GF_DIRTY, &b));
+ T_ASSERT(bcache_get(cache, dev, 0, GF_DIRTY, &b));
bcache_put(b);
// Expect the write
- _expect_write(me, fd, 0);
+ _expect_write(me, dev, 0);
_expect(me, E_WAIT);
+
+ bcache_put_dev(dev);
+ _expect(f->me, E_CLOSE);
}
static void test_zeroed_data_counts_as_dirty(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
- int fd = 17; // arbitrary key
struct block *b;
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+
// No read
- T_ASSERT(bcache_get(cache, fd, 0, GF_ZERO, &b));
+ T_ASSERT(bcache_get(cache, dev, 0, GF_ZERO, &b));
bcache_put(b);
// Expect the write
- _expect_write(me, fd, 0);
+ _expect_write(me, dev, 0);
_expect(me, E_WAIT);
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_flush_waits_for_all_dirty(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
const unsigned count = 16;
- int fd = 17; // arbitrary key
unsigned i;
struct block *b;
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+
for (i = 0; i < count; i++) {
if (i % 2) {
- T_ASSERT(bcache_get(cache, fd, i, GF_ZERO, &b));
+ T_ASSERT(bcache_get(cache, dev, i, GF_ZERO, &b));
} else {
- _expect_read(me, fd, i);
+ _expect_read(me, dev, i);
_expect(me, E_WAIT);
- T_ASSERT(bcache_get(cache, fd, i, 0, &b));
+ T_ASSERT(bcache_get(cache, dev, i, 0, &b));
}
bcache_put(b);
}
for (i = 0; i < count; i++) {
if (i % 2)
- _expect_write(me, fd, i);
+ _expect_write(me, dev, i);
}
for (i = 0; i < count; i++) {
@@ -607,207 +711,415 @@ static void test_flush_waits_for_all_dirty(void *context)
bcache_flush(cache);
_no_outstanding_expectations(me);
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_multiple_files(void *context)
{
- static int _fds[] = {1, 128, 345, 678, 890};
+ static const char *_paths[] = {"/dev/dm-1", "/dev/dm-2", "/dev/dm-3", "/dev/dm-4"};
struct fixture *f = context;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
+ struct bcache_dev *dev;
struct block *b;
unsigned i;
- for (i = 0; i < DM_ARRAY_SIZE(_fds); i++) {
- _expect_read(me, _fds[i], 0);
+ for (i = 0; i < DM_ARRAY_SIZE(_paths); i++) {
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(cache, _paths[i], 0);
+ _expect_read(me, dev, 0);
_expect(me, E_WAIT);
- T_ASSERT(bcache_get(cache, _fds[i], 0, 0, &b));
+ T_ASSERT(bcache_get(cache, dev, 0, 0, &b));
bcache_put(b);
+ bcache_put_dev(dev);
}
+
+ for (i = 0; i < DM_ARRAY_SIZE(_paths); i++)
+ _expect(me, E_CLOSE);
}
static void test_read_bad_issue(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
struct block *b;
- _expect_read_bad_issue(me, 17, 0);
- T_ASSERT(!bcache_get(cache, 17, 0, 0, &b));
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+
+ _expect_read_bad_issue(me, dev, 0);
+ T_ASSERT(!bcache_get(cache, dev, 0, 0, &b));
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_read_bad_issue_intermittent(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
struct block *b;
- int fd = 17;
- _expect_read_bad_issue(me, fd, 0);
- T_ASSERT(!bcache_get(cache, fd, 0, 0, &b));
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+
+ _expect_read_bad_issue(me, dev, 0);
+ T_ASSERT(!bcache_get(cache, dev, 0, 0, &b));
- _expect_read(me, fd, 0);
+ _expect_read(me, dev, 0);
_expect(me, E_WAIT);
- T_ASSERT(bcache_get(cache, fd, 0, 0, &b));
+ T_ASSERT(bcache_get(cache, dev, 0, 0, &b));
bcache_put(b);
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_read_bad_wait(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
struct block *b;
- int fd = 17;
- _expect_read_bad_wait(me, fd, 0);
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+
+ _expect_read_bad_wait(me, dev, 0);
_expect(me, E_WAIT);
- T_ASSERT(!bcache_get(cache, fd, 0, 0, &b));
+ T_ASSERT(!bcache_get(cache, dev, 0, 0, &b));
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_read_bad_wait_intermittent(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
struct block *b;
- int fd = 17;
- _expect_read_bad_wait(me, fd, 0);
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+
+ _expect_read_bad_wait(me, dev, 0);
_expect(me, E_WAIT);
- T_ASSERT(!bcache_get(cache, fd, 0, 0, &b));
+ T_ASSERT(!bcache_get(cache, dev, 0, 0, &b));
- _expect_read(me, fd, 0);
+ _expect_read(me, dev, 0);
_expect(me, E_WAIT);
- T_ASSERT(bcache_get(cache, fd, 0, 0, &b));
+ T_ASSERT(bcache_get(cache, dev, 0, 0, &b));
bcache_put(b);
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_write_bad_issue_stops_flush(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
struct block *b;
- int fd = 17;
- T_ASSERT(bcache_get(cache, fd, 0, GF_ZERO, &b));
- _expect_write_bad_issue(me, fd, 0);
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+
+ T_ASSERT(bcache_get(cache, dev, 0, GF_ZERO, &b));
+ _expect_write_bad_issue(me, dev, 0);
bcache_put(b);
T_ASSERT(!bcache_flush(cache));
// we'll let it succeed the second time
- _expect_write(me, fd, 0);
+ _expect_write(me, dev, 0);
_expect(me, E_WAIT);
T_ASSERT(bcache_flush(cache));
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_write_bad_io_stops_flush(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
struct block *b;
- int fd = 17;
- T_ASSERT(bcache_get(cache, fd, 0, GF_ZERO, &b));
- _expect_write_bad_wait(me, fd, 0);
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+
+ T_ASSERT(bcache_get(cache, dev, 0, GF_ZERO, &b));
+ _expect_write_bad_wait(me, dev, 0);
_expect(me, E_WAIT);
bcache_put(b);
T_ASSERT(!bcache_flush(cache));
// we'll let it succeed the second time
- _expect_write(me, fd, 0);
+ _expect_write(me, dev, 0);
_expect(me, E_WAIT);
T_ASSERT(bcache_flush(cache));
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_invalidate_not_present(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct bcache *cache = f->cache;
- int fd = 17;
- T_ASSERT(bcache_invalidate(cache, fd, 0));
+ _expect(f->me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+ T_ASSERT(bcache_invalidate(cache, dev, 0));
+ _expect(f->me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_invalidate_present(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
struct block *b;
- int fd = 17;
- _expect_read(me, fd, 0);
+ _expect(f->me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+
+ _expect_read(me, dev, 0);
_expect(me, E_WAIT);
- T_ASSERT(bcache_get(cache, fd, 0, 0, &b));
+ T_ASSERT(bcache_get(cache, dev, 0, 0, &b));
bcache_put(b);
- T_ASSERT(bcache_invalidate(cache, fd, 0));
+ T_ASSERT(bcache_invalidate(cache, dev, 0));
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_invalidate_after_read_error(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
struct block *b;
- int fd = 17;
- _expect_read_bad_issue(me, fd, 0);
- T_ASSERT(!bcache_get(cache, fd, 0, 0, &b));
- T_ASSERT(bcache_invalidate(cache, fd, 0));
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+ _expect_read_bad_issue(me, dev, 0);
+ T_ASSERT(!bcache_get(cache, dev, 0, 0, &b));
+ T_ASSERT(bcache_invalidate(cache, dev, 0));
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_invalidate_after_write_error(void *context)
{
struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
struct block *b;
- int fd = 17;
- T_ASSERT(bcache_get(cache, fd, 0, GF_ZERO, &b));
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+ T_ASSERT(bcache_get(cache, dev, 0, GF_ZERO, &b));
bcache_put(b);
// invalidate should fail if the write fails
- _expect_write_bad_wait(me, fd, 0);
+ _expect_write_bad_wait(me, dev, 0);
_expect(me, E_WAIT);
- T_ASSERT(!bcache_invalidate(cache, fd, 0));
+ T_ASSERT(!bcache_invalidate(cache, dev, 0));
// and should succeed if the write does
- _expect_write(me, fd, 0);
+ _expect_write(me, dev, 0);
_expect(me, E_WAIT);
- T_ASSERT(bcache_invalidate(cache, fd, 0));
+ T_ASSERT(bcache_invalidate(cache, dev, 0));
// a read is not required to get the block
- _expect_read(me, fd, 0);
+ _expect_read(me, dev, 0);
_expect(me, E_WAIT);
- T_ASSERT(bcache_get(cache, fd, 0, 0, &b));
+ T_ASSERT(bcache_get(cache, dev, 0, 0, &b));
bcache_put(b);
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
static void test_invalidate_held_block(void *context)
{
+ struct fixture *f = context;
+ const char *path = "/foo/bar/dev";
+ struct bcache_dev *dev;
+ struct mock_engine *me = f->me;
+ struct bcache *cache = f->cache;
+ struct block *b;
+
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, path, 0);
+ T_ASSERT(bcache_get(cache, dev, 0, GF_ZERO, &b));
+ T_ASSERT(!bcache_invalidate(cache, dev, 0));
+
+ _expect_write(me, dev, 0);
+ _expect(me, E_WAIT);
+ bcache_put(b);
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
+}
+
+//----------------------------------------------------------------
+
+static void test_concurrent_devs(void *context)
+{
+ struct fixture *f = context;
+ struct mock_engine *me = f->me;
+ struct bcache *cache = f->cache;
+
+ const char *path = "/dev/foo/bar";
+ struct bcache_dev *dev1, *dev2;
+
+ _expect(me, E_OPEN);
+ dev1 = bcache_get_dev(cache, path, 0);
+ dev2 = bcache_get_dev(cache, path, 0);
+
+ _expect(me, E_CLOSE); // only one close
+
+ bcache_put_dev(dev1);
+ bcache_put_dev(dev2);
+}
+
+static void test_concurrent_devs_exclusive(void *context)
+{
+ struct fixture *f = context;
+ struct mock_engine *me = f->me;
+ struct bcache *cache = f->cache;
+
+ const char *path = "/dev/foo/bar";
+ struct bcache_dev *dev1, *dev2;
+
+ _expect(me, E_OPEN);
+ dev1 = bcache_get_dev(cache, path, EF_EXCL);
+ dev2 = bcache_get_dev(cache, path, EF_EXCL);
+
+ _expect(me, E_CLOSE); // only one close
+
+ bcache_put_dev(dev1);
+ bcache_put_dev(dev2);
+}
+
+static void test_exclusive_flags_gets_passed_to_engine(void *context)
+{
+ struct fixture *f = context;
+ struct mock_engine *me = f->me;
+ struct bcache *cache = f->cache;
+
+ const char *path = "/dev/foo/bar";
+ struct bcache_dev *dev;
+
+ _expect_open(me, EF_EXCL);
+ dev = bcache_get_dev(cache, path, EF_EXCL);
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
+
+ _expect_open(me, EF_READ_ONLY);
+ dev = bcache_get_dev(cache, path, EF_READ_ONLY);
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
+
+ _expect_open(me, EF_EXCL | EF_READ_ONLY);
+ dev = bcache_get_dev(cache, path, EF_EXCL | EF_READ_ONLY);
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
+}
+
+static void test_reopen_exclusive_triggers_invalidate(void *context)
+{
struct fixture *f = context;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
+
+ const char *path = "/dev/foo/bar";
+ struct bcache_dev *dev;
struct block *b;
- int fd = 17;
- T_ASSERT(bcache_get(cache, fd, 0, GF_ZERO, &b));
+ _expect_open(me, 0);
+ dev = bcache_get_dev(cache, path, 0);
+ T_ASSERT(dev);
+ _expect_read(me, dev, 0);
+ _expect(me, E_WAIT);
+ T_ASSERT(bcache_get(cache, dev, 0, 0, &b));
+ bcache_put(b);
+ bcache_put_dev(dev);
+
+ _no_outstanding_expectations(me);
- T_ASSERT(!bcache_invalidate(cache, fd, 0));
+ _expect(me, E_CLOSE);
+ _expect_open(me, EF_EXCL);
- _expect_write(me, fd, 0);
+ dev = bcache_get_dev(cache, path, EF_EXCL);
+ T_ASSERT(dev);
+ _expect_read(me, dev, 0);
_expect(me, E_WAIT);
+ T_ASSERT(bcache_get(cache, dev, 0, 0, &b));
bcache_put(b);
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
+}
+
+static void test_concurrent_reopen_excl_fails(void *context)
+{
+ struct fixture *f = context;
+ struct mock_engine *me = f->me;
+ struct bcache *cache = f->cache;
+
+ const char *path = "/dev/foo/bar";
+ struct bcache_dev *dev;
+ struct block *b;
+
+ _expect_open(me, 0);
+ dev = bcache_get_dev(cache, path, 0);
+ T_ASSERT(dev);
+ _expect_read(me, dev, 0);
+ _expect(me, E_WAIT);
+ T_ASSERT(bcache_get(cache, dev, 0, 0, &b));
+ bcache_put(b);
+
+ _no_outstanding_expectations(me);
+
+ T_ASSERT(!bcache_get_dev(cache, path, EF_EXCL));
+
+ _expect(me, E_CLOSE);
+ bcache_put_dev(dev);
}
//----------------------------------------------------------------
@@ -815,6 +1127,8 @@ static void test_invalidate_held_block(void *context)
static void _cycle(struct fixture *f, unsigned nr_cache_blocks)
{
+ char buffer[64];
+ struct bcache_dev *dev;
struct mock_engine *me = f->me;
struct bcache *cache = f->cache;
@@ -822,18 +1136,25 @@ static void _cycle(struct fixture *f, unsigned nr_cache_blocks)
struct block *b;
for (i = 0; i < nr_cache_blocks; i++) {
+ snprintf(buffer, sizeof(buffer) - 1, "/dev/dm-%u", i);
+ _expect(me, E_OPEN);
+ dev = bcache_get_dev(f->cache, buffer, 0);
// prefetch should not wait
- _expect_read(me, i, 0);
- bcache_prefetch(cache, i, 0);
+ _expect_read(me, dev, 0);
+ bcache_prefetch(cache, dev, 0);
+ bcache_put_dev(dev);
}
// This double checks the reads occur in response to the prefetch
_no_outstanding_expectations(me);
for (i = 0; i < nr_cache_blocks; i++) {
+ snprintf(buffer, sizeof(buffer) - 1, "/dev/dm-%u", i);
+ dev = bcache_get_dev(f->cache, buffer, 0);
_expect(me, E_WAIT);
- T_ASSERT(bcache_get(cache, i, 0, 0, &b));
+ T_ASSERT(bcache_get(cache, dev, 0, 0, &b));
bcache_put(b);
+ bcache_put_dev(dev);
}
_no_outstanding_expectations(me);
@@ -842,18 +1163,30 @@ static void _cycle(struct fixture *f, unsigned nr_cache_blocks)
static void test_concurrent_reads_after_invalidate(void *context)
{
struct fixture *f = context;
+ char buffer[64];
unsigned i, nr_cache_blocks = 16;
+ struct bcache_dev *dev;
_cycle(f, nr_cache_blocks);
- for (i = 0; i < nr_cache_blocks; i++)
- bcache_invalidate_fd(f->cache, i);
+ for (i = 0; i < nr_cache_blocks; i++) {
+ snprintf(buffer, sizeof(buffer) - 1, "/dev/dm-%u", i);
+ dev = bcache_get_dev(f->cache, buffer, 0);
+ bcache_invalidate_dev(f->cache, dev);
+ _expect(f->me, E_CLOSE);
+ bcache_put_dev(dev);
+ _no_outstanding_expectations(f->me);
+ }
+
_cycle(f, nr_cache_blocks);
+
+ for (i = 0; i < nr_cache_blocks; i++)
+ _expect(f->me, E_CLOSE);
}
/*----------------------------------------------------------------
* Top level
*--------------------------------------------------------------*/
-#define T(path, desc, fn) register_test(ts, "/base/device/bcache/" path, desc, fn)
+#define T(path, desc, fn) register_test(ts, "/base/device/bcache/core/" path, desc, fn)
static struct test_suite *_tiny_tests(void)
{
@@ -900,6 +1233,11 @@ static struct test_suite *_small_tests(void)
T("invalidate-fails-in-held", "invalidating a held block fails", test_invalidate_held_block);
T("concurrent-reads-after-invalidate", "prefetch should still issue concurrent reads after invalidate",
test_concurrent_reads_after_invalidate);
+ T("concurrent-devs", "a device may have more than one holder", test_concurrent_devs);
+ T("concurrent-devs-exclusive", "a device, opened exclusively, may have more than one holder", test_concurrent_devs_exclusive);
+ T("dev-flags-get-passed-to-engine", "EF_EXCL and EF_READ_ONLY get passed down", test_exclusive_flags_gets_passed_to_engine);
+ T("reopen-excl-invalidates", "reopening a dev EF_EXCL indicates you want to invalidate everything", test_reopen_exclusive_triggers_invalidate);
+ T("concurrent-reopen-excl-fails", "you can't reopen a dev EF_EXCL if there's already a holder", test_concurrent_reopen_excl_fails);
return ts;
}
diff --git a/test/unit/bcache_utils_t.c b/test/unit/bcache_utils_t.c
index 66780ea21..8f07ed44a 100644
--- a/test/unit/bcache_utils_t.c
+++ b/test/unit/bcache_utils_t.c
@@ -34,9 +34,9 @@
#define INIT_PATTERN 123
struct fixture {
- int fd;
char fname[32];
struct bcache *cache;
+ struct bcache_dev *dev;
};
static inline uint8_t _pattern_at(uint8_t pat, uint8_t byte)
@@ -49,57 +49,73 @@ static uint64_t byte(block_address b, uint64_t offset)
return b * T_BLOCK_SIZE + offset;
}
-static void *_fix_init(struct io_engine *engine)
+// With testing in tmpfs directory O_DIRECT cannot be used
+// tmpfs has f_fsid == 0 (unsure if this is best guess)
+static bool _use_o_direct_internal(void)
{
- uint8_t buffer[T_BLOCK_SIZE];
- struct fixture *f = malloc(sizeof(*f));
- unsigned b, i;
struct statvfs fsdata;
- static int _runs_is_tmpfs = -1;
- if (_runs_is_tmpfs == -1) {
- // With testing in tmpfs directory O_DIRECT cannot be used
- // tmpfs has f_fsid == 0 (unsure if this is best guess)
- _runs_is_tmpfs = (statvfs(".", &fsdata) == 0 && !fsdata.f_fsid) ? 1 : 0;
- if (_runs_is_tmpfs)
+ if (statvfs(".", &fsdata))
+ // assume we can
+ return true;
+
+ return fsdata.f_fsid;
+}
+
+static bool _use_o_direct(void)
+{
+ static bool latch = false;
+ static bool result;
+
+ if (!latch) {
+ latch = true;
+ result = _use_o_direct_internal();
+ if (!result)
printf(" Running test in tmpfs, *NOT* using O_DIRECT\n");
}
+ return result;
+}
+
+static void *_fix_init(struct io_engine *engine)
+{
+ int fd;
+ uint8_t buffer[T_BLOCK_SIZE];
+ struct fixture *f = malloc(sizeof(*f));
+ unsigned b, i;
+
T_ASSERT(f);
snprintf(f->fname, sizeof(f->fname), "unit-test-XXXXXX");
- f->fd = mkstemp(f->fname);
- T_ASSERT(f->fd >= 0);
+ fd = mkstemp(f->fname);
+ T_ASSERT(fd >= 0);
for (b = 0; b < NR_BLOCKS; b++) {
for (i = 0; i < sizeof(buffer); i++)
buffer[i] = _pattern_at(INIT_PATTERN, byte(b, i));
- T_ASSERT(write(f->fd, buffer, T_BLOCK_SIZE) > 0);
- }
-
- if (!_runs_is_tmpfs) {
- close(f->fd);
- // reopen with O_DIRECT
- f->fd = open(f->fname, O_RDWR | O_DIRECT);
- T_ASSERT(f->fd >= 0);
+ T_ASSERT(write(fd, buffer, T_BLOCK_SIZE) > 0);
}
+ close(fd);
f->cache = bcache_create(T_BLOCK_SIZE / 512, NR_BLOCKS, engine);
T_ASSERT(f->cache);
+ f->dev = bcache_get_dev(f->cache, f->fname, 0);
+ T_ASSERT(f->dev);
+
return f;
}
static void *_async_init(void)
{
- struct io_engine *e = create_async_io_engine();
+ struct io_engine *e = create_async_io_engine(_use_o_direct());
T_ASSERT(e);
return _fix_init(e);
}
static void *_sync_init(void)
{
- struct io_engine *e = create_sync_io_engine();
+ struct io_engine *e = create_sync_io_engine(_use_o_direct());
T_ASSERT(e);
return _fix_init(e);
}
@@ -108,8 +124,8 @@ static void _fix_exit(void *fixture)
{
struct fixture *f = fixture;
+ bcache_put_dev(f->dev);
bcache_destroy(f->cache);
- close(f->fd);
unlink(f->fname);
free(f);
}
@@ -143,7 +159,7 @@ static void _verify(struct fixture *f, uint64_t byte_b, uint64_t byte_e, uint8_t
unsigned i;
size_t len2 = byte_e - byte_b;
uint8_t *buffer = malloc(len2);
- T_ASSERT(bcache_read_bytes(f->cache, f->fd, byte_b, len2, buffer));
+ T_ASSERT(bcache_read_bytes(f->cache, f->dev, byte_b, len2, buffer));
for (i = 0; i < len; i++)
T_ASSERT_EQUAL(buffer[i], _pattern_at(pat, byte_b + i));
free(buffer);
@@ -151,7 +167,7 @@ static void _verify(struct fixture *f, uint64_t byte_b, uint64_t byte_e, uint8_t
// Verify again, driving bcache directly
for (; bb != be; bb++) {
- T_ASSERT(bcache_get(f->cache, f->fd, bb, 0, &b));
+ T_ASSERT(bcache_get(f->cache, f->dev, bb, 0, &b));
blen = _min(T_BLOCK_SIZE - offset, len);
_verify_bytes(b, bb * T_BLOCK_SIZE, offset, blen, pat);
@@ -173,7 +189,7 @@ static void _verify_set(struct fixture *f, uint64_t byte_b, uint64_t byte_e, uin
uint64_t blen, len = byte_e - byte_b;
for (; bb != be; bb++) {
- T_ASSERT(bcache_get(f->cache, f->fd, bb, 0, &b));
+ T_ASSERT(bcache_get(f->cache, f->dev, bb, 0, &b));
blen = _min(T_BLOCK_SIZE - offset, len);
for (i = 0; i < blen; i++)
@@ -201,30 +217,35 @@ static void _do_write(struct fixture *f, uint64_t byte_b, uint64_t byte_e, uint8
for (i = 0; i < len; i++)
buffer[i] = _pattern_at(pat, byte_b + i);
- T_ASSERT(bcache_write_bytes(f->cache, f->fd, byte_b, byte_e - byte_b, buffer));
+ T_ASSERT(bcache_write_bytes(f->cache, f->dev, byte_b, byte_e - byte_b, buffer));
free(buffer);
}
static void _do_zero(struct fixture *f, uint64_t byte_b, uint64_t byte_e)
{
- T_ASSERT(bcache_zero_bytes(f->cache, f->fd, byte_b, byte_e - byte_b));
+ T_ASSERT(bcache_zero_bytes(f->cache, f->dev, byte_b, byte_e - byte_b));
}
static void _do_set(struct fixture *f, uint64_t byte_b, uint64_t byte_e, uint8_t val)
{
- T_ASSERT(bcache_set_bytes(f->cache, f->fd, byte_b, byte_e - byte_b, val));
+ T_ASSERT(bcache_set_bytes(f->cache, f->dev, byte_b, byte_e - byte_b, val));
}
static void _reopen(struct fixture *f)
{
struct io_engine *engine;
+ bcache_put_dev(f->dev);
bcache_destroy(f->cache);
- engine = create_async_io_engine();
+
+ engine = create_async_io_engine(_use_o_direct());
T_ASSERT(engine);
f->cache = bcache_create(T_BLOCK_SIZE / 512, NR_BLOCKS, engine);
T_ASSERT(f->cache);
+
+ f->dev = bcache_get_dev(f->cache, f->fname, 0);
+	T_ASSERT(f->dev);
}
//----------------------------------------------------------------
diff --git a/test/unit/io_engine_t.c b/test/unit/io_engine_t.c
deleted file mode 100644
index 1a4f638e5..000000000
--- a/test/unit/io_engine_t.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (C) 2018 Red Hat, Inc. All rights reserved.
- *
- * This file is part of LVM2.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License v.2.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#define _GNU_SOURCE
-
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <unistd.h>
-
-#include "lib/device/bcache.h"
-#include "framework.h"
-#include "units.h"
-
-//----------------------------------------------------------------
-
-#define SECTOR_SIZE 512
-#define BLOCK_SIZE_SECTORS 8
-#define NR_BLOCKS 64
-
-struct fixture {
- struct io_engine *e;
- uint8_t *data;
-
- char fname[64];
- int fd;
-};
-
-static void _fill_buffer(uint8_t *buffer, uint8_t seed, size_t count)
-{
- unsigned i;
- uint8_t b = seed;
-
- for (i = 0; i < count; i++) {
- buffer[i] = b;
- b = ((b << 5) + b) + i;
- }
-}
-
-static void _check_buffer(uint8_t *buffer, uint8_t seed, size_t count)
-{
- unsigned i;
- uint8_t b = seed;
-
- for (i = 0; i < count; i++) {
- T_ASSERT_EQUAL(buffer[i], b);
- b = ((b << 5) + b) + i;
- }
-}
-
-static void _print_buffer(const char *name, uint8_t *buffer, size_t count)
-{
- unsigned col;
-
- fprintf(stderr, "%s:\n", name);
- while (count) {
- for (col = 0; count && col < 20; col++) {
- fprintf(stderr, "%x, ", (unsigned) *buffer);
- col++;
- buffer++;
- count--;
- }
- fprintf(stderr, "\n");
- }
-}
-
-static void *_fix_init(void)
-{
- struct fixture *f = malloc(sizeof(*f));
-
- T_ASSERT(f);
- f->e = create_async_io_engine();
- T_ASSERT(f->e);
- if (posix_memalign((void **) &f->data, 4096, SECTOR_SIZE * BLOCK_SIZE_SECTORS))
- test_fail("posix_memalign failed");
-
- snprintf(f->fname, sizeof(f->fname), "unit-test-XXXXXX");
- f->fd = mkstemp(f->fname);
- T_ASSERT(f->fd >= 0);
-
- _fill_buffer(f->data, 123, SECTOR_SIZE * BLOCK_SIZE_SECTORS);
-
- T_ASSERT(write(f->fd, f->data, SECTOR_SIZE * BLOCK_SIZE_SECTORS) > 0);
- T_ASSERT(lseek(f->fd, 0, SEEK_SET) != -1);
-
- return f;
-}
-
-static void _fix_exit(void *fixture)
-{
- struct fixture *f = fixture;
-
- close(f->fd);
- unlink(f->fname);
- free(f->data);
- if (f->e)
- f->e->destroy(f->e);
- free(f);
-}
-
-static void _test_create(void *fixture)
-{
- // empty
-}
-
-struct io {
- bool completed;
- int error;
-};
-
-static void _io_init(struct io *io)
-{
- io->completed = false;
- io->error = 0;
-}
-
-static void _complete_io(void *context, int io_error)
-{
- struct io *io = context;
- io->completed = true;
- io->error = io_error;
-}
-
-static void _test_read(void *fixture)
-{
- struct fixture *f = fixture;
-
- struct io io;
-
- _io_init(&io);
- T_ASSERT(f->e->issue(f->e, DIR_READ, f->fd, 0, BLOCK_SIZE_SECTORS, f->data, &io));
- T_ASSERT(f->e->wait(f->e, _complete_io));
- T_ASSERT(io.completed);
- T_ASSERT(!io.error);
-
- _check_buffer(f->data, 123, sizeof(f->data));
-}
-
-static void _test_write(void *fixture)
-{
- struct fixture *f = fixture;
-
- struct io io;
-
- _io_init(&io);
- T_ASSERT(f->e->issue(f->e, DIR_WRITE, f->fd, 0, BLOCK_SIZE_SECTORS, f->data, &io));
- T_ASSERT(f->e->wait(f->e, _complete_io));
- T_ASSERT(io.completed);
- T_ASSERT(!io.error);
-}
-
-static void _test_write_bytes(void *fixture)
-{
- struct fixture *f = fixture;
-
- unsigned offset = 345;
- char buf_out[32];
- char buf_in[32];
- struct bcache *cache = bcache_create(8, BLOCK_SIZE_SECTORS, f->e);
- T_ASSERT(cache);
-
- // T_ASSERT(bcache_read_bytes(cache, f->fd, offset, sizeof(buf_in), buf_in));
- _fill_buffer((uint8_t *) buf_out, 234, sizeof(buf_out));
- T_ASSERT(bcache_write_bytes(cache, f->fd, offset, sizeof(buf_out), buf_out));
- T_ASSERT(bcache_read_bytes(cache, f->fd, offset, sizeof(buf_in), buf_in));
-
- _print_buffer("buf_out", (uint8_t *) buf_out, sizeof(buf_out));
- _print_buffer("buf_in", (uint8_t *) buf_in, sizeof(buf_in));
- T_ASSERT(!memcmp(buf_out, buf_in, sizeof(buf_out)));
-
- bcache_destroy(cache);
- f->e = NULL; // already destroyed
-}
-
-//----------------------------------------------------------------
-
-#define T(path, desc, fn) register_test(ts, "/base/device/bcache/io-engine/" path, desc, fn)
-
-static struct test_suite *_tests(void)
-{
- struct test_suite *ts = test_suite_create(_fix_init, _fix_exit);
- if (!ts) {
- fprintf(stderr, "out of memory\n");
- exit(1);
- }
-
- T("create-destroy", "simple create/destroy", _test_create);
- T("read", "read sanity check", _test_read);
- T("write", "write sanity check", _test_write);
- T("bcache-write-bytes", "test the utility fns", _test_write_bytes);
-
- return ts;
-}
-
-void io_engine_tests(struct dm_list *all_tests)
-{
- dm_list_add(all_tests, &_tests()->list);
-}
-
diff --git a/test/unit/units.h b/test/unit/units.h
index bc0db8d13..43834bd44 100644
--- a/test/unit/units.h
+++ b/test/unit/units.h
@@ -27,7 +27,6 @@ void bitset_tests(struct dm_list *suites);
void config_tests(struct dm_list *suites);
void dm_list_tests(struct dm_list *suites);
void dm_status_tests(struct dm_list *suites);
-void io_engine_tests(struct dm_list *suites);
void percent_tests(struct dm_list *suites);
void radix_tree_tests(struct dm_list *suites);
void regex_tests(struct dm_list *suites);
@@ -44,7 +43,6 @@ static inline void register_all_tests(struct dm_list *suites)
config_tests(suites);
dm_list_tests(suites);
dm_status_tests(suites);
- io_engine_tests(suites);
percent_tests(suites);
radix_tree_tests(suites);
regex_tests(suites);