author    Li Zefan <lizf@cn.fujitsu.com>  2011-04-20 10:33:24 +0800
committer Li Zefan <lizf@cn.fujitsu.com>  2011-04-25 16:46:11 +0800
commit    82d5902d9c681be37ffa9d70482907f9f0b7ec1f
tree      c9c99f0b60004ac14d09d277d3216667df09c32d /fs/btrfs/inode-map.c
parent    33345d01522f8152f99dc84a3e7a1a45707f387f
Btrfs: Support reading/writing on disk free ino cache
This is similar to block group caching. We dedicate a special inode in the fs tree to save the free ino cache.

The very first time we create/delete a file after mount, the free ino cache is loaded from disk into memory. When the fs tree is committed, the cache is written back to disk.

To keep compatibility, we check the root generation against the generation of the special inode when loading the cache, so loading will fail if the btrfs filesystem was previously mounted by an older kernel.

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
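The compatibility rule above can be illustrated with a minimal standalone sketch (not the kernel implementation; the struct and helper names are made up for illustration): the on-disk cache is only trusted when the generation it was stamped with matches the current root generation, otherwise it is discarded and the cache is rebuilt in memory.

    /* Illustrative only: names and layout are assumptions, not btrfs code. */
    struct ino_cache_header {
            unsigned long long generation;  /* generation recorded when the cache was written */
    };

    /* Return 1 if the on-disk cache may be used, 0 if it must be ignored. */
    static int ino_cache_usable(unsigned long long root_generation,
                                const struct ino_cache_header *hdr)
    {
            /*
             * A missing or mismatched generation means the cache was written
             * by an older kernel (or never written back) and cannot be trusted.
             */
            return hdr && hdr->generation == root_generation;
    }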
Diffstat (limited to 'fs/btrfs/inode-map.c')
-rw-r--r--  fs/btrfs/inode-map.c  87
1 file changed, 87 insertions(+), 0 deletions(-)
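The preallocation sizing in btrfs_save_ino_cache() in the diff below is plain arithmetic: one struct btrfs_free_space per free extent, rounded up to a whole page, plus one page per bitmap, plus eight pages of slack. Here is a standalone sketch of that calculation; PAGE_CACHE_SIZE and the per-entry size are assumed example values, not taken from the patch.

    #include <stdio.h>

    #define PAGE_CACHE_SIZE 4096UL                 /* assumed 4 KiB pages */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    /* Mirrors the sizing logic in btrfs_save_ino_cache() below. */
    static unsigned long ino_cache_prealloc(unsigned long free_extents,
                                            unsigned long total_bitmaps,
                                            unsigned long entry_size)
    {
            unsigned long prealloc = entry_size * free_extents;

            prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);    /* round extent entries up to pages */
            prealloc += total_bitmaps * PAGE_CACHE_SIZE;    /* one page per bitmap */
            prealloc += 8 * PAGE_CACHE_SIZE;                /* extra slack, as in the patch */
            return prealloc;
    }

    int main(void)
    {
            /* e.g. 100 free extents at an assumed 48 bytes each, plus 2 bitmaps */
            printf("prealloc = %lu bytes\n", ino_cache_prealloc(100, 2, 48));
            return 0;
    }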
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 5be62df90c4f..7967e85c72f5 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -137,6 +137,7 @@ out:
static void start_caching(struct btrfs_root *root)
{
struct task_struct *tsk;
+ int ret;
spin_lock(&root->cache_lock);
if (root->cached != BTRFS_CACHE_NO) {
@@ -147,6 +148,14 @@ static void start_caching(struct btrfs_root *root)
root->cached = BTRFS_CACHE_STARTED;
spin_unlock(&root->cache_lock);
+ ret = load_free_ino_cache(root->fs_info, root);
+ if (ret == 1) {
+ spin_lock(&root->cache_lock);
+ root->cached = BTRFS_CACHE_FINISHED;
+ spin_unlock(&root->cache_lock);
+ return;
+ }
+
tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
root->root_key.objectid);
BUG_ON(IS_ERR(tsk));
@@ -352,6 +361,84 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root)
pinned->op = &pinned_free_ino_op;
}
+int btrfs_save_ino_cache(struct btrfs_root *root,
+ struct btrfs_trans_handle *trans)
+{
+ struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct btrfs_path *path;
+ struct inode *inode;
+ u64 alloc_hint = 0;
+ int ret;
+ int prealloc;
+ bool retry = false;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+again:
+ inode = lookup_free_ino_inode(root, path);
+ if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+
+ if (IS_ERR(inode)) {
+ BUG_ON(retry);
+ retry = true;
+
+ ret = create_free_ino_inode(root, trans, path);
+ if (ret)
+ goto out;
+ goto again;
+ }
+
+ BTRFS_I(inode)->generation = 0;
+ ret = btrfs_update_inode(trans, root, inode);
+ WARN_ON(ret);
+
+ if (i_size_read(inode) > 0) {
+ ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
+ if (ret)
+ goto out_put;
+ }
+
+ spin_lock(&root->cache_lock);
+ if (root->cached != BTRFS_CACHE_FINISHED) {
+ ret = -1;
+ spin_unlock(&root->cache_lock);
+ goto out_put;
+ }
+ spin_unlock(&root->cache_lock);
+
+ spin_lock(&ctl->tree_lock);
+ prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
+ prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
+ prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
+ spin_unlock(&ctl->tree_lock);
+
+ /* Just to make sure we have enough space */
+ prealloc += 8 * PAGE_CACHE_SIZE;
+
+ ret = btrfs_check_data_free_space(inode, prealloc);
+ if (ret)
+ goto out_put;
+
+ ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
+ prealloc, prealloc, &alloc_hint);
+ if (ret)
+ goto out_put;
+ btrfs_free_reserved_data_space(inode, prealloc);
+
+out_put:
+ iput(inode);
+out:
+ if (ret == 0)
+ ret = btrfs_write_out_ino_cache(root, trans, path);
+
+ btrfs_free_path(path);
+ return ret;
+}
+
static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
{
struct btrfs_path *path;