Diffstat (limited to 'drivers/block/zram')
 drivers/block/zram/zcomp.c    | 12
 drivers/block/zram/zcomp.h    |  1
 drivers/block/zram/zram_drv.c | 36
 drivers/block/zram/zram_drv.h | 41
 4 files changed, 71 insertions(+), 19 deletions(-)
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 4b5cd3a7b2b6..fa8329ad79fd 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -118,12 +118,19 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
- return *get_cpu_ptr(comp->stream);
+ struct zcomp_strm *zstrm;
+
+ zstrm = *this_cpu_ptr(comp->stream);
+ spin_lock(&zstrm->zcomp_lock);
+ return zstrm;
}
void zcomp_stream_put(struct zcomp *comp)
{
- put_cpu_ptr(comp->stream);
+ struct zcomp_strm *zstrm;
+
+ zstrm = *this_cpu_ptr(comp->stream);
+ spin_unlock(&zstrm->zcomp_lock);
}
int zcomp_compress(struct zcomp_strm *zstrm,
@@ -174,6 +181,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp,
pr_err("Can't allocate a compression stream\n");
return NOTIFY_BAD;
}
+ spin_lock_init(&zstrm->zcomp_lock);
*per_cpu_ptr(comp->stream, cpu) = zstrm;
break;
case CPU_DEAD:
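
Note on the zcomp.c hunks above: the preempt-disabling get_cpu_ptr()/put_cpu_ptr() pair is replaced by a plain this_cpu_ptr() lookup plus a per-stream spinlock, which PREEMPT_RT turns into a sleeping lock; the lock is initialized in the CPU hotplug notifier when the stream is allocated. As a reading aid only, here is a sketch of how the two helpers read once the hunks are applied (the patched result, not a further change):

    struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
    {
        struct zcomp_strm *zstrm;

        /* Look up this CPU's stream without disabling preemption. */
        zstrm = *this_cpu_ptr(comp->stream);
        /* Serialize users of the stream; a sleeping lock on PREEMPT_RT. */
        spin_lock(&zstrm->zcomp_lock);
        return zstrm;
    }

    void zcomp_stream_put(struct zcomp *comp)
    {
        struct zcomp_strm *zstrm;

        zstrm = *this_cpu_ptr(comp->stream);
        spin_unlock(&zstrm->zcomp_lock);
    }
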
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 478cac2ed465..f7a6efdc3285 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -14,6 +14,7 @@ struct zcomp_strm {
/* compression/decompression buffer */
void *buffer;
struct crypto_comp *tfm;
+ spinlock_t zcomp_lock;
};
/* dynamic per-device compression frontend */
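
With the new field, each per-CPU stream carries its own lock next to the scratch buffer and crypto transform it protects. Sketched in full for reference (the first two members are unchanged from the existing header; only zcomp_lock is new):

    struct zcomp_strm {
        /* compression/decompression buffer */
        void *buffer;
        struct crypto_comp *tfm;
        /* Serializes use of this stream; taken in zcomp_stream_get(),
         * dropped in zcomp_stream_put(). */
        spinlock_t zcomp_lock;
    };
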
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index c9914d653968..2038d138f286 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -528,6 +528,8 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
goto out_error;
}
+ zram_meta_init_table_locks(meta, disksize);
+
return meta;
out_error:
@@ -575,28 +577,28 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
struct zram_meta *meta = zram->meta;
unsigned long handle;
unsigned int size;
+ struct zcomp_strm *zstrm;
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
handle = meta->table[index].handle;
size = zram_get_obj_size(meta, index);
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
memset(mem, 0, PAGE_SIZE);
return 0;
}
+ zstrm = zcomp_stream_get(zram->comp);
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
memcpy(mem, cmem, PAGE_SIZE);
} else {
- struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
-
ret = zcomp_decompress(zstrm, cmem, size, mem);
- zcomp_stream_put(zram->comp);
}
zs_unmap_object(meta->mem_pool, handle);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zcomp_stream_put(zram->comp);
+ zram_unlock_table(&meta->table[index]);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret)) {
@@ -616,14 +618,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
if (unlikely(!meta->table[index].handle) ||
zram_test_flag(meta, index, ZRAM_ZERO)) {
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
handle_zero_page(bvec);
return 0;
}
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
if (is_partial_io(bvec))
/* Use a temporary buffer to decompress the page */
@@ -700,10 +702,10 @@ compress_again:
if (user_mem)
kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
zram_free_page(zram, index);
zram_set_flag(meta, index, ZRAM_ZERO);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
atomic64_inc(&zram->stats.zero_pages);
ret = 0;
@@ -794,12 +796,12 @@ compress_again:
* Free memory associated with this sector
* before overwriting unused sectors.
*/
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
zram_free_page(zram, index);
meta->table[index].handle = handle;
zram_set_obj_size(meta, index, clen);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
/* Update stats */
atomic64_add(clen, &zram->stats.compr_data_size);
@@ -842,9 +844,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
while (n >= PAGE_SIZE) {
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
zram_free_page(zram, index);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
atomic64_inc(&zram->stats.notify_free);
index++;
n -= PAGE_SIZE;
@@ -973,9 +975,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
zram = bdev->bd_disk->private_data;
meta = zram->meta;
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
zram_free_page(zram, index);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
atomic64_inc(&zram->stats.notify_free);
}
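
Across zram_drv.c, every bit_spin_lock(ZRAM_ACCESS, ...)/bit_spin_unlock() pair on a table entry is replaced by the zram_lock_table()/zram_unlock_table() wrappers defined in zram_drv.h below. The call sites are otherwise untouched, except in zram_decompress_page(), where the compression stream is now taken before the object is mapped and released only after it is unmapped, all while the table entry stays locked. A sketch of the resulting locking skeleton of zram_decompress_page() (zero-page handling and error paths elided):

    zram_lock_table(&meta->table[index]);        /* outer: per-entry lock */
    handle = meta->table[index].handle;
    size = zram_get_obj_size(meta, index);

    zstrm = zcomp_stream_get(zram->comp);        /* inner: per-CPU stream lock */
    cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
    ret = zcomp_decompress(zstrm, cmem, size, mem);
    zs_unmap_object(meta->mem_pool, handle);
    zcomp_stream_put(zram->comp);                /* drop inner lock */

    zram_unlock_table(&meta->table[index]);      /* drop outer lock */
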
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 74fcf10da374..fd4020c99b9e 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -73,6 +73,9 @@ enum zram_pageflags {
struct zram_table_entry {
unsigned long handle;
unsigned long value;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spinlock_t lock;
+#endif
};
struct zram_stats {
@@ -120,4 +123,42 @@ struct zram {
*/
bool claim; /* Protected by bdev->bd_mutex */
};
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+static inline void zram_lock_table(struct zram_table_entry *table)
+{
+ bit_spin_lock(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_unlock_table(struct zram_table_entry *table)
+{
+ bit_spin_unlock(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { }
+#else /* CONFIG_PREEMPT_RT_BASE */
+static inline void zram_lock_table(struct zram_table_entry *table)
+{
+ spin_lock(&table->lock);
+ __set_bit(ZRAM_ACCESS, &table->value);
+}
+
+static inline void zram_unlock_table(struct zram_table_entry *table)
+{
+ __clear_bit(ZRAM_ACCESS, &table->value);
+ spin_unlock(&table->lock);
+}
+
+static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
+{
+ size_t num_pages = disksize >> PAGE_SHIFT;
+ size_t index;
+
+ for (index = 0; index < num_pages; index++) {
+ spinlock_t *lock = &meta->table[index].lock;
+ spin_lock_init(lock);
+ }
+}
+#endif /* CONFIG_PREEMPT_RT_BASE */
+
#endif
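
The header keeps two implementations behind CONFIG_PREEMPT_RT_BASE: on !RT builds the wrappers remain the original bit spinlock on the ZRAM_ACCESS bit of table->value and the init helper compiles away; on RT builds each zram_table_entry gains a real spinlock_t (a sleeping lock there), __set_bit()/__clear_bit() keep the ZRAM_ACCESS bit in ->value in step with the lock, and zram_meta_init_table_locks() sets up one lock per page of disksize. On the caller side (see the zram_meta_alloc() hunk above), the init runs once when the device's metadata is allocated; a sketch of that call site after the patch, with the surrounding allocation code elided:

    static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
    {
        /* ... allocate meta, meta->table and meta->mem_pool as before ... */

        /* One spinlock per table entry on RT; compiles to nothing on !RT. */
        zram_meta_init_table_locks(meta, disksize);

        return meta;
        /* ... out_error unwinding unchanged ... */
    }
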