author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-04-01 23:19:31 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-04-01 23:19:31 +0200
commit     78eaf7bbf2853f0726c445dbf90234ba299c6f32 (patch)
tree       6357dc3e58559adbaef20d0c1c24d333749a3a4d /patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
parent     1df1c6d97b8a65305fdbd9d8d90fecd48718fc83 (diff)
download   linux-rt-78eaf7bbf2853f0726c445dbf90234ba299c6f32.tar.gz
[ANNOUNCE] 4.4.6-rt13
Dear RT folks!
I'm pleased to announce the v4.4.6-rt13 patch set.
Changes since v4.4.6-rt12:
- Alexandre Belloni sent a patch for the AT91 to get rid of the free_irq()
warning.
- Yang Shi sent a patch to address a "sleeping while atomic" warning in
  a writeback tracepoint. Until now the tracepoint had been disabled to
  avoid the warning; now it can be used again.
- Rik van Riel sent a patch to make the kvm async pagefault code use a
simple wait queue.
- Mike Galbraith sent a patch to address a "sleeping while atomic"
  warning in zsmalloc.
- Network packets sent by an RT task could be delayed (though they won't
  block the RT task) if a task with lower priority was interrupted while
  sending a packet. This is addressed by taking a qdisc lock so the
  high-priority task can boost the task with lower priority; a userspace
  sketch of the boosting idea follows this list.
- Clark Williams reported a swait-related complete_all() warning while
  coming out of suspend. Suspend to RAM (and hibernate) are now
  filtered out from the warning.
- Mike Galbraith sent a patch to address a "sleeping while atomic"
warning in the zram driver.
- Josh Cartwright sent a patch to fix a lockdep splat in list_bl which
was reported by Luis Claudio R. Goncalves.
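
As a rough illustration of the boosting mechanism behind the qdisc change:
on PREEMPT_RT, spinlock_t is backed by an rtmutex with priority
inheritance, so a high-priority sender that blocks on the qdisc lock lends
its priority to the low-priority lock holder instead of waiting behind it.
The standalone C sketch below shows the same protocol in userspace via
PTHREAD_PRIO_INHERIT; "qdisc_lock" is only an illustrative name here, not
the kernel API.

/* Build: gcc -o pi_demo pi_demo.c -lpthread */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t qdisc_lock;

int main(void)
{
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        /* Priority inheritance: a high-priority thread blocking on this
         * mutex lends its priority to the current owner, as the rtmutex
         * underlying spinlock_t does on PREEMPT_RT. */
        pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
        pthread_mutex_init(&qdisc_lock, &attr);

        pthread_mutex_lock(&qdisc_lock);
        /* Critical section: if a higher-priority sender now blocks on
         * qdisc_lock, this thread runs boosted until it unlocks. */
        printf("holding the PI lock\n");
        pthread_mutex_unlock(&qdisc_lock);

        pthread_mutex_destroy(&qdisc_lock);
        pthread_mutexattr_destroy(&attr);
        return 0;
}

Under SCHED_FIFO, the holder of such a mutex runs at the priority of its
highest-priority waiter until it releases the lock, which is the boost the
qdisc change relies on.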
Known issues:
- CPU hotplug got a little better but can deadlock.
The delta patch against 4.4.6-rt12 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/incr/patch-4.4.6-rt12-rt13.patch.xz
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.4.6-rt13
The RT patch against 4.4.6 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patch-4.4.6-rt13.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.6-rt13.tar.xz
Sebastian
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Diffstat (limited to 'patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch')
-rw-r--r--  patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch | 175
1 file changed, 175 insertions(+), 0 deletions(-)
diff --git a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
new file mode 100644
index 000000000000..72c412350f53
--- /dev/null
+++ b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -0,0 +1,175 @@
+From: Mike Galbraith <umgwanakikbuti@gmail.com>
+Date: Thu, 31 Mar 2016 04:08:28 +0200
+Subject: [PATCH] drivers/block/zram: Replace bit spinlocks with rtmutex
+ for -rt
+
+They're nondeterministic, and lead to ___might_sleep() splats in -rt.
+OTOH, they're a lot less wasteful than an rtmutex per page.
+
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/block/zram/zram_drv.c |   30 ++++++++++++++++--------------
+ drivers/block/zram/zram_drv.h |   41 +++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 57 insertions(+), 14 deletions(-)
+
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -520,6 +520,8 @@ static struct zram_meta *zram_meta_alloc
+ 		goto out_error;
+ 	}
+
++	zram_meta_init_table_locks(meta, disksize);
++
+ 	return meta;
+
+ out_error:
+@@ -568,12 +570,12 @@ static int zram_decompress_page(struct z
+ 	unsigned long handle;
+ 	size_t size;
+
+-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++	zram_lock_table(&meta->table[index]);
+ 	handle = meta->table[index].handle;
+ 	size = zram_get_obj_size(meta, index);
+
+ 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++		zram_unlock_table(&meta->table[index]);
+ 		clear_page(mem);
+ 		return 0;
+ 	}
+@@ -584,7 +586,7 @@ static int zram_decompress_page(struct z
+ 	else
+ 		ret = zcomp_decompress(zram->comp, cmem, size, mem);
+ 	zs_unmap_object(meta->mem_pool, handle);
+-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++	zram_unlock_table(&meta->table[index]);
+
+ 	/* Should NEVER happen. Return bio error if it does. */
+ 	if (unlikely(ret)) {
+@@ -604,14 +606,14 @@ static int zram_bvec_read(struct zram *z
+ 	struct zram_meta *meta = zram->meta;
+ 	page = bvec->bv_page;
+
+-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++	zram_lock_table(&meta->table[index]);
+ 	if (unlikely(!meta->table[index].handle) ||
+ 			zram_test_flag(meta, index, ZRAM_ZERO)) {
+-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++		zram_unlock_table(&meta->table[index]);
+ 		handle_zero_page(bvec);
+ 		return 0;
+ 	}
+-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++	zram_unlock_table(&meta->table[index]);
+
+ 	if (is_partial_io(bvec))
+ 		/* Use a temporary buffer to decompress the page */
+@@ -689,10 +691,10 @@ static int zram_bvec_write(struct zram *
+ 		if (user_mem)
+ 			kunmap_atomic(user_mem);
+ 		/* Free memory associated with this sector now. */
+-		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++		zram_lock_table(&meta->table[index]);
+ 		zram_free_page(zram, index);
+ 		zram_set_flag(meta, index, ZRAM_ZERO);
+-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++		zram_unlock_table(&meta->table[index]);
+
+ 		atomic64_inc(&zram->stats.zero_pages);
+ 		ret = 0;
+@@ -752,12 +754,12 @@ static int zram_bvec_write(struct zram *
+ 	 * Free memory associated with this sector
+ 	 * before overwriting unused sectors.
+ 	 */
+-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++	zram_lock_table(&meta->table[index]);
+ 	zram_free_page(zram, index);
+
+ 	meta->table[index].handle = handle;
+ 	zram_set_obj_size(meta, index, clen);
+-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++	zram_unlock_table(&meta->table[index]);
+
+ 	/* Update stats */
+ 	atomic64_add(clen, &zram->stats.compr_data_size);
+@@ -800,9 +802,9 @@ static void zram_bio_discard(struct zram
+ 	}
+
+ 	while (n >= PAGE_SIZE) {
+-		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++		zram_lock_table(&meta->table[index]);
+ 		zram_free_page(zram, index);
+-		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++		zram_unlock_table(&meta->table[index]);
+ 		atomic64_inc(&zram->stats.notify_free);
+ 		index++;
+ 		n -= PAGE_SIZE;
+@@ -928,9 +930,9 @@ static void zram_slot_free_notify(struct
+ 	zram = bdev->bd_disk->private_data;
+ 	meta = zram->meta;
+
+-	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++	zram_lock_table(&meta->table[index]);
+ 	zram_free_page(zram, index);
+-	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++	zram_unlock_table(&meta->table[index]);
+ 	atomic64_inc(&zram->stats.notify_free);
+ }
+
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -72,6 +72,9 @@ enum zram_pageflags {
+ struct zram_table_entry {
+ 	unsigned long handle;
+ 	unsigned long value;
++#ifdef CONFIG_PREEMPT_RT_BASE
++	spinlock_t lock;
++#endif
+ };
+
+ struct zram_stats {
+@@ -119,4 +122,42 @@ struct zram {
+ 	 */
+ 	bool claim; /* Protected by bdev->bd_mutex */
+ };
++
++#ifndef CONFIG_PREEMPT_RT_BASE
++static inline void zram_lock_table(struct zram_table_entry *table)
++{
++	bit_spin_lock(ZRAM_ACCESS, &table->value);
++}
++
++static inline void zram_unlock_table(struct zram_table_entry *table)
++{
++	bit_spin_unlock(ZRAM_ACCESS, &table->value);
++}
++
++static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { }
++#else /* CONFIG_PREEMPT_RT_BASE */
++static inline void zram_lock_table(struct zram_table_entry *table)
++{
++	spin_lock(&table->lock);
++	__set_bit(ZRAM_ACCESS, &table->value);
++}
++
++static inline void zram_unlock_table(struct zram_table_entry *table)
++{
++	__clear_bit(ZRAM_ACCESS, &table->value);
++	spin_unlock(&table->lock);
++}
++
++static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
++{
++	size_t num_pages = disksize >> PAGE_SHIFT;
++	size_t index;
++
++	for (index = 0; index < num_pages; index++) {
++		spinlock_t *lock = &meta->table[index].lock;
++		spin_lock_init(lock);
++	}
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ #endif
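
For readers who want the patch's locking pattern in isolation: under
CONFIG_PREEMPT_RT_BASE the per-entry bit spinlock inside 'value' is
replaced by a real, sleeping per-entry lock, while the ZRAM_ACCESS bit is
still set and cleared so code that inspects the flag keeps working. Below
is a minimal userspace sketch of that pattern, using pthread mutexes in
place of the kernel's spinlock_t; the struct and function names mirror the
patch but this is an illustration, not the kernel code.

/* Build: gcc -o zram_lock_demo zram_lock_demo.c -lpthread */
#include <pthread.h>
#include <stdlib.h>

#define ZRAM_ACCESS_BIT 0x1UL   /* stand-in for the kernel's ZRAM_ACCESS flag */

/* Mirrors the patched struct zram_table_entry: the flag word stays,
 * but locking moves from a bit in 'value' to a dedicated lock object. */
struct table_entry {
        unsigned long handle;
        unsigned long value;
        pthread_mutex_t lock;   /* analogue of the per-entry spinlock_t */
};

static void entry_lock(struct table_entry *e)
{
        pthread_mutex_lock(&e->lock);
        e->value |= ZRAM_ACCESS_BIT;    /* keep the flag bit coherent,
                                         * as __set_bit() does in the patch */
}

static void entry_unlock(struct table_entry *e)
{
        e->value &= ~ZRAM_ACCESS_BIT;
        pthread_mutex_unlock(&e->lock);
}

int main(void)
{
        size_t num_pages = 4, i;
        struct table_entry *table = calloc(num_pages, sizeof(*table));

        if (!table)
                return 1;
        /* analogue of zram_meta_init_table_locks(): one lock per entry */
        for (i = 0; i < num_pages; i++)
                pthread_mutex_init(&table[i].lock, NULL);

        entry_lock(&table[0]);
        table[0].handle = 0xdeadbeef;   /* update made under the entry lock */
        entry_unlock(&table[0]);

        for (i = 0; i < num_pages; i++)
                pthread_mutex_destroy(&table[i].lock);
        free(table);
        return 0;
}

The memory trade-off noted in the commit message is visible here: a lock
object per table entry costs space that the single flag bit did not, which
is why the cheap bit spinlock is kept for non-RT builds and only -rt pays
for the sleeping per-entry lock.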