author     Stephen Rothwell <sfr@canb.auug.org.au>  2016-09-12 10:43:29 +1000
committer  Stephen Rothwell <sfr@canb.auug.org.au>  2016-09-12 10:43:29 +1000
commit     138c337cf0c2436790877f87e91154b3c3294346 (patch)
tree       5478a0791322fc5a43d1965e3bb20c40365f1484 /lib
parent     e0b9fbaea6a94fba9f9bd422d133e45f984fef03 (diff)
parent     cf9932a9414e241571008edd7412ab22f02b5704 (diff)
download   linux-next-138c337cf0c2436790877f87e91154b3c3294346.tar.gz
Merge remote-tracking branch 'net-next/master'
Diffstat (limited to 'lib')
-rw-r--r--  lib/rhashtable.c  56
1 file changed, 25 insertions, 31 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 56054e541a0f..06c28728bb53 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -444,7 +444,8 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
const void *key,
struct rhash_head *obj,
- struct bucket_table *tbl)
+ struct bucket_table *tbl,
+ void **data)
{
struct rhash_head *head;
unsigned int hash;
@@ -455,8 +456,11 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
err = -EEXIST;
- if (key && rhashtable_lookup_fast(ht, key, ht->p))
- goto exit;
+ if (key) {
+ *data = rhashtable_lookup_fast(ht, key, ht->p);
+ if (*data)
+ goto exit;
+ }
err = -E2BIG;
if (unlikely(rht_grow_above_max(ht, tbl)))
@@ -490,10 +494,9 @@ exit:
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
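The new data out-parameter lets the slow insert path hand back the object that triggered the -EEXIST, so a caller can do "insert, or get the existing entry" in a single pass instead of looking the key up again. Below is a minimal, hypothetical caller-side sketch of that convention; my_insert_or_get is invented for illustration (the real caller is the inline fast path in include/linux/rhashtable.h, which is outside this 'lib' diffstat), and it assumes the function's existing return convention of NULL on success and ERR_PTR() on error.

/* Hypothetical helper showing how the new 'data' out-parameter is used.
 * Not part of this patch; the in-tree caller lives in rhashtable.h. */
static void *my_insert_or_get(struct rhashtable *ht, const void *key,
			      struct rhash_head *obj,
			      struct bucket_table *tbl)
{
	void *data = NULL;

	tbl = rhashtable_insert_slow(ht, key, obj, tbl, &data);
	if (tbl == ERR_PTR(-EEXIST))
		return data;		/* the entry that already holds this key */

	return tbl;			/* NULL on success, table/ERR_PTR otherwise */
}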
/**
- * rhashtable_walk_init - Initialise an iterator
+ * rhashtable_walk_enter - Initialise an iterator
* @ht: Table to walk over
* @iter: Hash table Iterator
- * @gfp: GFP flags for allocations
*
* This function prepares a hash table walk.
*
@@ -508,30 +511,22 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
* This function may sleep so you must not call it from interrupt
* context or with spin locks held.
*
- * You must call rhashtable_walk_exit if this function returns
- * successfully.
+ * You must call rhashtable_walk_exit after this function returns.
*/
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
- gfp_t gfp)
+void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
iter->ht = ht;
iter->p = NULL;
iter->slot = 0;
iter->skip = 0;
- iter->walker = kmalloc(sizeof(*iter->walker), gfp);
- if (!iter->walker)
- return -ENOMEM;
-
spin_lock(&ht->lock);
- iter->walker->tbl =
+ iter->walker.tbl =
rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
- list_add(&iter->walker->list, &iter->walker->tbl->walkers);
+ list_add(&iter->walker.list, &iter->walker.tbl->walkers);
spin_unlock(&ht->lock);
-
- return 0;
}
-EXPORT_SYMBOL_GPL(rhashtable_walk_init);
+EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
/**
* rhashtable_walk_exit - Free an iterator
@@ -542,10 +537,9 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
spin_lock(&iter->ht->lock);
- if (iter->walker->tbl)
- list_del(&iter->walker->list);
+ if (iter->walker.tbl)
+ list_del(&iter->walker.list);
spin_unlock(&iter->ht->lock);
- kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
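Taken together with the kernel-doc above, the iterator lifecycle becomes: rhashtable_walk_enter()/rhashtable_walk_exit() bracket the whole walk and no longer allocate anything, while rhashtable_walk_start()/rhashtable_walk_stop() still take and drop the RCU read lock and may signal -EAGAIN when a resize interrupts the walk. A minimal sketch of a walk loop under the reworked API; struct my_obj and my_walk are placeholders, not part of this patch:

/* Illustrative walk loop; struct my_obj and my_walk are invented names. */
static void my_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct my_obj *obj;

	rhashtable_walk_enter(ht, &iter);		/* no allocation, cannot fail */
	do {
		rhashtable_walk_start(&iter);		/* takes the RCU read lock */

		while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj)) {
			/* use obj here; RCU protected, do not sleep */
		}

		rhashtable_walk_stop(&iter);		/* drops the RCU read lock */
	} while (obj == ERR_PTR(-EAGAIN));		/* resize ran; walk the new table */

	rhashtable_walk_exit(&iter);			/* nothing left to free */
}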
@@ -571,12 +565,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
rcu_read_lock();
spin_lock(&ht->lock);
- if (iter->walker->tbl)
- list_del(&iter->walker->list);
+ if (iter->walker.tbl)
+ list_del(&iter->walker.list);
spin_unlock(&ht->lock);
- if (!iter->walker->tbl) {
- iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
+ if (!iter->walker.tbl) {
+ iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
return -EAGAIN;
}
@@ -598,7 +592,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start);
*/
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
- struct bucket_table *tbl = iter->walker->tbl;
+ struct bucket_table *tbl = iter->walker.tbl;
struct rhashtable *ht = iter->ht;
struct rhash_head *p = iter->p;
@@ -631,8 +625,8 @@ next:
/* Ensure we see any new tables. */
smp_rmb();
- iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- if (iter->walker->tbl) {
+ iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (iter->walker.tbl) {
iter->slot = 0;
iter->skip = 0;
return ERR_PTR(-EAGAIN);
@@ -652,7 +646,7 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
__releases(RCU)
{
struct rhashtable *ht;
- struct bucket_table *tbl = iter->walker->tbl;
+ struct bucket_table *tbl = iter->walker.tbl;
if (!tbl)
goto out;
@@ -661,9 +655,9 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
spin_lock(&ht->lock);
if (tbl->rehash < tbl->size)
- list_add(&iter->walker->list, &tbl->walkers);
+ list_add(&iter->walker.list, &tbl->walkers);
else
- iter->walker->tbl = NULL;
+ iter->walker.tbl = NULL;
spin_unlock(&ht->lock);
iter->p = NULL;
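Every iter->walker-> access above becomes iter->walker., which only works if a matching header change embeds the walker in the iterator instead of pointing at a kmalloc'ed one. A hedged reconstruction of the two structures after this patch, based solely on the fields this diff touches (the authoritative definitions are in include/linux/rhashtable.h, outside this 'lib' diffstat):

/* Reconstructed from the fields used in this diff, not copied from the header. */
struct rhashtable_walker {
	struct list_head list;			/* linked into tbl->walkers */
	struct bucket_table *tbl;		/* table this walker is attached to */
};

struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhashtable_walker walker;	/* was: struct rhashtable_walker *walker */
	unsigned int slot;
	unsigned int skip;
};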