author		Elena Reshetova <elena.reshetova@intel.com>	2017-10-20 12:53:38 +0300
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2017-11-17 13:48:01 -0500
commit		212bf41d88c06afc23e03f9b274eebf1e8dba197 (patch)
tree		755a58e9b423e6e8bfe06fa2036b633a427ff42b /fs/nfs/nfs4proc.c
parent		2f62b5aa4814be2c511553fd6afb4d35b6c2503b (diff)
download	linux-next-212bf41d88c06afc23e03f9b274eebf1e8dba197.tar.gz
fs, nfs: convert nfs_client.cl_count from atomic_t to refcount_t
atomic_t variables are currently used to implement reference counters
with the following properties:
 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to a newly provided
refcount_t type and API that prevents accidental counter overflows
and underflows. This is important since overflows and underflows
can lead to use-after-free situation and be exploitable.

The variable nfs_client.cl_count is used as pure reference counter.
Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Windsor <dwindsor@gmail.com>
Reviewed-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
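For illustration only, not part of this patch: a minimal sketch of the
atomic_t -> refcount_t pattern the message describes, using a hypothetical
struct foo with foo_get()/foo_put() helpers. The refcount_* calls map
one-for-one onto the atomic_* calls they replace, but saturate instead of
wrapping on overflow/underflow.

	/* Hypothetical object, not from nfs4proc.c. */
	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct foo {
		refcount_t count;		/* was: atomic_t count; */
	};

	static struct foo *foo_alloc(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		refcount_set(&f->count, 1);	/* was: atomic_set(&f->count, 1); */
		return f;
	}

	/* Take a reference only if the object is still live. */
	static struct foo *foo_get(struct foo *f)
	{
		if (!refcount_inc_not_zero(&f->count))	/* was: atomic_inc_not_zero() */
			return NULL;
		return f;
	}

	/* Drop a reference; free on the last put. */
	static void foo_put(struct foo *f)
	{
		if (refcount_dec_and_test(&f->count))	/* was: atomic_dec_and_test() */
			kfree(f);
	}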
Diffstat (limited to 'fs/nfs/nfs4proc.c')
-rw-r--r--	fs/nfs/nfs4proc.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index be8c75a2cbbe..82e5ed2ee6ba 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4870,7 +4870,7 @@ static void nfs4_renew_release(void *calldata)
 	struct nfs4_renewdata *data = calldata;
 	struct nfs_client *clp = data->client;
 
-	if (atomic_read(&clp->cl_count) > 1)
+	if (refcount_read(&clp->cl_count) > 1)
 		nfs4_schedule_state_renewal(clp);
 	nfs_put_client(clp);
 	kfree(data);
@@ -4918,7 +4918,7 @@ static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred,
 
 	if (renew_flags == 0)
 		return 0;
-	if (!atomic_inc_not_zero(&clp->cl_count))
+	if (!refcount_inc_not_zero(&clp->cl_count))
 		return -EIO;
 	data = kmalloc(sizeof(*data), GFP_NOFS);
 	if (data == NULL) {
@@ -7499,7 +7499,7 @@ nfs4_run_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
 	struct nfs41_exchange_id_data *calldata;
 	int status;
 
-	if (!atomic_inc_not_zero(&clp->cl_count))
+	if (!refcount_inc_not_zero(&clp->cl_count))
 		return ERR_PTR(-EIO);
 
 	status = -ENOMEM;
@@ -8099,7 +8099,7 @@ static void nfs41_sequence_release(void *data)
 	struct nfs4_sequence_data *calldata = data;
 	struct nfs_client *clp = calldata->clp;
 
-	if (atomic_read(&clp->cl_count) > 1)
+	if (refcount_read(&clp->cl_count) > 1)
 		nfs4_schedule_state_renewal(clp);
 	nfs_put_client(clp);
 	kfree(calldata);
@@ -8128,7 +8128,7 @@ static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
 	trace_nfs4_sequence(clp, task->tk_status);
 	if (task->tk_status < 0) {
 		dprintk("%s ERROR %d\n", __func__, task->tk_status);
-		if (atomic_read(&clp->cl_count) == 1)
+		if (refcount_read(&clp->cl_count) == 1)
 			goto out;
 
 		if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
@@ -8179,7 +8179,7 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
 	struct rpc_task *ret;
 
 	ret = ERR_PTR(-EIO);
-	if (!atomic_inc_not_zero(&clp->cl_count))
+	if (!refcount_inc_not_zero(&clp->cl_count))
 		goto out_err;
 
 	ret = ERR_PTR(-ENOMEM);