author     Zhu Yanjun <yanjun.zhu@linux.dev>    2023-01-16 14:35:02 -0500
committer  Leon Romanovsky <leon@kernel.org>    2023-01-26 12:58:46 +0200
commit     2f25e3bab00e97658a454a3e017b49157909321f (patch)
tree       26047e87fb3a70b12e34afabfb66b140116990e2 /drivers/infiniband
parent     e965ef0e7b2ce2564f20f13c1fc369d886bc2544 (diff)
download   linux-2f25e3bab00e97658a454a3e017b49157909321f.tar.gz
RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq
Split the source code related to CQ handling out into a new function,
irdma_reg_user_mr_type_cq().

Reviewed-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Link: https://lore.kernel.org/r/20230116193502.66540-5-yanjun.zhu@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
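For illustration only, the dispatch shape this patch leaves behind in irdma_reg_user_mr() can be modelled with a small standalone sketch. The types and helpers below (reg_user_mr, reg_user_mr_type_cq, mem_reg_req, mr) are simplified stand-ins, not the real irdma structures; only the CQ page-count check mirrors the logic moved into the new helper in the diff below.

/*
 * Standalone sketch (not kernel code) of the pattern after this patch:
 * each registration type is handled by its own helper, and the CQ branch
 * validates the page count (CQ pages plus an optional shadow page)
 * inside the helper itself.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum reg_type { MEMREG_TYPE_QP, MEMREG_TYPE_CQ, MEMREG_TYPE_MEM };

struct mem_reg_req {
	enum reg_type reg_type;
	unsigned int cq_pages;
};

struct mr {
	unsigned int page_cnt;	/* pages actually pinned for this MR */
};

/* Stand-in for irdma_reg_user_mr_type_cq(): the requested CQ pages plus
 * one shadow page (skipped when CQ resize is supported) must fit in the
 * registered region. */
static int reg_user_mr_type_cq(const struct mem_reg_req *req,
			       const struct mr *mr, bool cq_resize_supported)
{
	unsigned int shadow_pgcnt = cq_resize_supported ? 0 : 1;
	unsigned int total = req->cq_pages + shadow_pgcnt;

	if (total > mr->page_cnt)
		return -EINVAL;

	/* the real helper would then map the queue memory and record the
	 * PBL on the user context's CQ registration list */
	return 0;
}

static int reg_user_mr(const struct mem_reg_req *req, const struct mr *mr)
{
	switch (req->reg_type) {
	case MEMREG_TYPE_CQ:
		return reg_user_mr_type_cq(req, mr, /*cq_resize_supported=*/false);
	case MEMREG_TYPE_QP:
	case MEMREG_TYPE_MEM:
		return 0;	/* handled by their own helpers in the real driver */
	default:
		return -EINVAL;
	}
}

int main(void)
{
	struct mr mr = { .page_cnt = 2 };
	struct mem_reg_req req = { .reg_type = MEMREG_TYPE_CQ, .cq_pages = 2 };

	/* 2 CQ pages + 1 shadow page > 2 registered pages -> prints -22 (-EINVAL) */
	printf("reg_user_mr: %d\n", reg_user_mr(&req, &mr));
	return 0;
}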
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c  |  69
1 file changed, 40 insertions(+), 29 deletions(-)
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 93a8997d6267..6982f38596c8 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2867,6 +2867,40 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
return 0;
}
+static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_ucontext *ucontext = NULL;
+ u8 shadow_pgcnt = 1;
+ unsigned long flags;
+ bool use_pbles;
+ u32 total;
+ int err;
+
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+ shadow_pgcnt = 0;
+ total = req.cq_pages + shadow_pgcnt;
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+
+ use_pbles = req.cq_pages > 1;
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ if (err)
+ return err;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+ return 0;
+}
+
/**
* irdma_reg_user_mr - Register a user memory region
* @pd: ptr of pd
@@ -2882,16 +2916,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
{
#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
struct irdma_device *iwdev = to_iwdev(pd->device);
- struct irdma_ucontext *ucontext;
- struct irdma_pbl *iwpbl;
- struct irdma_mr *iwmr;
- struct ib_umem *region;
- struct irdma_mem_reg_req req;
- u32 total;
- u8 shadow_pgcnt = 1;
- bool use_pbles = false;
- unsigned long flags;
- int err = -EINVAL;
+ struct irdma_mem_reg_req req = {};
+ struct ib_umem *region = NULL;
+ struct irdma_mr *iwmr = NULL;
+ int err;
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
return ERR_PTR(-EINVAL);
@@ -2918,8 +2946,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
return (struct ib_mr *)iwmr;
}
- iwpbl = &iwmr->iwpbl;
-
switch (req.reg_type) {
case IRDMA_MEMREG_TYPE_QP:
err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
@@ -2928,25 +2954,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
break;
case IRDMA_MEMREG_TYPE_CQ:
- if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
- shadow_pgcnt = 0;
- total = req.cq_pages + shadow_pgcnt;
- if (total > iwmr->page_cnt) {
- err = -EINVAL;
- goto error;
- }
-
- use_pbles = (req.cq_pages > 1);
- err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
if (err)
goto error;
-
- ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
- ibucontext);
- spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
- list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
- iwpbl->on_list = true;
- spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
break;
case IRDMA_MEMREG_TYPE_MEM:
err = irdma_reg_user_mr_type_mem(iwmr, access);
@@ -2955,6 +2965,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
break;
default:
+ err = -EINVAL;
goto error;
}