author     Bart Van Assche <bart.vanassche@wdc.com>   2018-07-18 09:25:32 -0700
committer  Jason Gunthorpe <jgg@mellanox.com>         2018-07-30 20:09:34 -0600
commit     d34ac5cd3a73aacd11009c4fc3ba15d7ea62c411 (patch)
tree       153c7ef6cc03750e6fc0d70afd782bf3ddbc4aa7 /drivers/infiniband/hw/mlx4/qp.c
parent     7bb1fafc2f163ad03a2007295bb2f57cfdbfb630 (diff)
RDMA, core and ULPs: Declare ib_post_send() and ib_post_recv() arguments const
Since neither ib_post_send() nor ib_post_recv() modifies the data structure their second argument points at, declare that argument const. This change makes it necessary to declare the 'bad_wr' argument const too and also to modify all ULPs that call ib_post_send(), ib_post_recv() or ib_post_srq_recv(). This patch does not change any functionality but makes it possible for the compiler to verify that the ib_post_(send|recv|srq_recv) implementations really do not modify the posted work request.

To make this possible, only one cast had to be introduced that casts away constness, namely in rpcrdma_post_recvs(). The only way I can think of to avoid that cast is to introduce an additional loop in that function or to change the data type of bad_wr from struct ib_recv_wr ** into an int (an index that refers to an element in the work request list). However, both approaches would require even more extensive changes than this patch.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
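For illustration only, here is a minimal sketch of what a ULP-style caller looks like under the new signatures; the helper name example_post_one_recv and its arguments are hypothetical and not part of this patch:

#include <rdma/ib_verbs.h>

/* Hypothetical helper: post one receive work request. The WR chain is
 * passed as const, and the bad_wr cursor must now be const-qualified
 * to match the new ib_post_recv() prototype. */
static int example_post_one_recv(struct ib_qp *qp, struct ib_sge *sge)
{
	struct ib_recv_wr wr = {
		.next    = NULL,
		.wr_id   = 1,
		.sg_list = sge,
		.num_sge = 1,
	};
	const struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}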
Diffstat (limited to 'drivers/infiniband/hw/mlx4/qp.c')
 drivers/infiniband/hw/mlx4/qp.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)
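The one const-discarding cast the commit message mentions lives in rpcrdma_post_recvs(), outside this file. It has roughly the following shape (a schematic sketch, not the actual rpcrdma code; the wrapper name example_post_recv_chain and the cleanup body are hypothetical): the caller keeps a non-const bad_wr so it can walk and release the rejected tail of the chain, and constness is cast away only at the call site.

#include <rdma/ib_verbs.h>

/* Hypothetical wrapper: post a chain of receive WRs; on failure, walk
 * the rejected tail, which needs a non-const cursor. */
static int example_post_recv_chain(struct ib_qp *qp, struct ib_recv_wr *first)
{
	struct ib_recv_wr *bad_wr;
	int rc;

	/* The single place where constness is cast away. */
	rc = ib_post_recv(qp, first, (const struct ib_recv_wr **)&bad_wr);
	if (rc) {
		for (; bad_wr; bad_wr = bad_wr->next) {
			/* release per-WR resources here */
		}
	}
	return rc;
}

The mlx4 hunks below are the provider-side half of the change: every post path and the drain helpers switch to the const-qualified types.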
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 44fc684b5e3a..b431757d4668 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -3569,8 +3569,8 @@ static void add_zero_len_inline(void *wqe)
 	inl->byte_count = cpu_to_be32(1 << 31);
 }
 
-static int _mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-			      struct ib_send_wr **bad_wr, bool drain)
+static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+			      const struct ib_send_wr **bad_wr, bool drain)
 {
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	void *wqe;
@@ -3901,14 +3901,14 @@ out:
 	return err;
 }
 
-int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-		      struct ib_send_wr **bad_wr)
+int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+		      const struct ib_send_wr **bad_wr)
 {
 	return _mlx4_ib_post_send(ibqp, wr, bad_wr, false);
 }
 
-static int _mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-			      struct ib_recv_wr **bad_wr, bool drain)
+static int _mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+			      const struct ib_recv_wr **bad_wr, bool drain)
 {
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	struct mlx4_wqe_data_seg *scat;
@@ -3995,8 +3995,8 @@ out:
 	return err;
 }
 
-int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-		      struct ib_recv_wr **bad_wr)
+int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+		      const struct ib_recv_wr **bad_wr)
 {
 	return _mlx4_ib_post_recv(ibqp, wr, bad_wr, false);
 }
@@ -4536,7 +4536,7 @@ void mlx4_ib_drain_sq(struct ib_qp *qp)
 	struct ib_cq *cq = qp->send_cq;
 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 	struct mlx4_ib_drain_cqe sdrain;
-	struct ib_send_wr *bad_swr;
+	const struct ib_send_wr *bad_swr;
 	struct ib_rdma_wr swr = {
 		.wr = {
 			.next = NULL,
@@ -4571,7 +4571,8 @@ void mlx4_ib_drain_rq(struct ib_qp *qp)
 	struct ib_cq *cq = qp->recv_cq;
 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 	struct mlx4_ib_drain_cqe rdrain;
-	struct ib_recv_wr rwr = {}, *bad_rwr;
+	struct ib_recv_wr rwr = {};
+	const struct ib_recv_wr *bad_rwr;
 	int ret;
 	struct mlx4_ib_dev *dev = to_mdev(qp->device);
 	struct mlx4_dev *mdev = dev->dev;