RDMA/ocrdma: Remove the reference to ocrdma_dev from the ocrdma_qp structure

Use the get_ocrdma_dev(ocrdma_qp->ibqp.device) helper to access the ocrdma
device pointer instead of caching it in the ocrdma_qp structure.
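
For reference, get_ocrdma_dev() is the usual container_of()-style accessor over
the ib_device embedded in struct ocrdma_dev (the diff below relies on its ibdev
member, e.g. &dev->ibdev). A minimal sketch of how such a helper is typically
defined; the driver's actual definition in its headers may differ in detail:

        /* Sketch only: recover the ocrdma_dev that embeds this ib_device. */
        static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
        {
                return container_of(ibdev, struct ocrdma_dev, ibdev);
        }

In the QP creation path the hunks below reach the same device through the parent
PD instead, get_ocrdma_dev(pd->ibpd.device), since qp->ibqp is not yet
initialized at that point.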

Signed-off-by: Mitesh Ahuja <mitesh.ahuja@emulex.com>
Signed-off-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Selvin Xavier <selvin.xavier@emulex.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Author:    Mitesh Ahuja <mitesh.ahuja@emulex.com>, 2014-12-18 14:13:06 +05:30
Committer: Roland Dreier <roland@purestorage.com>
commit d2b8f7b1f8 (parent b4dbe8d52d)
3 changed files with 34 additions and 29 deletions

drivers/infiniband/hw/ocrdma/ocrdma.h

@@ -370,7 +370,6 @@ struct ocrdma_srq {
 struct ocrdma_qp {
         struct ib_qp ibqp;
-        struct ocrdma_dev *dev;
         u8 __iomem *sq_db;
         struct ocrdma_qp_hwq_info sq;

drivers/infiniband/hw/ocrdma/ocrdma_hw.c

@@ -2041,8 +2041,9 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
 {
         bool found;
         unsigned long flags;
+        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
-        spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
+        spin_lock_irqsave(&dev->flush_q_lock, flags);
         found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
         if (!found)
                 list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
@@ -2051,7 +2052,7 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
                 if (!found)
                         list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
         }
-        spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
+        spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 }
 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
@@ -2117,7 +2118,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
         int status;
         u32 len, hw_pages, hw_page_size;
         dma_addr_t pa;
-        struct ocrdma_dev *dev = qp->dev;
+        struct ocrdma_pd *pd = qp->pd;
+        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
         struct pci_dev *pdev = dev->nic_info.pdev;
         u32 max_wqe_allocated;
         u32 max_sges = attrs->cap.max_send_sge;
@@ -2172,7 +2174,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
         int status;
         u32 len, hw_pages, hw_page_size;
         dma_addr_t pa = 0;
-        struct ocrdma_dev *dev = qp->dev;
+        struct ocrdma_pd *pd = qp->pd;
+        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
         struct pci_dev *pdev = dev->nic_info.pdev;
         u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
@@ -2231,7 +2234,8 @@ static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
 static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
                                         struct ocrdma_qp *qp)
 {
-        struct ocrdma_dev *dev = qp->dev;
+        struct ocrdma_pd *pd = qp->pd;
+        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
         struct pci_dev *pdev = dev->nic_info.pdev;
         dma_addr_t pa = 0;
         int ird_page_size = dev->attr.ird_page_size;
@@ -2302,8 +2306,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
 {
         int status = -ENOMEM;
         u32 flags = 0;
-        struct ocrdma_dev *dev = qp->dev;
         struct ocrdma_pd *pd = qp->pd;
+        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
         struct pci_dev *pdev = dev->nic_info.pdev;
         struct ocrdma_cq *cq;
         struct ocrdma_create_qp_req *cmd;
@@ -2426,11 +2430,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
         union ib_gid sgid, zgid;
         u32 vlan_id;
         u8 mac_addr[6];
+        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
         if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
                 return -EINVAL;
-        if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
-                ocrdma_init_service_level(qp->dev);
+        if (atomic_cmpxchg(&dev->update_sl, 1, 0))
+                ocrdma_init_service_level(dev);
         cmd->params.tclass_sq_psn |=
                 (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
         cmd->params.rnt_rc_sl_fl |=
@@ -2441,7 +2446,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
                 cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
         memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
                sizeof(cmd->params.dgid));
-        status = ocrdma_query_gid(&qp->dev->ibdev, 1,
+        status = ocrdma_query_gid(&dev->ibdev, 1,
                         ah_attr->grh.sgid_index, &sgid);
         if (status)
                 return status;
@@ -2452,7 +2457,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
         qp->sgid_idx = ah_attr->grh.sgid_index;
         memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
-        status = ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
+        status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
         if (status)
                 return status;
         cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
@@ -2467,7 +2472,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
                                 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
                 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
                 cmd->params.rnt_rc_sl_fl |=
-                        (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
+                        (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
         }
         return 0;
 }
@@ -2477,6 +2482,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                                 struct ib_qp_attr *attrs, int attr_mask)
 {
         int status = 0;
+        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
         if (attr_mask & IB_QP_PKEY_INDEX) {
                 cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2494,12 +2500,12 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                 return status;
         } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
                 /* set the default mac address for UD, GSI QPs */
-                cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
-                        (qp->dev->nic_info.mac_addr[1] << 8) |
-                        (qp->dev->nic_info.mac_addr[2] << 16) |
-                        (qp->dev->nic_info.mac_addr[3] << 24);
-                cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
-                        (qp->dev->nic_info.mac_addr[5] << 8);
+                cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
+                        (dev->nic_info.mac_addr[1] << 8) |
+                        (dev->nic_info.mac_addr[2] << 16) |
+                        (dev->nic_info.mac_addr[3] << 24);
+                cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
+                        (dev->nic_info.mac_addr[5] << 8);
         }
         if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
             attrs->en_sqd_async_notify) {
@@ -2556,7 +2562,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                 cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
         }
         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-                if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
+                if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
                         status = -EINVAL;
                         goto pmtu_err;
                 }
@@ -2564,7 +2570,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                 cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
         }
         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-                if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
+                if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
                         status = -EINVAL;
                         goto pmtu_err;
                 }

drivers/infiniband/hw/ocrdma/ocrdma_verbs.c

@@ -1219,8 +1219,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
         int status = 0;
         u64 usr_db;
         struct ocrdma_create_qp_uresp uresp;
-        struct ocrdma_dev *dev = qp->dev;
         struct ocrdma_pd *pd = qp->pd;
+        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
         memset(&uresp, 0, sizeof(uresp));
         usr_db = dev->nic_info.unmapped_db +
@@ -1359,7 +1359,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
                 status = -ENOMEM;
                 goto gen_err;
         }
-        qp->dev = dev;
         ocrdma_set_qp_init_params(qp, pd, attrs);
         if (udata == NULL)
                 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
@@ -1418,7 +1417,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         enum ib_qp_state old_qps;
         qp = get_ocrdma_qp(ibqp);
-        dev = qp->dev;
+        dev = get_ocrdma_dev(ibqp->device);
         if (attr_mask & IB_QP_STATE)
                 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
         /* if new and previous states are same hw doesn't need to
@@ -1441,7 +1440,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         enum ib_qp_state old_qps, new_qps;
         qp = get_ocrdma_qp(ibqp);
-        dev = qp->dev;
+        dev = get_ocrdma_dev(ibqp->device);
         /* syncronize with multiple context trying to change, retrive qps */
         mutex_lock(&dev->dev_lock);
@@ -1508,7 +1507,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
         u32 qp_state;
         struct ocrdma_qp_params params;
         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
-        struct ocrdma_dev *dev = qp->dev;
+        struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
         memset(&params, 0, sizeof(params));
         mutex_lock(&dev->dev_lock);
@@ -1704,7 +1703,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
 {
         int found = false;
         unsigned long flags;
-        struct ocrdma_dev *dev = qp->dev;
+        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
         /* sync with any active CQ poll */
         spin_lock_irqsave(&dev->flush_q_lock, flags);
@@ -1729,7 +1728,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
         unsigned long flags;
         qp = get_ocrdma_qp(ibqp);
-        dev = qp->dev;
+        dev = get_ocrdma_dev(ibqp->device);
         attrs.qp_state = IB_QPS_ERR;
         pd = qp->pd;
@@ -2114,11 +2113,12 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
         u64 fbo;
         struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
         struct ocrdma_mr *mr;
+        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
         u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
         wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
-        if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
+        if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
                 return -EINVAL;
         hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
@@ -2146,7 +2146,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
         fast_reg->size_sge =
                 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
         mr = (struct ocrdma_mr *) (unsigned long)
-                qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
+                dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
         build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
         return 0;
 }
} }