When the HW is in the resetting stage, we cannot poll back all the expected work completions, as the HW will not generate CQEs anymore. This patch allows the driver to compose the expected WCs instead of the HW during the resetting stage. Once the hardware has finished resetting, we can poll the CQ from hardware again. Signed-off-by: Ran Zhou <zhouran10@h-partners.com> (cherry picked from commit 5494e44cf97e65d858c8f7376c0424a833dc8323)
95 lines
3.0 KiB
Diff
95 lines
3.0 KiB
Diff
From 64e8d59358cfdb05d7b172bb1b60f18fb7f3d844 Mon Sep 17 00:00:00 2001
|
|
From: Chengchang Tang <tangchengchang@huawei.com>
|
|
Date: Thu, 7 Dec 2023 09:48:02 +0800
|
|
Subject: [PATCH 18/18] libhns: Fix owner bit when SQ wraps around in new IO
|
|
|
|
driver inclusion
|
|
category: bugfix
|
|
bugzilla: https://gitee.com/openeuler/kernel/issues/I98YNG
|
|
|
|
--------------------------------------------------------------------------
|
|
|
|
The owner bit has been written in init_rc_wqe() or init_ud_wqe()
|
|
with the right value. However, it will be overwritten by some subsequent
|
|
operations. When the SQ wraps around, the overwritten value will be
|
|
an incorrect value.
|
|
|
|
For example, the driver will assign the owner bit in the second step,
|
|
and overwrite it in the third step.
|
|
|
|
```c
|
|
ibv_wr_start();
|
|
ibv_wr_rdma_write();
|
|
if (inline)
|
|
ibv_wr_set_inline_data_list();
|
|
else
|
|
ibv_wr_set_sge_list();
|
|
ibv_wr_complete();
|
|
```
|
|
|
|
This patch removes the redundant owner bit assignment operations
|
|
in new IO.
|
|
|
|
Fixes: ("libhns: Fix the owner bit error of sq in new io")
|
|
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
|
|
---
|
|
providers/hns/hns_roce_u_hw_v2.c | 7 -------
|
|
1 file changed, 7 deletions(-)
|
|
|
|
diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c
|
|
index a0dce1c..9016978 100644
|
|
--- a/providers/hns/hns_roce_u_hw_v2.c
|
|
+++ b/providers/hns/hns_roce_u_hw_v2.c
|
|
@@ -2353,8 +2353,6 @@ static void wr_set_sge_list_rc(struct ibv_qp_ex *ibv_qp, size_t num_sge,
|
|
|
|
wqe->msg_len = htole32(qp->sge_info.total_len);
|
|
hr_reg_write(wqe, RCWQE_SGE_NUM, qp->sge_info.valid_num);
|
|
-
|
|
- enable_wqe(qp, wqe, qp->sq.head);
|
|
}
|
|
|
|
static void wr_send_rc(struct ibv_qp_ex *ibv_qp)
|
|
@@ -2546,7 +2544,6 @@ static void wr_set_inline_data_rc(struct ibv_qp_ex *ibv_qp, void *addr,
|
|
|
|
qp->sge_info.total_len = length;
|
|
set_inline_data_list_rc(qp, wqe, 1, &buff);
|
|
- enable_wqe(qp, wqe, qp->sq.head);
|
|
}
|
|
|
|
static void wr_set_inline_data_list_rc(struct ibv_qp_ex *ibv_qp, size_t num_buf,
|
|
@@ -2564,7 +2561,6 @@ static void wr_set_inline_data_list_rc(struct ibv_qp_ex *ibv_qp, size_t num_buf,
|
|
qp->sge_info.total_len += buf_list[i].length;
|
|
|
|
set_inline_data_list_rc(qp, wqe, num_buf, buf_list);
|
|
- enable_wqe(qp, wqe, qp->sq.head);
|
|
}
|
|
|
|
static struct hns_roce_ud_sq_wqe *
|
|
@@ -2701,7 +2697,6 @@ static void wr_set_sge_list_ud(struct ibv_qp_ex *ibv_qp, size_t num_sge,
|
|
hr_reg_write(wqe, UDWQE_SGE_NUM, cnt);
|
|
|
|
qp->sge_info.start_idx += cnt;
|
|
- enable_wqe(qp, wqe, qp->sq.head);
|
|
}
|
|
|
|
static void set_inline_data_list_ud(struct hns_roce_qp *qp,
|
|
@@ -2767,7 +2762,6 @@ static void wr_set_inline_data_ud(struct ibv_qp_ex *ibv_qp, void *addr,
|
|
|
|
qp->sge_info.total_len = length;
|
|
set_inline_data_list_ud(qp, wqe, 1, &buff);
|
|
- enable_wqe(qp, wqe, qp->sq.head);
|
|
}
|
|
|
|
static void wr_set_inline_data_list_ud(struct ibv_qp_ex *ibv_qp, size_t num_buf,
|
|
@@ -2785,7 +2779,6 @@ static void wr_set_inline_data_list_ud(struct ibv_qp_ex *ibv_qp, size_t num_buf,
|
|
qp->sge_info.total_len += buf_list[i].length;
|
|
|
|
set_inline_data_list_ud(qp, wqe, num_buf, buf_list);
|
|
- enable_wqe(qp, wqe, qp->sq.head);
|
|
}
|
|
|
|
static void wr_start(struct ibv_qp_ex *ibv_qp)
|
|
--
|
|
2.33.0
|
|
|