rdma-core/0005-libhns-Encapsulate-context-attribute-setting-into-a-.patch
Ran Zhou e56042b4e2 Support reporting wc as software mode.
When the HW is in the resetting stage, we cannot poll back all of the
expected work completions, as the HW will no longer generate CQEs.
This patch allows the driver to compose the expected WCs in software
instead of relying on the HW during the resetting stage. Once the
hardware has finished resetting, the CQ can be polled from hardware
again.

Signed-off-by: Ran Zhou <zhouran10@h-partners.com>
(cherry picked from commit 5494e44cf97e65d858c8f7376c0424a833dc8323)
2024-03-28 20:21:14 +08:00
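The mechanism described in that commit message can be pictured as follows: while the HW is held in reset and will not produce any more CQEs, the provider walks the work requests it still has recorded as outstanding and fills in the ibv_wc entries itself, typically with a flush status, so that ibv_poll_cq() keeps its usual contract of one completion per posted WR. The sketch below is illustrative only and is not the actual hns code; the sw_wqe/sw_queue bookkeeping types and sw_compose_wcs() are hypothetical names.

/*
 * Hypothetical sketch of composing work completions in software while the
 * HW is resetting. The bookkeeping types are invented for illustration;
 * only struct ibv_wc and its status codes come from libibverbs.
 */
#include <stdint.h>
#include <infiniband/verbs.h>

struct sw_wqe {			/* one posted WR we still owe a completion for */
	uint64_t wr_id;
	uint32_t qp_num;
	enum ibv_wc_opcode opcode;
};

struct sw_queue {		/* simplified: flat array, head <= tail, no wrap */
	struct sw_wqe *wqe;
	unsigned int head;
	unsigned int tail;
};

/* Fill up to num_entries completions without touching the (dead) HW CQ. */
static int sw_compose_wcs(struct sw_queue *q, int num_entries, struct ibv_wc *wc)
{
	int n = 0;

	while (n < num_entries && q->head != q->tail) {
		struct sw_wqe *e = &q->wqe[q->head++];

		wc[n].wr_id = e->wr_id;
		wc[n].qp_num = e->qp_num;
		wc[n].opcode = e->opcode;
		/* No real status is available, so report the WR as flushed. */
		wc[n].status = IBV_WC_WR_FLUSH_ERR;
		n++;
	}

	return n;	/* same return convention as ibv_poll_cq() */
}

A provider would branch to something like this from its poll-CQ path once it detects the reset state, and resume parsing real CQEs after the hardware has finished resetting.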

From 4deb1a1a9b181d481f51a989b5c173857da87c44 Mon Sep 17 00:00:00 2001
From: Junxian Huang <huangjunxian6@hisilicon.com>
Date: Tue, 5 Mar 2024 13:57:23 +0800
Subject: [PATCH] libhns: Encapsulate context attribute setting into a single
function

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I95UWO

------------------------------------------------------------------

This patch doesn't involve functional changes. It just encapsulates
context attribute setting into a single function, set_context_attr(),
to make hns_roce_alloc_context() more readable.

Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Signed-off-by: Ran Zhou <zhouran10@h-partners.com>
---
providers/hns/hns_roce_u.c | 69 ++++++++++++++++++++++----------------
1 file changed, 40 insertions(+), 29 deletions(-)
diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c
index 0b254fb..69f7d3f 100644
--- a/providers/hns/hns_roce_u.c
+++ b/providers/hns/hns_roce_u.c
@@ -97,50 +97,33 @@ static uint32_t calc_table_shift(uint32_t entry_count, uint32_t size_shift)
 	return count_shift > size_shift ? count_shift - size_shift : 0;
 }
 
-static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
-						     int cmd_fd,
-						     void *private_data)
+static int set_context_attr(struct hns_roce_device *hr_dev,
+			    struct hns_roce_context *context,
+			    struct hns_roce_alloc_ucontext_resp *resp)
 {
-	struct hns_roce_device *hr_dev = to_hr_dev(ibdev);
-	struct hns_roce_alloc_ucontext_resp resp = {};
-	struct hns_roce_alloc_ucontext cmd = {};
 	struct ibv_device_attr dev_attrs;
-	struct hns_roce_context *context;
 	int i;
 
-	context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx,
-					       RDMA_DRIVER_HNS);
-	if (!context)
-		return NULL;
-
-	cmd.config |= HNS_ROCE_EXSGE_FLAGS | HNS_ROCE_RQ_INLINE_FLAGS |
-		      HNS_ROCE_CQE_INLINE_FLAGS;
-	if (ibv_cmd_get_context(&context->ibv_ctx, &cmd.ibv_cmd, sizeof(cmd),
-				&resp.ibv_resp, sizeof(resp)))
-		goto err_free;
-
-	if (!resp.cqe_size)
+	if (!resp->cqe_size)
 		context->cqe_size = HNS_ROCE_CQE_SIZE;
-	else if (resp.cqe_size <= HNS_ROCE_V3_CQE_SIZE)
-		context->cqe_size = resp.cqe_size;
+	else if (resp->cqe_size <= HNS_ROCE_V3_CQE_SIZE)
+		context->cqe_size = resp->cqe_size;
 	else
 		context->cqe_size = HNS_ROCE_V3_CQE_SIZE;
 
-	context->config = resp.config;
-	if (resp.config & HNS_ROCE_RSP_EXSGE_FLAGS)
-		context->max_inline_data = resp.max_inline_data;
+	context->config = resp->config;
+	if (resp->config & HNS_ROCE_RSP_EXSGE_FLAGS)
+		context->max_inline_data = resp->max_inline_data;
 
-	context->qp_table_shift = calc_table_shift(resp.qp_tab_size,
+	context->qp_table_shift = calc_table_shift(resp->qp_tab_size,
 						   HNS_ROCE_QP_TABLE_BITS);
 	context->qp_table_mask = (1 << context->qp_table_shift) - 1;
-	pthread_mutex_init(&context->qp_table_mutex, NULL);
 	for (i = 0; i < HNS_ROCE_QP_TABLE_SIZE; ++i)
 		context->qp_table[i].refcnt = 0;
 
-	context->srq_table_shift = calc_table_shift(resp.srq_tab_size,
+	context->srq_table_shift = calc_table_shift(resp->srq_tab_size,
 						    HNS_ROCE_SRQ_TABLE_BITS);
 	context->srq_table_mask = (1 << context->srq_table_shift) - 1;
-	pthread_mutex_init(&context->srq_table_mutex, NULL);
 	for (i = 0; i < HNS_ROCE_SRQ_TABLE_SIZE; ++i)
 		context->srq_table[i].refcnt = 0;
 
@@ -149,7 +132,7 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
 						 struct ibv_device_attr_ex,
 						 orig_attr),
 				    sizeof(dev_attrs)))
-		goto err_free;
+		return EIO;
 
 	hr_dev->hw_version = dev_attrs.hw_ver;
 	context->max_qp_wr = dev_attrs.max_qp_wr;
@@ -158,11 +141,39 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
 	context->max_srq_wr = dev_attrs.max_srq_wr;
 	context->max_srq_sge = dev_attrs.max_srq_sge;
 
+	return 0;
+}
+
+static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
+						     int cmd_fd,
+						     void *private_data)
+{
+	struct hns_roce_device *hr_dev = to_hr_dev(ibdev);
+	struct hns_roce_alloc_ucontext_resp resp = {};
+	struct hns_roce_alloc_ucontext cmd = {};
+	struct hns_roce_context *context;
+
+	context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx,
+					       RDMA_DRIVER_HNS);
+	if (!context)
+		return NULL;
+
+	cmd.config |= HNS_ROCE_EXSGE_FLAGS | HNS_ROCE_RQ_INLINE_FLAGS |
+		      HNS_ROCE_CQE_INLINE_FLAGS;
+	if (ibv_cmd_get_context(&context->ibv_ctx, &cmd.ibv_cmd, sizeof(cmd),
+				&resp.ibv_resp, sizeof(resp)))
+		goto err_free;
+
+	if (set_context_attr(hr_dev, context, &resp))
+		goto err_free;
+
 	context->uar = mmap(NULL, hr_dev->page_size, PROT_READ | PROT_WRITE,
 			    MAP_SHARED, cmd_fd, 0);
 	if (context->uar == MAP_FAILED)
 		goto err_free;
 
+	pthread_mutex_init(&context->qp_table_mutex, NULL);
+	pthread_mutex_init(&context->srq_table_mutex, NULL);
 	pthread_spin_init(&context->uar_lock, PTHREAD_PROCESS_PRIVATE);
 
 	verbs_set_ops(&context->ibv_ctx, &hns_common_ops);
--
2.33.0
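
For readers less familiar with the libibverbs provider model: hns_roce_alloc_context(), the function reorganized by this patch, is the provider hook that libibverbs invokes when an application opens an HNS device. The sketch below uses only the standard verbs API and is meant as context for where this code runs, not as part of the patch.

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	int num;
	struct ibv_device **list = ibv_get_device_list(&num);
	struct ibv_context *ctx;

	if (!list || num == 0) {
		fprintf(stderr, "no RDMA devices found\n");
		return 1;
	}

	/*
	 * For an hns device this call lands in hns_roce_alloc_context():
	 * it issues the ucontext command, applies the response via
	 * set_context_attr(), maps the UAR page and initializes the locks.
	 */
	ctx = ibv_open_device(list[0]);
	if (!ctx) {
		ibv_free_device_list(list);
		return 1;
	}

	printf("opened %s\n", ibv_get_device_name(list[0]));

	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}

Everything this patch reorganizes happens inside that single ibv_open_device() call; since the QP/SRQ tables and their locks are not used until the context is returned to the application, moving the pthread_mutex_init() calls later in the flow does not change behavior, in line with the commit message.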