Add support for configuration of congestion control algorithms in QP granularity with direct verbs hnsdv_create_qp(). Reference: https://github.com/linux-rdma/rdma-core/pull/1426/commits Signed-off-by: Ran Zhou <zhouran10@h-partners.com> (cherry picked from commit f4a8396bcf41ea12bf3e7b73793e60bfba097377)
140 lines
4.9 KiB
Diff
From da7f5d66f410f226f1cc0437bb4fc3124fcbb3f3 Mon Sep 17 00:00:00 2001
|
|
From: Junxian Huang <huangjunxian6@hisilicon.com>
|
|
Date: Tue, 5 Mar 2024 13:57:23 +0800
|
|
Subject: [PATCH 6/7] libhns: Encapsulate context attribute setting into a
|
|
single function
|
|
|
|
driver inclusion
|
|
category: feature
|
|
bugzilla: https://gitee.com/openeuler/kernel/issues/I95UWO
|
|
|
|
------------------------------------------------------------------
|
|
This patch doesn't involve functional changes. Just encapsulate context
|
|
attribute setting into a single function set_context_attr() to make
|
|
hns_roce_alloc_context() more readable.
|
|
|
|
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
|
|
Signed-off-by: Ran Zhou <zhouran10@h-partners.com>
|
|
---
|
|
providers/hns/hns_roce_u.c | 69 ++++++++++++++++++++++----------------
|
|
1 file changed, 40 insertions(+), 29 deletions(-)
|
|
|
|
diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c
|
|
index 0b254fb..69f7d3f 100644
|
|
--- a/providers/hns/hns_roce_u.c
|
|
+++ b/providers/hns/hns_roce_u.c
|
|
@@ -97,50 +97,33 @@ static uint32_t calc_table_shift(uint32_t entry_count, uint32_t size_shift)
|
|
return count_shift > size_shift ? count_shift - size_shift : 0;
|
|
}
|
|
|
|
-static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
|
|
- int cmd_fd,
|
|
- void *private_data)
|
|
+static int set_context_attr(struct hns_roce_device *hr_dev,
|
|
+ struct hns_roce_context *context,
|
|
+ struct hns_roce_alloc_ucontext_resp *resp)
|
|
{
|
|
- struct hns_roce_device *hr_dev = to_hr_dev(ibdev);
|
|
- struct hns_roce_alloc_ucontext_resp resp = {};
|
|
- struct hns_roce_alloc_ucontext cmd = {};
|
|
struct ibv_device_attr dev_attrs;
|
|
- struct hns_roce_context *context;
|
|
int i;
|
|
|
|
- context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx,
|
|
- RDMA_DRIVER_HNS);
|
|
- if (!context)
|
|
- return NULL;
|
|
-
|
|
- cmd.config |= HNS_ROCE_EXSGE_FLAGS | HNS_ROCE_RQ_INLINE_FLAGS |
|
|
- HNS_ROCE_CQE_INLINE_FLAGS;
|
|
- if (ibv_cmd_get_context(&context->ibv_ctx, &cmd.ibv_cmd, sizeof(cmd),
|
|
- &resp.ibv_resp, sizeof(resp)))
|
|
- goto err_free;
|
|
-
|
|
- if (!resp.cqe_size)
|
|
+ if (!resp->cqe_size)
|
|
context->cqe_size = HNS_ROCE_CQE_SIZE;
|
|
- else if (resp.cqe_size <= HNS_ROCE_V3_CQE_SIZE)
|
|
- context->cqe_size = resp.cqe_size;
|
|
+ else if (resp->cqe_size <= HNS_ROCE_V3_CQE_SIZE)
|
|
+ context->cqe_size = resp->cqe_size;
|
|
else
|
|
context->cqe_size = HNS_ROCE_V3_CQE_SIZE;
|
|
|
|
- context->config = resp.config;
|
|
- if (resp.config & HNS_ROCE_RSP_EXSGE_FLAGS)
|
|
- context->max_inline_data = resp.max_inline_data;
|
|
+ context->config = resp->config;
|
|
+ if (resp->config & HNS_ROCE_RSP_EXSGE_FLAGS)
|
|
+ context->max_inline_data = resp->max_inline_data;
|
|
|
|
- context->qp_table_shift = calc_table_shift(resp.qp_tab_size,
|
|
+ context->qp_table_shift = calc_table_shift(resp->qp_tab_size,
|
|
HNS_ROCE_QP_TABLE_BITS);
|
|
context->qp_table_mask = (1 << context->qp_table_shift) - 1;
|
|
- pthread_mutex_init(&context->qp_table_mutex, NULL);
|
|
for (i = 0; i < HNS_ROCE_QP_TABLE_SIZE; ++i)
|
|
context->qp_table[i].refcnt = 0;
|
|
|
|
- context->srq_table_shift = calc_table_shift(resp.srq_tab_size,
|
|
+ context->srq_table_shift = calc_table_shift(resp->srq_tab_size,
|
|
HNS_ROCE_SRQ_TABLE_BITS);
|
|
context->srq_table_mask = (1 << context->srq_table_shift) - 1;
|
|
- pthread_mutex_init(&context->srq_table_mutex, NULL);
|
|
for (i = 0; i < HNS_ROCE_SRQ_TABLE_SIZE; ++i)
|
|
context->srq_table[i].refcnt = 0;
|
|
|
|
@@ -149,7 +132,7 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
|
|
struct ibv_device_attr_ex,
|
|
orig_attr),
|
|
sizeof(dev_attrs)))
|
|
- goto err_free;
|
|
+ return EIO;
|
|
|
|
hr_dev->hw_version = dev_attrs.hw_ver;
|
|
context->max_qp_wr = dev_attrs.max_qp_wr;
|
|
@@ -158,11 +141,39 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
|
|
context->max_srq_wr = dev_attrs.max_srq_wr;
|
|
context->max_srq_sge = dev_attrs.max_srq_sge;
|
|
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
|
|
+ int cmd_fd,
|
|
+ void *private_data)
|
|
+{
|
|
+ struct hns_roce_device *hr_dev = to_hr_dev(ibdev);
|
|
+ struct hns_roce_alloc_ucontext_resp resp = {};
|
|
+ struct hns_roce_alloc_ucontext cmd = {};
|
|
+ struct hns_roce_context *context;
|
|
+
|
|
+ context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx,
|
|
+ RDMA_DRIVER_HNS);
|
|
+ if (!context)
|
|
+ return NULL;
|
|
+
|
|
+ cmd.config |= HNS_ROCE_EXSGE_FLAGS | HNS_ROCE_RQ_INLINE_FLAGS |
|
|
+ HNS_ROCE_CQE_INLINE_FLAGS;
|
|
+ if (ibv_cmd_get_context(&context->ibv_ctx, &cmd.ibv_cmd, sizeof(cmd),
|
|
+ &resp.ibv_resp, sizeof(resp)))
|
|
+ goto err_free;
|
|
+
|
|
+ if (set_context_attr(hr_dev, context, &resp))
|
|
+ goto err_free;
|
|
+
|
|
context->uar = mmap(NULL, hr_dev->page_size, PROT_READ | PROT_WRITE,
|
|
MAP_SHARED, cmd_fd, 0);
|
|
if (context->uar == MAP_FAILED)
|
|
goto err_free;
|
|
|
|
+ pthread_mutex_init(&context->qp_table_mutex, NULL);
|
|
+ pthread_mutex_init(&context->srq_table_mutex, NULL);
|
|
pthread_spin_init(&context->uar_lock, PTHREAD_PROCESS_PRIVATE);
|
|
|
|
verbs_set_ops(&context->ibv_ctx, &hns_common_ops);
|
|
--
|
|
2.33.0
|
|
|