Add support for libhns td unlock
This patch adds the libhns td (thread domain) unlock function. Signed-off-by: Yixing Liu <liuyixing1@huawei.com>
This commit is contained in:
parent 6d64231f65
commit 1c38175fa1
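For context, the new verbs added by this commit are consumed through the standard libibverbs thread-domain API. The sketch below is not part of the commit: it assumes an already-opened device context "ctx", trims most error handling, and the helper name alloc_lockfree_pd is hypothetical. It shows how an application binds a thread domain to a parent domain so that hns queues created on that parent domain can skip their internal spinlocks:

#include <infiniband/verbs.h>

static struct ibv_pd *alloc_lockfree_pd(struct ibv_context *ctx,
					struct ibv_td **td_out,
					struct ibv_pd **pd_out)
{
	struct ibv_td_init_attr td_attr = { .comp_mask = 0 };
	struct ibv_parent_domain_init_attr pad_attr = {};
	struct ibv_td *td;
	struct ibv_pd *pd, *pad;

	/* A thread domain is the application's promise that objects
	 * created under it are used by a single thread at a time. */
	td = ibv_alloc_td(ctx, &td_attr);
	if (!td)
		return NULL;

	pd = ibv_alloc_pd(ctx);
	if (!pd)
		goto err_pd;

	/* Binding the TD to the parent domain is what lets the hns
	 * provider set need_lock = false for queues created on it. */
	pad_attr.pd = pd;
	pad_attr.td = td;
	pad = ibv_alloc_parent_domain(ctx, &pad_attr);
	if (!pad)
		goto err_pad;

	*td_out = td;
	*pd_out = pd;
	return pad;	/* use this as the PD when creating QPs/CQs/SRQs */

err_pad:
	ibv_dealloc_pd(pd);
err_pd:
	ibv_dealloc_td(td);
	return NULL;
}

Teardown happens in reverse: ibv_dealloc_pd() on the parent domain first, then the original PD, then ibv_dealloc_td(). The refcounts introduced by this patch make out-of-order teardown fail with EBUSY instead of freeing memory still in use.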
0030-libhns-Add-support-for-the-thread-domain-and-the-par.patch (new file, 835 lines)
@@ -0,0 +1,835 @@
From 812372fadc96c6c59d460d84cdc72d60014f220d Mon Sep 17 00:00:00 2001
From: Yixing Liu <liuyixing1@huawei.com>
Date: Mon, 27 Jun 2022 14:52:04 +0800
Subject: [PATCH rdma-core] libhns: Add support for the thread domain and the parent domain

Internal locks will be disabled for queues under the
thread domain with the parent domain.

driver inclusion
category: feature
bugzilla: https://gitee.com/src-openeuler/rdma-core/issues/I63NGA

------------------------------------------------------------------

Signed-off-by: Yixing Liu <liuyixing1@huawei.com>
Reviewed-by: Yangyang Li <liyangyang20@huawei.com>
---
 providers/hns/hns_roce_u.c       |   5 +-
 providers/hns/hns_roce_u.h       |  75 ++++++++-
 providers/hns/hns_roce_u_hw_v2.c |  77 ++++-----
 providers/hns/hns_roce_u_verbs.c | 267 ++++++++++++++++++++++++++++---
 4 files changed, 357 insertions(+), 67 deletions(-)

diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c
index 266e73e..e3c72bb 100644
--- a/providers/hns/hns_roce_u.c
+++ b/providers/hns/hns_roce_u.c
@@ -67,7 +67,7 @@ static const struct verbs_context_ops hns_common_ops = {
.create_qp = hns_roce_u_create_qp,
.create_qp_ex = hns_roce_u_create_qp_ex,
.dealloc_mw = hns_roce_u_dealloc_mw,
- .dealloc_pd = hns_roce_u_free_pd,
+ .dealloc_pd = hns_roce_u_dealloc_pd,
.dereg_mr = hns_roce_u_dereg_mr,
.destroy_cq = hns_roce_u_destroy_cq,
.modify_cq = hns_roce_u_modify_cq,
@@ -88,6 +88,9 @@ static const struct verbs_context_ops hns_common_ops = {
.close_xrcd = hns_roce_u_close_xrcd,
.open_qp = hns_roce_u_open_qp,
.get_srq_num = hns_roce_u_get_srq_num,
+ .alloc_td = hns_roce_u_alloc_td,
+ .dealloc_td = hns_roce_u_dealloc_td,
+ .alloc_parent_domain = hns_roce_u_alloc_pad,
};

static uint32_t calc_table_shift(uint32_t entry_count, uint32_t size_shift)
diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h
index 8c1cb1e..8181da7 100644
--- a/providers/hns/hns_roce_u.h
+++ b/providers/hns/hns_roce_u.h
@@ -188,6 +188,11 @@ struct hns_roce_db_page {
unsigned long *bitmap;
};

+struct hns_roce_spinlock {
+ pthread_spinlock_t lock;
+ int need_lock;
+};
+
struct hns_roce_context {
struct verbs_context ibv_ctx;
void *uar;
@@ -222,15 +227,27 @@ struct hns_roce_context {
unsigned int max_inline_data;
};

+struct hns_roce_td {
+ struct ibv_td ibv_td;
+ atomic_int refcount;
+};
+
struct hns_roce_pd {
struct ibv_pd ibv_pd;
unsigned int pdn;
+ atomic_int refcount;
+ struct hns_roce_pd *protection_domain;
+};
+
+struct hns_roce_pad {
+ struct hns_roce_pd pd;
+ struct hns_roce_td *td;
};

struct hns_roce_cq {
struct verbs_cq verbs_cq;
struct hns_roce_buf buf;
- pthread_spinlock_t lock;
+ struct hns_roce_spinlock hr_lock;
unsigned int cqn;
unsigned int cq_depth;
unsigned int cons_index;
@@ -266,7 +283,7 @@ struct hns_roce_srq {
struct hns_roce_idx_que idx_que;
struct hns_roce_buf wqe_buf;
struct hns_roce_rinl_buf srq_rinl_buf;
- pthread_spinlock_t lock;
+ struct hns_roce_spinlock hr_lock;
unsigned long *wrid;
unsigned int srqn;
unsigned int wqe_cnt;
@@ -279,7 +296,7 @@ struct hns_roce_srq {

struct hns_roce_wq {
unsigned long *wrid;
- pthread_spinlock_t lock;
+ struct hns_roce_spinlock hr_lock;
unsigned int wqe_cnt;
int max_post;
unsigned int head;
@@ -397,9 +414,35 @@ static inline struct hns_roce_context *to_hr_ctx(struct ibv_context *ibv_ctx)
return container_of(ibv_ctx, struct hns_roce_context, ibv_ctx.context);
}

+static inline struct hns_roce_td *to_hr_td(struct ibv_td *ibv_td)
+{
+ return container_of(ibv_td, struct hns_roce_td, ibv_td);
+}
+
+/* to_hr_pd always returns the real hns_roce_pd obj. */
static inline struct hns_roce_pd *to_hr_pd(struct ibv_pd *ibv_pd)
{
- return container_of(ibv_pd, struct hns_roce_pd, ibv_pd);
+ struct hns_roce_pd *pd =
+ container_of(ibv_pd, struct hns_roce_pd, ibv_pd);
+
+ if (pd->protection_domain)
+ return pd->protection_domain;
+
+ return pd;
+}
+
+static inline struct hns_roce_pad *to_hr_pad(struct ibv_pd *ibv_pd)
+{
+ struct hns_roce_pad *pad =
+ ibv_pd ?
+ container_of(ibv_pd, struct hns_roce_pad, pd.ibv_pd) :
+ NULL;
+
+ if (pad && pad->pd.protection_domain)
+ return pad;
+
+ /* Otherwise ibv_pd isn't a parent_domain */
+ return NULL;
}

static inline struct hns_roce_cq *to_hr_cq(struct ibv_cq *ibv_cq)
@@ -422,14 +465,35 @@ static inline struct hns_roce_ah *to_hr_ah(struct ibv_ah *ibv_ah)
return container_of(ibv_ah, struct hns_roce_ah, ibv_ah);
}

+static inline int hns_roce_spin_lock(struct hns_roce_spinlock *hr_lock)
+{
+ if (hr_lock->need_lock)
+ return pthread_spin_lock(&hr_lock->lock);
+
+ return 0;
+}
+
+static inline int hns_roce_spin_unlock(struct hns_roce_spinlock *hr_lock)
+{
+ if (hr_lock->need_lock)
+ return pthread_spin_unlock(&hr_lock->lock);
+
+ return 0;
+}
+
int hns_roce_u_query_device(struct ibv_context *context,
const struct ibv_query_device_ex_input *input,
struct ibv_device_attr_ex *attr, size_t attr_size);
int hns_roce_u_query_port(struct ibv_context *context, uint8_t port,
struct ibv_port_attr *attr);

+struct ibv_td *hns_roce_u_alloc_td(struct ibv_context *context,
+ struct ibv_td_init_attr *attr);
+int hns_roce_u_dealloc_td(struct ibv_td *ibv_td);
+struct ibv_pd *hns_roce_u_alloc_pad(struct ibv_context *context,
+ struct ibv_parent_domain_init_attr *attr);
struct ibv_pd *hns_roce_u_alloc_pd(struct ibv_context *context);
-int hns_roce_u_free_pd(struct ibv_pd *pd);
+int hns_roce_u_dealloc_pd(struct ibv_pd *pd);

struct ibv_mr *hns_roce_u_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
uint64_t hca_va, int access);
@@ -488,6 +552,7 @@ int hns_roce_u_close_xrcd(struct ibv_xrcd *ibv_xrcd);
int hns_roce_alloc_buf(struct hns_roce_buf *buf, unsigned int size,
int page_size);
void hns_roce_free_buf(struct hns_roce_buf *buf);
+void hns_roce_qp_spinlock_destroy(struct hns_roce_qp *qp);

void hns_roce_free_qp_buf(struct hns_roce_qp *qp, struct hns_roce_context *ctx);

diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c
index c652eea..80e836d 100644
--- a/providers/hns/hns_roce_u_hw_v2.c
+++ b/providers/hns/hns_roce_u_hw_v2.c
@@ -227,14 +227,14 @@ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, uint16_t ind)
uint32_t bitmap_num;
int bit_num;

- pthread_spin_lock(&srq->lock);
+ hns_roce_spin_lock(&srq->hr_lock);

bitmap_num = ind / BIT_CNT_PER_LONG;
bit_num = ind % BIT_CNT_PER_LONG;
srq->idx_que.bitmap[bitmap_num] |= (1ULL << bit_num);
srq->idx_que.tail++;

- pthread_spin_unlock(&srq->lock);
+ hns_roce_spin_unlock(&srq->hr_lock);
}

static int get_srq_from_cqe(struct hns_roce_v2_cqe *cqe,
@@ -266,9 +266,9 @@ static int hns_roce_v2_wq_overflow(struct hns_roce_wq *wq, unsigned int nreq,
if (cur + nreq < wq->max_post)
return 0;

- pthread_spin_lock(&cq->lock);
+ hns_roce_spin_lock(&cq->hr_lock);
cur = wq->head - wq->tail;
- pthread_spin_unlock(&cq->lock);
+ hns_roce_spin_unlock(&cq->hr_lock);

return cur + nreq >= wq->max_post;
}
@@ -721,7 +721,7 @@ static int hns_roce_u_v2_poll_cq(struct ibv_cq *ibvcq, int ne,
int err = V2_CQ_OK;
int npolled;

- pthread_spin_lock(&cq->lock);
+ hns_roce_spin_lock(&cq->hr_lock);

for (npolled = 0; npolled < ne; ++npolled) {
err = hns_roce_poll_one(ctx, &qp, cq, wc + npolled);
@@ -736,7 +736,7 @@ static int hns_roce_u_v2_poll_cq(struct ibv_cq *ibvcq, int ne,
update_cq_db(ctx, cq);
}

- pthread_spin_unlock(&cq->lock);
+ hns_roce_spin_unlock(&cq->hr_lock);

return err == V2_CQ_POLL_ERR ? err : npolled;
}
@@ -1273,7 +1273,7 @@ int hns_roce_u_v2_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
return ret;
}

- pthread_spin_lock(&qp->sq.lock);
+ hns_roce_spin_lock(&qp->sq.hr_lock);

sge_info.start_idx = qp->next_sge; /* start index of extend sge */

@@ -1333,7 +1333,7 @@ out:
*(qp->sdb) = qp->sq.head & 0xffff;
}

- pthread_spin_unlock(&qp->sq.lock);
+ hns_roce_spin_unlock(&qp->sq.hr_lock);

if (ibvqp->state == IBV_QPS_ERR) {
attr.qp_state = IBV_QPS_ERR;
@@ -1426,7 +1426,7 @@ static int hns_roce_u_v2_post_recv(struct ibv_qp *ibvqp, struct ibv_recv_wr *wr,
return ret;
}

- pthread_spin_lock(&qp->rq.lock);
+ hns_roce_spin_lock(&qp->rq.hr_lock);

max_sge = qp->rq.max_gs - qp->rq.rsv_sge;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
@@ -1460,7 +1460,7 @@ out:
hns_roce_update_rq_db(ctx, ibvqp->qp_num, qp->rq.head);
}

- pthread_spin_unlock(&qp->rq.lock);
+ hns_roce_spin_unlock(&qp->rq.hr_lock);

if (ibvqp->state == IBV_QPS_ERR) {
attr.qp_state = IBV_QPS_ERR;
@@ -1515,9 +1515,9 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *cq, uint32_t qpn,
static void hns_roce_v2_cq_clean(struct hns_roce_cq *cq, unsigned int qpn,
struct hns_roce_srq *srq)
{
- pthread_spin_lock(&cq->lock);
+ hns_roce_spin_lock(&cq->hr_lock);
__hns_roce_v2_cq_clean(cq, qpn, srq);
- pthread_spin_unlock(&cq->lock);
+ hns_roce_spin_unlock(&cq->hr_lock);
}

static void record_qp_attr(struct ibv_qp *qp, struct ibv_qp_attr *attr,
@@ -1550,8 +1550,8 @@ static int hns_roce_u_v2_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
int ret;

if ((attr_mask & IBV_QP_STATE) && (attr->qp_state == IBV_QPS_ERR)) {
- pthread_spin_lock(&hr_qp->sq.lock);
- pthread_spin_lock(&hr_qp->rq.lock);
+ hns_roce_spin_lock(&hr_qp->sq.hr_lock);
+ hns_roce_spin_lock(&hr_qp->rq.hr_lock);
flag = true;
}

@@ -1560,8 +1560,8 @@ static int hns_roce_u_v2_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
sizeof(resp_ex));

if (flag) {
- pthread_spin_unlock(&hr_qp->rq.lock);
- pthread_spin_unlock(&hr_qp->sq.lock);
+ hns_roce_spin_unlock(&hr_qp->sq.hr_lock);
+ hns_roce_spin_unlock(&hr_qp->rq.hr_lock);
}

if (ret)
@@ -1602,18 +1602,18 @@ static void hns_roce_lock_cqs(struct ibv_qp *qp)

if (send_cq && recv_cq) {
if (send_cq == recv_cq) {
- pthread_spin_lock(&send_cq->lock);
+ hns_roce_spin_lock(&send_cq->hr_lock);
} else if (send_cq->cqn < recv_cq->cqn) {
- pthread_spin_lock(&send_cq->lock);
- pthread_spin_lock(&recv_cq->lock);
+ hns_roce_spin_lock(&send_cq->hr_lock);
+ hns_roce_spin_lock(&recv_cq->hr_lock);
} else {
- pthread_spin_lock(&recv_cq->lock);
- pthread_spin_lock(&send_cq->lock);
+ hns_roce_spin_lock(&recv_cq->hr_lock);
+ hns_roce_spin_lock(&send_cq->hr_lock);
}
} else if (send_cq) {
- pthread_spin_lock(&send_cq->lock);
+ hns_roce_spin_lock(&send_cq->hr_lock);
} else if (recv_cq) {
- pthread_spin_lock(&recv_cq->lock);
+ hns_roce_spin_lock(&recv_cq->hr_lock);
}
}

@@ -1624,18 +1624,18 @@ static void hns_roce_unlock_cqs(struct ibv_qp *qp)

if (send_cq && recv_cq) {
if (send_cq == recv_cq) {
- pthread_spin_unlock(&send_cq->lock);
+ hns_roce_spin_unlock(&send_cq->hr_lock);
} else if (send_cq->cqn < recv_cq->cqn) {
- pthread_spin_unlock(&recv_cq->lock);
- pthread_spin_unlock(&send_cq->lock);
+ hns_roce_spin_unlock(&recv_cq->hr_lock);
+ hns_roce_spin_unlock(&send_cq->hr_lock);
} else {
- pthread_spin_unlock(&send_cq->lock);
- pthread_spin_unlock(&recv_cq->lock);
+ hns_roce_spin_unlock(&send_cq->hr_lock);
+ hns_roce_spin_unlock(&recv_cq->hr_lock);
}
} else if (send_cq) {
- pthread_spin_unlock(&send_cq->lock);
+ hns_roce_spin_unlock(&send_cq->hr_lock);
} else if (recv_cq) {
- pthread_spin_unlock(&recv_cq->lock);
+ hns_roce_spin_unlock(&recv_cq->hr_lock);
}
}

@@ -1750,7 +1750,7 @@ static int hns_roce_u_v2_post_srq_recv(struct ibv_srq *ib_srq,
int ret = 0;
void *wqe;

- pthread_spin_lock(&srq->lock);
+ hns_roce_spin_lock(&srq->hr_lock);

max_sge = srq->max_gs - srq->rsv_sge;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
@@ -1789,7 +1789,7 @@ static int hns_roce_u_v2_post_srq_recv(struct ibv_srq *ib_srq,
(__le32 *)&srq_db);
}

- pthread_spin_unlock(&srq->lock);
+ hns_roce_spin_unlock(&srq->hr_lock);

return ret;
}
@@ -1805,11 +1805,11 @@ static int wc_start_poll_cq(struct ibv_cq_ex *current,
if (attr->comp_mask)
return EINVAL;

- pthread_spin_lock(&cq->lock);
+ hns_roce_spin_lock(&cq->hr_lock);

err = hns_roce_poll_one(ctx, &qp, cq, NULL);
if (err != V2_CQ_OK)
- pthread_spin_unlock(&cq->lock);
+ hns_roce_spin_unlock(&cq->hr_lock);

return err;
}
@@ -1843,7 +1843,7 @@ static void wc_end_poll_cq(struct ibv_cq_ex *current)
else
update_cq_db(ctx, cq);

- pthread_spin_unlock(&cq->lock);
+ hns_roce_spin_unlock(&cq->hr_lock);
}

static enum ibv_wc_opcode wc_read_opcode(struct ibv_cq_ex *current)
@@ -2558,7 +2558,7 @@ static void wr_start(struct ibv_qp_ex *ibv_qp)
return;
}

- pthread_spin_lock(&qp->sq.lock);
+ hns_roce_spin_lock(&qp->sq.hr_lock);
qp->sge_info.start_idx = qp->next_sge;
qp->rb_sq_head = qp->sq.head;
qp->err = 0;
@@ -2591,7 +2591,8 @@ static int wr_complete(struct ibv_qp_ex *ibv_qp)
}

out:
- pthread_spin_unlock(&qp->sq.lock);
+ hns_roce_spin_unlock(&qp->sq.hr_lock);
+
if (ibv_qp->qp_base.state == IBV_QPS_ERR) {
attr.qp_state = IBV_QPS_ERR;
hns_roce_u_v2_modify_qp(&ibv_qp->qp_base, &attr, IBV_QP_STATE);
@@ -2606,7 +2607,7 @@ static void wr_abort(struct ibv_qp_ex *ibv_qp)

qp->sq.head = qp->rb_sq_head;

- pthread_spin_unlock(&qp->sq.lock);
+ hns_roce_spin_unlock(&qp->sq.hr_lock);
}

enum {
diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c
index 3b7a67d..f6c7423 100644
--- a/providers/hns/hns_roce_u_verbs.c
+++ b/providers/hns/hns_roce_u_verbs.c
@@ -33,6 +33,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <math.h>
#include <errno.h>
#include <pthread.h>
#include <sys/mman.h>
@@ -42,6 +43,38 @@
#include "hns_roce_u_db.h"
#include "hns_roce_u_hw_v2.h"

+static int hns_roce_whether_need_lock(struct ibv_pd *pd)
+{
+ struct hns_roce_pad *pad;
+ bool need_lock = true;
+
+ pad = to_hr_pad(pd);
+ if (pad && pad->td)
+ need_lock = false;
+
+ return need_lock;
+}
+
+static int hns_roce_spinlock_init(struct hns_roce_spinlock *hr_lock,
+ bool need_lock)
+{
+ hr_lock->need_lock = need_lock;
+
+ if (need_lock)
+ return pthread_spin_init(&hr_lock->lock,
+ PTHREAD_PROCESS_PRIVATE);
+
+ return 0;
+}
+
+static int hns_roce_spinlock_destroy(struct hns_roce_spinlock *hr_lock)
+{
+ if (hr_lock->need_lock)
+ return pthread_spin_destroy(&hr_lock->lock);
+
+ return 0;
+}
+
void hns_roce_init_qp_indices(struct hns_roce_qp *qp)
{
qp->sq.head = 0;
@@ -85,40 +118,153 @@ int hns_roce_u_query_port(struct ibv_context *context, uint8_t port,
return ibv_cmd_query_port(context, port, attr, &cmd, sizeof(cmd));
}

+struct ibv_td *hns_roce_u_alloc_td(struct ibv_context *context,
+ struct ibv_td_init_attr *attr)
+{
+ struct hns_roce_td *td;
+
+ if (attr->comp_mask) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ td = calloc(1, sizeof(*td));
+ if (!td) {
+ errno = ENOMEM;
+ return NULL;
+ }
+
+ td->ibv_td.context = context;
+ atomic_init(&td->refcount, 1);
+
+ return &td->ibv_td;
+}
+
+int hns_roce_u_dealloc_td(struct ibv_td *ibv_td)
+{
+ struct hns_roce_td *td;
+ int ret = 0;
+
+ td = to_hr_td(ibv_td);
+ if (atomic_load(&td->refcount) > 1) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ free(td);
+
+err:
+ errno = abs(ret);
+ return ret;
+}
+
struct ibv_pd *hns_roce_u_alloc_pd(struct ibv_context *context)
{
+ struct hns_roce_alloc_pd_resp resp = {};
struct ibv_alloc_pd cmd;
struct hns_roce_pd *pd;
- struct hns_roce_alloc_pd_resp resp = {};
-
- pd = malloc(sizeof(*pd));
- if (!pd)
- return NULL;
+ int ret;

- if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof(cmd),
- &resp.ibv_resp, sizeof(resp))) {
- free(pd);
+ pd = calloc(1, sizeof(*pd));
+ if (!pd) {
+ errno = ENOMEM;
return NULL;
}

+ ret = ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof(cmd),
+ &resp.ibv_resp, sizeof(resp));
+
+ if (ret)
+ goto err;
+
+ atomic_init(&pd->refcount, 1);
pd->pdn = resp.pdn;

return &pd->ibv_pd;
+
+err:
+ free(pd);
+ errno = abs(ret);
+ return NULL;
}

-int hns_roce_u_free_pd(struct ibv_pd *pd)
+struct ibv_pd *hns_roce_u_alloc_pad(struct ibv_context *context,
+ struct ibv_parent_domain_init_attr *attr)
+{
+ struct hns_roce_pad *pad;
+
+ if (ibv_check_alloc_parent_domain(attr))
+ return NULL;
+
+ if (attr->comp_mask) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ pad = calloc(1, sizeof(*pad));
+ if (!pad) {
+ errno = ENOMEM;
+ return NULL;
+ }
+
+ if (attr->td) {
+ pad->td = to_hr_td(attr->td);
+ atomic_fetch_add(&pad->td->refcount, 1);
+ }
+
+ pad->pd.protection_domain = to_hr_pd(attr->pd);
+ atomic_fetch_add(&pad->pd.protection_domain->refcount, 1);
+
+ ibv_initialize_parent_domain(&pad->pd.ibv_pd,
+ &pad->pd.protection_domain->ibv_pd);
+
+ return &pad->pd.ibv_pd;
+}
+
+static void hns_roce_free_pad(struct hns_roce_pad *pad)
+{
+ atomic_fetch_sub(&pad->pd.protection_domain->refcount, 1);
+
+ if (pad->td)
+ atomic_fetch_sub(&pad->td->refcount, 1);
+
+ free(pad);
+}
+
+static int hns_roce_free_pd(struct hns_roce_pd *pd)
{
int ret;

- ret = ibv_cmd_dealloc_pd(pd);
+ if (atomic_load(&pd->refcount) > 1) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ret = ibv_cmd_dealloc_pd(&pd->ibv_pd);
if (ret)
- return ret;
+ goto err;

- free(to_hr_pd(pd));
+ free(pd);
+
+err:
+ errno = abs(ret);

return ret;
}

+int hns_roce_u_dealloc_pd(struct ibv_pd *ibv_pd)
+{
+ struct hns_roce_pad *pad = to_hr_pad(ibv_pd);
+ struct hns_roce_pd *pd = to_hr_pd(ibv_pd);
+
+ if (pad) {
+ hns_roce_free_pad(pad);
+ return 0;
+ }
+
+ return hns_roce_free_pd(pd);
+}
+
struct ibv_xrcd *hns_roce_u_open_xrcd(struct ibv_context *context,
struct ibv_xrcd_init_attr *xrcd_init_attr)
{
@@ -275,6 +421,11 @@ int hns_roce_u_dealloc_mw(struct ibv_mw *mw)
return 0;
}

+enum {
+ CREATE_CQ_SUPPORTED_COMP_MASK = IBV_CQ_INIT_ATTR_MASK_FLAGS |
+ IBV_CQ_INIT_ATTR_MASK_PD,
+};
+
enum {
CREATE_CQ_SUPPORTED_WC_FLAGS = IBV_WC_STANDARD_FLAGS |
IBV_WC_EX_WITH_CVLAN,
@@ -286,12 +437,22 @@ static int verify_cq_create_attr(struct ibv_cq_init_attr_ex *attr,
if (!attr->cqe || attr->cqe > context->max_cqe)
return -EINVAL;

- if (attr->comp_mask)
- return -EOPNOTSUPP;
+ if (!check_comp_mask(attr->comp_mask, CREATE_CQ_SUPPORTED_COMP_MASK)) {
+ verbs_err(&context->ibv_ctx, "unsupported cq comps 0x%x\n",
+ attr->comp_mask);
+ return EOPNOTSUPP;
+ }

if (!check_comp_mask(attr->wc_flags, CREATE_CQ_SUPPORTED_WC_FLAGS))
return -EOPNOTSUPP;

+ if (attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_PD) {
+ if (!to_hr_pad(attr->parent_domain)) {
+ verbs_err(&context->ibv_ctx, "failed to check the pad of cq.\n");
+ return EINVAL;
+ }
+ }
+
attr->cqe = max_t(uint32_t, HNS_ROCE_MIN_CQE_NUM,
roundup_pow_of_two(attr->cqe));

@@ -341,7 +502,9 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
struct ibv_cq_init_attr_ex *attr)
{
struct hns_roce_context *hr_ctx = to_hr_ctx(context);
+ struct hns_roce_pad *pad = NULL;
struct hns_roce_cq *cq;
+ int need_lock;
int ret;

ret = verify_cq_create_attr(attr, hr_ctx);
@@ -354,7 +517,14 @@ static struct ibv_cq_ex *create_cq(struct ibv_context *context,
goto err;
}

- ret = pthread_spin_init(&cq->lock, PTHREAD_PROCESS_PRIVATE);
+ if (attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_PD)
+ pad = to_hr_pad(attr->parent_domain);
+
+ need_lock = hns_roce_whether_need_lock(pad ? &pad->pd.ibv_pd : NULL);
+ if (!need_lock)
+ verbs_info(verbs_get_ctx(context), "configure cq as no lock.\n");
+
+ ret = hns_roce_spinlock_init(&cq->hr_lock, need_lock);
if (ret)
goto err_lock;

@@ -385,14 +555,12 @@ err_cmd:
hns_roce_free_db(hr_ctx, cq->db, HNS_ROCE_CQ_TYPE_DB);
err_db:
hns_roce_free_buf(&cq->buf);
-err_lock:
err_buf:
+ hns_roce_spinlock_destroy(&cq->hr_lock);
+err_lock:
free(cq);
err:
- if (ret < 0)
- ret = -ret;
-
- errno = ret;
+ errno = abs(ret);
return NULL;
}

@@ -655,6 +823,7 @@ static struct ibv_srq *create_srq(struct ibv_context *context,
{
struct hns_roce_context *hr_ctx = to_hr_ctx(context);
struct hns_roce_srq *srq;
+ int need_lock;
int ret;

ret = verify_srq_create_attr(hr_ctx, init_attr);
@@ -667,7 +836,11 @@ static struct ibv_srq *create_srq(struct ibv_context *context,
goto err;
}

- if (pthread_spin_init(&srq->lock, PTHREAD_PROCESS_PRIVATE))
+ need_lock = hns_roce_whether_need_lock(init_attr->pd);
+ if (!need_lock)
+ verbs_info(verbs_get_ctx(context), "configure srq as no lock.\n");
+
+ if (hns_roce_spinlock_init(&srq->hr_lock, need_lock))
goto err_free_srq;

set_srq_param(context, srq, init_attr);
@@ -875,6 +1048,48 @@ static int verify_qp_create_attr(struct hns_roce_context *ctx,
return verify_qp_create_cap(ctx, attr);
}

+static int hns_roce_qp_spinlock_init(struct hns_roce_context *ctx,
+ struct ibv_qp_init_attr_ex *attr,
+ struct hns_roce_qp *qp)
+{
+ int sq_need_lock;
+ int rq_need_lock;
+ int ret;
+
+ sq_need_lock = hns_roce_whether_need_lock(attr->pd);
+ if (!sq_need_lock)
+ verbs_warn(&ctx->ibv_ctx, "configure sq as no lock.\n");
+
+ rq_need_lock = hns_roce_whether_need_lock(attr->pd);
+ if (!rq_need_lock)
+ verbs_warn(&ctx->ibv_ctx, "configure rq as no lock.\n");
+
+ ret = hns_roce_spinlock_init(&qp->sq.hr_lock, sq_need_lock);
+ if (ret) {
+ verbs_err(&ctx->ibv_ctx, "failed to init sq spinlock.\n");
+ return ret;
+ }
+
+ ret = hns_roce_spinlock_init(&qp->rq.hr_lock, rq_need_lock);
+ if (ret) {
+ verbs_err(&ctx->ibv_ctx, "failed to init rq spinlock.\n");
+ goto err_rq_lock;
+ }
+
+ return 0;
+
+err_rq_lock:
+ hns_roce_spinlock_destroy(&qp->sq.hr_lock);
+
+ return ret;
+}
+
+void hns_roce_qp_spinlock_destroy(struct hns_roce_qp *qp)
+{
+ hns_roce_spinlock_destroy(&qp->rq.hr_lock);
+ hns_roce_spinlock_destroy(&qp->sq.hr_lock);
+}
+
static int alloc_recv_rinl_buf(uint32_t max_sge,
struct hns_roce_rinl_buf *rinl_buf)
{
@@ -1248,8 +1463,8 @@ static int hns_roce_alloc_qp_buf(struct ibv_qp_init_attr_ex *attr,
{
int ret;

- if (pthread_spin_init(&qp->sq.lock, PTHREAD_PROCESS_PRIVATE) ||
- pthread_spin_init(&qp->rq.lock, PTHREAD_PROCESS_PRIVATE))
+ if (pthread_spin_init(&qp->sq.hr_lock.lock, PTHREAD_PROCESS_PRIVATE) ||
+ pthread_spin_init(&qp->rq.hr_lock.lock, PTHREAD_PROCESS_PRIVATE))
return -ENOMEM;

ret = qp_alloc_wqe(&attr->cap, qp, ctx);
@@ -1294,6 +1509,10 @@ static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx,

hns_roce_set_qp_params(attr, qp, context);

+ ret = hns_roce_qp_spinlock_init(context, attr, qp);
+ if (ret)
+ goto err_spinlock;
+
ret = hns_roce_alloc_qp_buf(attr, qp, context);
if (ret)
goto err_buf;
@@ -1327,6 +1546,8 @@ err_ops:
err_cmd:
hns_roce_free_qp_buf(qp, context);
err_buf:
+ hns_roce_qp_spinlock_destroy(qp);
+err_spinlock:
free(qp);
err:
if (ret < 0)
--
2.30.0
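For completeness, a companion sketch (again hypothetical, reusing "ctx" and the parent domain "pad" from the earlier example) of creating a CQ on the parent domain through the extended CQ API, which exercises the new IBV_CQ_INIT_ATTR_MASK_PD handling in verify_cq_create_attr() and create_cq():

struct ibv_cq_init_attr_ex cq_attr = {
	.cqe = 64,			/* any value within max_cqe */
	.comp_vector = 0,
	.wc_flags = IBV_WC_STANDARD_FLAGS,
	.comp_mask = IBV_CQ_INIT_ATTR_MASK_PD,
	.parent_domain = pad,		/* TD-backed parent domain */
};
struct ibv_cq_ex *cq = ibv_create_cq_ex(ctx, &cq_attr);

When pad is backed by a thread domain, hns_roce_whether_need_lock() returns false, the provider logs "configure cq as no lock.", and each hns_roce_spin_lock()/hns_roce_spin_unlock() on this CQ reduces to a predictable branch instead of a contended atomic operation.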
rdma-core.spec
@@ -1,6 +1,6 @@
Name: rdma-core
Version: 41.0
-Release: 5
+Release: 6
Summary: RDMA core userspace libraries and daemons
License: GPLv2 or BSD
Url: https://github.com/linux-rdma/rdma-core
@@ -35,6 +35,7 @@ Patch25: 0026-rdma-ndd-disable-systemd-ProtectHostName-feature.patch
Patch26: 0027-libhns-Add-RoH-device-IDs.patch
Patch27: 0028-Update-kernel-headers.patch
Patch28: 0029-libhns-Add-the-parsing-of-mac-type-in-RoH-mode.patch
+Patch29: 0030-libhns-Add-support-for-the-thread-domain-and-the-par.patch

BuildRequires: binutils cmake >= 2.8.11 gcc libudev-devel pkgconfig pkgconfig(libnl-3.0)
BuildRequires: pkgconfig(libnl-route-3.0) valgrind-devel systemd systemd-devel
@@ -279,6 +280,12 @@ fi
%{_mandir}/*

%changelog
+* Mon Nov 28 2022 Yixing Liu <liuyixing1@huawei.com> - 41.0-6
+- Type: requirement
+- ID: NA
+- SUG: NA
+- DESC: Support libhns td unlock
+
* Mon Nov 07 2022 Guofeng Yue <yueguofeng@hisilicon.com> - 41.0-5
- Type: requirement
- ID: NA