From 6aa5efb3059c66d3d0f49804551b38c5ed827ec1 Mon Sep 17 00:00:00 2001
From: Chengchang Tang
Date: Mon, 10 May 2021 17:13:49 +0800
Subject: libhns: Add direct verbs support to configure DCA

driver inclusion
category: feature
bugzilla: https://gitee.com/src-openeuler/rdma-core/issues/I63L1M

----------------------------------------------------------

Add two direct verbs to configure DCA:
1. hnsdv_open_device() is used to configure the DCA memory pool.
2. hnsdv_create_qp() is used to create a DCA QP.

Signed-off-by: Chengchang Tang
Reviewed-by: Yangyang Li
---
 debian/control                             |  2 +-
 debian/ibverbs-providers.install           |  1 +
 debian/ibverbs-providers.lintian-overrides |  4 +-
 debian/ibverbs-providers.symbols           |  6 ++
 debian/libibverbs-dev.install              |  4 +
 providers/hns/CMakeLists.txt               |  9 ++-
 providers/hns/hns_roce_u.c                 | 92 +++++++++++++++++++---
 providers/hns/hns_roce_u.h                 |  2 +
 providers/hns/hns_roce_u_abi.h             |  1 +
 providers/hns/hns_roce_u_buf.c             |  3 +
 providers/hns/hns_roce_u_hw_v2.c           | 33 +++++++-
 providers/hns/hns_roce_u_verbs.c           | 58 ++++++++++++--
 providers/hns/hnsdv.h                      | 65 +++++++++++++++
 providers/hns/libhns.map                   |  9 +++
 redhat/rdma-core.spec                      |  5 +-
 suse/rdma-core.spec                        | 21 ++++-
 16 files changed, 289 insertions(+), 26 deletions(-)
 create mode 100644 providers/hns/hnsdv.h
 create mode 100644 providers/hns/libhns.map

diff --git a/debian/control b/debian/control
index 7485ad3..22eb6cd 100644
--- a/debian/control
+++ b/debian/control
@@ -94,7 +94,7 @@ Description: User space provider drivers for libibverbs
  - cxgb4: Chelsio T4 iWARP HCAs
  - efa: Amazon Elastic Fabric Adapter
  - hfi1verbs: Intel Omni-Path HFI
- - hns: HiSilicon Hip06 SoC
+ - hns: HiSilicon Hip08+ SoC
  - ipathverbs: QLogic InfiniPath HCAs
  - irdma: Intel Ethernet Connection RDMA
  - mlx4: Mellanox ConnectX-3 InfiniBand HCAs
diff --git a/debian/ibverbs-providers.install b/debian/ibverbs-providers.install
index 4f971fb..c6ecbbc 100644
--- a/debian/ibverbs-providers.install
+++ b/debian/ibverbs-providers.install
@@ -1,5 +1,6 @@
 etc/libibverbs.d/
 usr/lib/*/libefa.so.*
 usr/lib/*/libibverbs/lib*-rdmav*.so
+usr/lib/*/libhns.so.*
 usr/lib/*/libmlx4.so.*
 usr/lib/*/libmlx5.so.*
diff --git a/debian/ibverbs-providers.lintian-overrides b/debian/ibverbs-providers.lintian-overrides
index 8a44d54..f6afb70 100644
--- a/debian/ibverbs-providers.lintian-overrides
+++ b/debian/ibverbs-providers.lintian-overrides
@@ -1,2 +1,2 @@
-# libefa, libmlx4 and libmlx5 are ibverbs provider that provides more functions.
-ibverbs-providers: package-name-doesnt-match-sonames libefa1 libmlx4-1 libmlx5-1
+# libefa, libhns, libmlx4 and libmlx5 are ibverbs provider that provides more functions.
+ibverbs-providers: package-name-doesnt-match-sonames libefa1 libhns-1 libmlx4-1 libmlx5-1
diff --git a/debian/ibverbs-providers.symbols b/debian/ibverbs-providers.symbols
index 2c6b330..1844369 100644
--- a/debian/ibverbs-providers.symbols
+++ b/debian/ibverbs-providers.symbols
@@ -162,3 +162,9 @@ libefa.so.1 ibverbs-providers #MINVER#
 efadv_create_qp_ex@EFA_1.1 26
 efadv_query_device@EFA_1.1 26
 efadv_query_ah@EFA_1.1 26
+libhns.so.1 ibverbs-providers #MINVER#
+* Build-Depends-Package: libibverbs-dev
+ HNS_1.0@HNS_1.0 36
+ hnsdv_is_supported@HNS_1.0 36
+ hnsdv_open_device@HNS_1.0 36
+ hnsdv_create_qp@HNS_1.0 36
diff --git a/debian/libibverbs-dev.install b/debian/libibverbs-dev.install
index bc8caa5..7d6e6a2 100644
--- a/debian/libibverbs-dev.install
+++ b/debian/libibverbs-dev.install
@@ -1,5 +1,6 @@
 usr/include/infiniband/arch.h
 usr/include/infiniband/efadv.h
+usr/include/infiniband/hnsdv.h
 usr/include/infiniband/ib_user_ioctl_verbs.h
 usr/include/infiniband/mlx4dv.h
 usr/include/infiniband/mlx5_api.h
@@ -14,6 +15,8 @@ usr/include/infiniband/verbs_api.h
 usr/lib/*/lib*-rdmav*.a
 usr/lib/*/libefa.a
 usr/lib/*/libefa.so
+usr/lib/*/libhns.a
+usr/lib/*/libhns.so
 usr/lib/*/libibverbs*.so
 usr/lib/*/libibverbs.a
 usr/lib/*/libmlx4.a
@@ -21,6 +24,7 @@ usr/lib/*/libmlx4.so
 usr/lib/*/libmlx5.a
 usr/lib/*/libmlx5.so
 usr/lib/*/pkgconfig/libefa.pc
+usr/lib/*/pkgconfig/libhns.pc
 usr/lib/*/pkgconfig/libibverbs.pc
 usr/lib/*/pkgconfig/libmlx4.pc
 usr/lib/*/pkgconfig/libmlx5.pc
diff --git a/providers/hns/CMakeLists.txt b/providers/hns/CMakeLists.txt
index 7aaca75..160e1ff 100644
--- a/providers/hns/CMakeLists.txt
+++ b/providers/hns/CMakeLists.txt
@@ -1,7 +1,14 @@
-rdma_provider(hns
+rdma_shared_provider(hns libhns.map
+  1 1.0.${PACKAGE_VERSION}
   hns_roce_u.c
   hns_roce_u_buf.c
   hns_roce_u_db.c
   hns_roce_u_hw_v2.c
   hns_roce_u_verbs.c
 )
+
+publish_headers(infiniband
+  hnsdv.h
+)
+
+rdma_pkg_config("hns" "libibverbs" "${CMAKE_THREAD_LIBS_INIT}")
diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c
index fe30cda..0cf6d4b 100644
--- a/providers/hns/hns_roce_u.c
+++ b/providers/hns/hns_roce_u.c
@@ -114,8 +114,60 @@ static int mmap_dca(struct hns_roce_context *ctx, int cmd_fd,
 	return 0;
 }
 
+bool hnsdv_is_supported(struct ibv_device *device)
+{
+	return is_hns_dev(device);
+}
+
+struct ibv_context *hnsdv_open_device(struct ibv_device *device,
+				      struct hnsdv_context_attr *attr)
+{
+	if (!is_hns_dev(device)) {
+		errno = EOPNOTSUPP;
+		return NULL;
+	}
+
+	return verbs_open_device(device, attr);
+}
+
+static void set_dca_pool_param(struct hns_roce_context *ctx,
+			       struct hnsdv_context_attr *attr, int page_size)
+{
+	struct hns_roce_dca_ctx *dca_ctx = &ctx->dca_ctx;
+
+	if (attr->comp_mask & HNSDV_CONTEXT_MASK_DCA_UNIT_SIZE)
+		dca_ctx->unit_size = align(attr->dca_unit_size, page_size);
+	else
+		dca_ctx->unit_size = page_size * HNS_DCA_DEFAULT_UNIT_PAGES;
+
+	/* The memory pool cannot be expanded, only init the DCA context. */
+	if (dca_ctx->unit_size == 0)
+		return;
+
+	/* If not set, the memory pool can be expanded unlimitedly. */
+	if (attr->comp_mask & HNSDV_CONTEXT_MASK_DCA_MAX_SIZE)
+		dca_ctx->max_size = DIV_ROUND_UP(attr->dca_max_size,
+						 dca_ctx->unit_size) *
+				    dca_ctx->unit_size;
+	else
+		dca_ctx->max_size = HNS_DCA_MAX_MEM_SIZE;
+
+	/* If not set, the memory pool cannot be shrunk. */
+	if (attr->comp_mask & HNSDV_CONTEXT_MASK_DCA_MIN_SIZE)
+		dca_ctx->min_size = DIV_ROUND_UP(attr->dca_min_size,
+						 dca_ctx->unit_size) *
+				    dca_ctx->unit_size;
+	else
+		dca_ctx->min_size = HNS_DCA_MAX_MEM_SIZE;
+
+	verbs_debug(&ctx->ibv_ctx,
+		    "Support DCA, unit %d, max %ld, min %ld Bytes.\n",
+		    dca_ctx->unit_size, dca_ctx->max_size, dca_ctx->min_size);
+}
+
 static int init_dca_context(struct hns_roce_context *ctx, int cmd_fd,
 			    struct hns_roce_alloc_ucontext_resp *resp,
+			    struct hnsdv_context_attr *attr,
 			    int page_size)
 {
 	struct hns_roce_dca_ctx *dca_ctx = &ctx->dca_ctx;
@@ -127,14 +179,18 @@ static int init_dca_context(struct hns_roce_context *ctx, int cmd_fd,
 	if (!(ctx->config & HNS_ROCE_UCTX_RSP_DCA_FLAGS))
 		return 0;
 
+	dca_ctx->unit_size = 0;
+	dca_ctx->mem_cnt = 0;
+
 	list_head_init(&dca_ctx->mem_list);
 	ret = pthread_spin_init(&dca_ctx->lock, PTHREAD_PROCESS_PRIVATE);
 	if (ret)
 		return ret;
 
-	dca_ctx->unit_size = page_size * HNS_DCA_DEFAULT_UNIT_PAGES;
-	dca_ctx->max_size = HNS_DCA_MAX_MEM_SIZE;
-	dca_ctx->mem_cnt = 0;
+	if (!attr || !(attr->flags & HNSDV_CONTEXT_FLAGS_DCA))
+		return 0;
+
+	set_dca_pool_param(ctx, attr, page_size);
 
 	if (mmap_key) {
 		const unsigned int bits_per_qp = 2 * HNS_DCA_BITS_PER_STATUS;
@@ -185,18 +241,28 @@ static uint32_t calc_table_shift(uint32_t entry_count, uint32_t size_shift)
 	return count_shift > size_shift ? count_shift - size_shift : 0;
 }
 
-static void ucontext_set_cmd(struct hns_roce_alloc_ucontext *cmd, int page_size)
+static void ucontext_set_cmd(struct hns_roce_alloc_ucontext *cmd,
+			     struct hnsdv_context_attr *attr)
 {
 	cmd->config |= HNS_ROCE_EXSGE_FLAGS | HNS_ROCE_RQ_INLINE_FLAGS |
-		       HNS_ROCE_CQE_INLINE_FLAGS | HNS_ROCE_UCTX_CONFIG_DCA;
-	cmd->comp = HNS_ROCE_ALLOC_UCTX_COMP_DCA_MAX_QPS;
-	cmd->dca_max_qps = page_size * 8 / 2 * HNS_DCA_BITS_PER_STATUS;
+		       HNS_ROCE_CQE_INLINE_FLAGS;
+
+	if (!attr || !(attr->flags & HNSDV_CONTEXT_FLAGS_DCA))
+		return;
+
+	cmd->config |= HNS_ROCE_UCTX_CONFIG_DCA;
+
+	if (attr->comp_mask & HNSDV_CONTEXT_MASK_DCA_PRIME_QPS) {
+		cmd->comp |= HNS_ROCE_ALLOC_UCTX_COMP_DCA_MAX_QPS;
+		cmd->dca_max_qps = attr->dca_prime_qps;
+	}
 }
 
 static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
 						    int cmd_fd,
 						    void *private_data)
 {
+	struct hnsdv_context_attr *ctx_attr = private_data;
 	struct hns_roce_device *hr_dev = to_hr_dev(ibdev);
 	struct hns_roce_alloc_ucontext_resp resp = {};
 	struct hns_roce_alloc_ucontext cmd = {};
@@ -209,7 +275,7 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
 	if (!context)
 		return NULL;
 
-	ucontext_set_cmd(&cmd, hr_dev->page_size);
+	ucontext_set_cmd(&cmd, ctx_attr);
 	if (ibv_cmd_get_context(&context->ibv_ctx, &cmd.ibv_cmd, sizeof(cmd),
 				&resp.ibv_resp, sizeof(resp)))
 		goto err_free;
@@ -255,7 +321,8 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
 	context->max_srq_wr = dev_attrs.max_srq_wr;
 	context->max_srq_sge = dev_attrs.max_srq_sge;
 
-	if (init_dca_context(context, cmd_fd, &resp, hr_dev->page_size))
+	if (init_dca_context(context, cmd_fd,
+			     &resp, ctx_attr, hr_dev->page_size))
 		goto err_free;
 
 	if (hns_roce_mmap(hr_dev, context, cmd_fd))
@@ -317,4 +384,11 @@ static const struct verbs_device_ops hns_roce_dev_ops = {
 	.uninit_device = hns_uninit_device,
 	.alloc_context = hns_roce_alloc_context,
 };
+
+bool is_hns_dev(struct ibv_device *device)
+{
+	struct verbs_device *verbs_device = verbs_get_device(device);
+
+	return verbs_device->ops == &hns_roce_dev_ops;
+}
 PROVIDER_DRIVER(hns, hns_roce_dev_ops);
diff --git a/providers/hns/hns_roce_u.h b/providers/hns/hns_roce_u.h
index 91b0c8f..71c35c5 100644
--- a/providers/hns/hns_roce_u.h
+++ b/providers/hns/hns_roce_u.h
@@ -562,6 +562,8 @@ static inline void clear_bit_unlock(atomic_bitmap_t *p, uint32_t nr)
 	atomic_fetch_and(p, ~HNS_ROCE_BIT_MASK(nr));
 }
 
+bool is_hns_dev(struct ibv_device *device);
+
 int hns_roce_u_query_device(struct ibv_context *context,
 			    const struct ibv_query_device_ex_input *input,
 			    struct ibv_device_attr_ex *attr, size_t attr_size);
diff --git a/providers/hns/hns_roce_u_abi.h b/providers/hns/hns_roce_u_abi.h
index 0519ac7..1eaf62d 100644
--- a/providers/hns/hns_roce_u_abi.h
+++ b/providers/hns/hns_roce_u_abi.h
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include "hnsdv.h"
 
 DECLARE_DRV_CMD(hns_roce_alloc_pd, IB_USER_VERBS_CMD_ALLOC_PD,
 		empty, hns_roce_ib_alloc_pd_resp);
diff --git a/providers/hns/hns_roce_u_buf.c b/providers/hns/hns_roce_u_buf.c
index 08c0fbc..780683e 100644
--- a/providers/hns/hns_roce_u_buf.c
+++ b/providers/hns/hns_roce_u_buf.c
@@ -56,6 +56,9 @@ int hns_roce_alloc_buf(struct hns_roce_buf *buf, unsigned int size,
 
 void hns_roce_free_buf(struct hns_roce_buf *buf)
 {
+	if (!buf->buf)
+		return;
+
 	ibv_dofork_range(buf->buf, buf->length);
 
 	munmap(buf->buf, buf->length);
diff --git a/providers/hns/hns_roce_u_hw_v2.c b/providers/hns/hns_roce_u_hw_v2.c
index 028d20c..7661863 100644
--- a/providers/hns/hns_roce_u_hw_v2.c
+++ b/providers/hns/hns_roce_u_hw_v2.c
@@ -1473,6 +1473,7 @@ out:
 static int check_qp_recv(struct hns_roce_qp *qp, struct hns_roce_context *ctx)
 {
 	struct ibv_qp *ibvqp = &qp->verbs_qp.qp;
+	int ret = 0;
 
 	if (unlikely(ibvqp->qp_type != IBV_QPT_RC &&
 		     ibvqp->qp_type != IBV_QPT_UD))
@@ -1481,10 +1482,15 @@ static int check_qp_recv(struct hns_roce_qp *qp, struct hns_roce_context *ctx)
 	if (ibvqp->state == IBV_QPS_RESET || ibvqp->srq)
 		return -EINVAL;
 
-	if (check_dca_attach_enable(qp))
-		return dca_attach_qp_buf(ctx, qp);
+	if (check_dca_attach_enable(qp)) {
+		ret = dca_attach_qp_buf(ctx, qp);
+		if (ret)
+			verbs_err_datapath(&ctx->ibv_ctx,
+					   "failed to attach QP-%u recv, ret = %d.\n",
+					   qp->verbs_qp.qp.qp_num, ret);
+	}
 
-	return 0;
+	return ret;
 }
 
 static void fill_recv_sge_to_wqe(struct ibv_recv_wr *wr, void *wqe,
@@ -1951,6 +1957,9 @@ static int wc_start_poll_cq(struct ibv_cq_ex *current,
 	hns_roce_spin_lock(&cq->hr_lock);
 
 	err = hns_roce_poll_one(ctx, &qp, cq, NULL);
+	if (qp && check_dca_detach_enable(qp))
+		dca_detach_qp_buf(ctx, qp);
+
 	if (err != V2_CQ_OK)
 		hns_roce_spin_unlock(&cq->hr_lock);
 
@@ -1965,6 +1974,8 @@ static int wc_next_poll_cq(struct ibv_cq_ex *current)
 	int err;
 
 	err = hns_roce_poll_one(ctx, &qp, cq, NULL);
+	if (qp && check_dca_detach_enable(qp))
+		dca_detach_qp_buf(ctx, qp);
 	if (err != V2_CQ_OK)
 		return err;
 
@@ -2159,6 +2170,9 @@ init_rc_wqe(struct hns_roce_qp *qp, uint64_t wr_id, unsigned int opcode)
 	hr_reg_clear(wqe, RCWQE_INLINE);
 	hr_reg_clear(wqe, RCWQE_SO);
 
+	if (check_qp_dca_enable(qp))
+		fill_rc_dca_fields(qp->verbs_qp.qp.qp_num, wqe);
+
 	qp->sq.wrid[wqe_idx] = wr_id;
 	qp->cur_wqe = wqe;
 	qp->sq.head++;
@@ -2691,8 +2705,10 @@ static void wr_set_inline_data_list_ud(struct ibv_qp_ex *ibv_qp, size_t num_buf,
 
 static void wr_start(struct ibv_qp_ex *ibv_qp)
 {
+	struct hns_roce_context *ctx = to_hr_ctx(ibv_qp->qp_base.context);
 	struct hns_roce_qp *qp = to_hr_qp(&ibv_qp->qp_base);
 	enum ibv_qp_state state = ibv_qp->qp_base.state;
+	int ret;
 
 	if (state == IBV_QPS_RESET ||
 	    state == IBV_QPS_INIT ||
@@ -2701,6 +2717,17 @@ static void wr_start(struct ibv_qp_ex *ibv_qp)
 		return;
 	}
 
+	if (check_qp_dca_enable(qp)) {
+		ret = dca_attach_qp_buf(ctx, qp);
+		if (ret) {
+			verbs_err_datapath(&ctx->ibv_ctx,
+					   "failed to attach QP-%u send, ret = %d.\n",
+					   qp->verbs_qp.qp.qp_num, ret);
+			qp->err = ret;
+			return;
+		}
+	}
+
 	hns_roce_spin_lock(&qp->sq.hr_lock);
 	qp->sge_info.start_idx = qp->next_sge;
 	qp->rb_sq_head = qp->sq.head;
diff --git a/providers/hns/hns_roce_u_verbs.c b/providers/hns/hns_roce_u_verbs.c
index 749b01b..282ab74 100644
--- a/providers/hns/hns_roce_u_verbs.c
+++ b/providers/hns/hns_roce_u_verbs.c
@@ -961,6 +961,15 @@ enum {
 	IBV_QP_INIT_ATTR_SEND_OPS_FLAGS,
 };
 
+enum {
+	SEND_OPS_FLAG_MASK =
+	IBV_QP_EX_WITH_RDMA_WRITE | IBV_QP_EX_WITH_RDMA_WRITE_WITH_IMM |
+	IBV_QP_EX_WITH_SEND | IBV_QP_EX_WITH_SEND_WITH_IMM |
+	IBV_QP_EX_WITH_RDMA_READ | IBV_QP_EX_WITH_ATOMIC_CMP_AND_SWP |
+	IBV_QP_EX_WITH_ATOMIC_FETCH_AND_ADD | IBV_QP_EX_WITH_LOCAL_INV |
+	IBV_QP_EX_WITH_SEND_WITH_INV,
+};
+
 static int check_qp_create_mask(struct hns_roce_context *ctx,
 				struct ibv_qp_init_attr_ex *attr)
 {
@@ -969,6 +978,10 @@ static int check_qp_create_mask(struct hns_roce_context *ctx,
 	if (!check_comp_mask(attr->comp_mask, CREATE_QP_SUP_COMP_MASK))
 		return -EOPNOTSUPP;
 
+	if (attr->comp_mask & IBV_QP_INIT_ATTR_SEND_OPS_FLAGS &&
+	    !check_comp_mask(attr->send_ops_flags, SEND_OPS_FLAG_MASK))
+		return -EOPNOTSUPP;
+
 	switch (attr->qp_type) {
 	case IBV_QPT_UD:
 		if (hr_dev->hw_version == HNS_ROCE_HW_VER2)
@@ -1165,9 +1178,21 @@ static int calc_qp_buff_size(struct hns_roce_device *hr_dev,
 	return 0;
 }
 
-static inline bool check_qp_support_dca(bool pool_en, enum ibv_qp_type qp_type)
+static inline bool check_qp_support_dca(struct hns_roce_dca_ctx *dca_ctx,
+					struct ibv_qp_init_attr_ex *attr,
+					struct hnsdv_qp_init_attr *hns_attr)
 {
-	if (pool_en && (qp_type == IBV_QPT_RC || qp_type == IBV_QPT_XRC_SEND))
+	/* The DCA pool is disabled. */
+	if (!dca_ctx->unit_size)
+		return false;
+
+	/* Unsupported QP type */
+	if (attr->qp_type != IBV_QPT_RC && attr->qp_type != IBV_QPT_XRC_SEND)
+		return false;
+
+	if (hns_attr &&
+	    (hns_attr->comp_mask & HNSDV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS) &&
+	    (hns_attr->create_flags & HNSDV_QP_CREATE_ENABLE_DCA_MODE))
 		return true;
 
 	return false;
@@ -1185,6 +1210,7 @@ static void qp_free_wqe(struct hns_roce_qp *qp)
 }
 
 static int qp_alloc_wqe(struct ibv_qp_init_attr_ex *attr,
+			struct hnsdv_qp_init_attr *hns_attr,
 			struct hns_roce_qp *qp, struct hns_roce_context *ctx)
 {
 	struct hns_roce_device *hr_dev = to_hr_dev(ctx->ibv_ctx.context.device);
@@ -1208,7 +1234,8 @@ static int qp_alloc_wqe(struct ibv_qp_init_attr_ex *attr,
 		goto err_alloc;
 	}
 
-	if (check_qp_support_dca(ctx->dca_ctx.max_size != 0, attr->qp_type)) {
+	if (check_qp_support_dca(&ctx->dca_ctx, attr, hns_attr) &&
+	    ctx->dca_ctx.max_size > 0) {
 		/* when DCA is enabled, use a buffer list to store page addr */
 		qp->buf.buf = NULL;
 		qp->dca_wqe.max_cnt = hr_hw_page_count(qp->buf_size);
@@ -1216,6 +1243,7 @@ static int qp_alloc_wqe(struct ibv_qp_init_attr_ex *attr,
 		qp->dca_wqe.bufs = calloc(qp->dca_wqe.max_cnt, sizeof(void *));
 		if (!qp->dca_wqe.bufs)
 			goto err_alloc;
+		verbs_debug(&ctx->ibv_ctx, "alloc DCA buf.\n");
 	} else {
 		if (hns_roce_alloc_buf(&qp->buf, qp->buf_size,
 				       HNS_HW_PAGE_SIZE))
@@ -1478,6 +1506,7 @@ void hns_roce_free_qp_buf(struct hns_roce_qp *qp, struct hns_roce_context *ctx)
 }
 
 static int hns_roce_alloc_qp_buf(struct ibv_qp_init_attr_ex *attr,
+				 struct hnsdv_qp_init_attr *hns_attr,
 				 struct hns_roce_qp *qp,
 				 struct hns_roce_context *ctx)
 {
@@ -1487,7 +1516,7 @@ static int hns_roce_alloc_qp_buf(struct ibv_qp_init_attr_ex *attr,
 	    pthread_spin_init(&qp->rq.hr_lock.lock, PTHREAD_PROCESS_PRIVATE))
 		return -ENOMEM;
 
-	ret = qp_alloc_wqe(attr, qp, ctx);
+	ret = qp_alloc_wqe(attr, hns_attr, qp, ctx);
 	if (ret)
 		return ret;
 
@@ -1510,7 +1539,8 @@ static int mmap_dwqe(struct ibv_context *ibv_ctx, struct hns_roce_qp *qp,
 }
 
 static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx,
-				struct ibv_qp_init_attr_ex *attr)
+				struct ibv_qp_init_attr_ex *attr,
+				struct hnsdv_qp_init_attr *hns_attr)
 {
 	struct hns_roce_context *context = to_hr_ctx(ibv_ctx);
 	struct hns_roce_qp *qp;
@@ -1533,7 +1563,7 @@ static struct ibv_qp *create_qp(struct ibv_context *ibv_ctx,
 	if (ret)
 		goto err_spinlock;
 
-	ret = hns_roce_alloc_qp_buf(attr, qp, context);
+	ret = hns_roce_alloc_qp_buf(attr, hns_attr, qp, context);
 	if (ret)
 		goto err_buf;
 
@@ -1587,7 +1617,7 @@ struct ibv_qp *hns_roce_u_create_qp(struct ibv_pd *pd,
 	attrx.comp_mask = IBV_QP_INIT_ATTR_PD;
 	attrx.pd = pd;
 
-	qp = create_qp(pd->context, &attrx);
+	qp = create_qp(pd->context, &attrx, NULL);
 	if (qp)
 		memcpy(attr, &attrx, sizeof(*attr));
 
@@ -1597,7 +1627,19 @@ struct ibv_qp *hns_roce_u_create_qp(struct ibv_pd *pd,
 struct ibv_qp *hns_roce_u_create_qp_ex(struct ibv_context *context,
 				       struct ibv_qp_init_attr_ex *attr)
 {
-	return create_qp(context, attr);
+	return create_qp(context, attr, NULL);
+}
+
+struct ibv_qp *hnsdv_create_qp(struct ibv_context *context,
+			       struct ibv_qp_init_attr_ex *qp_attr,
+			       struct hnsdv_qp_init_attr *hns_attr)
+{
+	if (!is_hns_dev(context->device)) {
+		errno = EOPNOTSUPP;
+		return NULL;
+	}
+
+	return create_qp(context, qp_attr, hns_attr);
 }
 
 struct ibv_qp *hns_roce_u_open_qp(struct ibv_context *context,
diff --git a/providers/hns/hnsdv.h b/providers/hns/hnsdv.h
new file mode 100644
index 0000000..cfe1611
--- /dev/null
+++ b/providers/hns/hnsdv.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (c) 2021 HiSilicon Limited.
+ */
+
+#ifndef __HNSDV_H__
+#define __HNSDV_H__
+
+#include
+#include
+
+#include
+
+#include
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum hnsdv_context_attr_flags {
+	HNSDV_CONTEXT_FLAGS_DCA = 1 << 0,
+};
+
+enum hnsdv_context_comp_mask {
+	HNSDV_CONTEXT_MASK_DCA_PRIME_QPS = 1 << 0,
+	HNSDV_CONTEXT_MASK_DCA_UNIT_SIZE = 1 << 1,
+	HNSDV_CONTEXT_MASK_DCA_MAX_SIZE = 1 << 2,
+	HNSDV_CONTEXT_MASK_DCA_MIN_SIZE = 1 << 3,
+};
+
+struct hnsdv_context_attr {
+	uint64_t flags; /* Use enum hnsdv_context_attr_flags */
+	uint64_t comp_mask; /* Use enum hnsdv_context_comp_mask */
+	uint32_t dca_prime_qps;
+	uint32_t dca_unit_size;
+	uint64_t dca_max_size;
+	uint64_t dca_min_size;
+};
+
+bool hnsdv_is_supported(struct ibv_device *device);
+struct ibv_context *hnsdv_open_device(struct ibv_device *device,
+				      struct hnsdv_context_attr *attr);
+
+enum hnsdv_qp_create_flags {
+	HNSDV_QP_CREATE_ENABLE_DCA_MODE = 1 << 0,
+};
+
+enum hnsdv_qp_init_attr_mask {
+	HNSDV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS = 1 << 0,
+};
+
+struct hnsdv_qp_init_attr {
+	uint64_t comp_mask; /* Use enum hnsdv_qp_init_attr_mask */
+	uint32_t create_flags; /* Use enum hnsdv_qp_create_flags */
+};
+
+struct ibv_qp *hnsdv_create_qp(struct ibv_context *context,
+			       struct ibv_qp_init_attr_ex *qp_attr,
+			       struct hnsdv_qp_init_attr *hns_qp_attr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __HNSDV_H__ */
diff --git a/providers/hns/libhns.map b/providers/hns/libhns.map
new file mode 100644
index 0000000..aed491c
--- /dev/null
+++ b/providers/hns/libhns.map
@@ -0,0 +1,9 @@
+/* Export symbols should be added below according to
+   Documentation/versioning.md document. */
+HNS_1.0 {
+	global:
+		hnsdv_is_supported;
+		hnsdv_open_device;
+		hnsdv_create_qp;
+	local: *;
+};
diff --git a/redhat/rdma-core.spec b/redhat/rdma-core.spec
index f1b196a..321578c 100644
--- a/redhat/rdma-core.spec
+++ b/redhat/rdma-core.spec
@@ -150,6 +150,8 @@ Provides: libefa = %{version}-%{release}
 Obsoletes: libefa < %{version}-%{release}
 Provides: libhfi1 = %{version}-%{release}
 Obsoletes: libhfi1 < %{version}-%{release}
+Provides: libhns = %{version}-%{release}
+Obsoletes: libhns < %{version}-%{release}
 Provides: libipathverbs = %{version}-%{release}
 Obsoletes: libipathverbs < %{version}-%{release}
 Provides: libirdma = %{version}-%{release}
 Obsoletes: libirdma < %{version}-%{release}
@@ -177,7 +179,7 @@ Device-specific plug-in ibverbs userspace drivers are included:
 - libcxgb4: Chelsio T4 iWARP HCA
 - libefa: Amazon Elastic Fabric Adapter
 - libhfi1: Intel Omni-Path HFI
-- libhns: HiSilicon Hip06 SoC
+- libhns: HiSilicon Hip08+ SoC
 - libipathverbs: QLogic InfiniPath HCA
 - libirdma: Intel Ethernet Connection RDMA
 - libmlx4: Mellanox ConnectX-3 InfiniBand HCA
@@ -562,6 +564,7 @@ fi
 %dir %{_sysconfdir}/libibverbs.d
 %dir %{_libdir}/libibverbs
 %{_libdir}/libefa.so.*
+%{_libdir}/libhns.so.*
 %{_libdir}/libibverbs*.so.*
 %{_libdir}/libibverbs/*.so
 %{_libdir}/libmlx5.so.*
diff --git a/suse/rdma-core.spec b/suse/rdma-core.spec
index bd1faec..ce19db1 100644
--- a/suse/rdma-core.spec
+++ b/suse/rdma-core.spec
@@ -35,6 +35,7 @@ License: BSD-2-Clause OR GPL-2.0-only
 Group: Productivity/Networking/Other
 
 %define efa_so_major 1
+%define hns_so_major 1
 %define verbs_so_major 1
 %define rdmacm_so_major 1
 %define umad_so_major 3
@@ -44,6 +45,7 @@ Group: Productivity/Networking/Other
 %define mad_major 5
 
 %define efa_lname libefa%{efa_so_major}
+%define hns_lname libhns%{hns_so_major}
 %define verbs_lname libibverbs%{verbs_so_major}
 %define rdmacm_lname librdmacm%{rdmacm_so_major}
 %define umad_lname libibumad%{umad_so_major}
@@ -157,6 +159,7 @@ Requires: %{umad_lname} = %{version}-%{release}
 Requires: %{verbs_lname} = %{version}-%{release}
 %if 0%{?dma_coherent}
 Requires: %{efa_lname} = %{version}-%{release}
+Requires: %{hns_lname} = %{version}-%{release}
 Requires: %{mlx4_lname} = %{version}-%{release}
 Requires: %{mlx5_lname} = %{version}-%{release}
 %endif
@@ -197,6 +200,7 @@ Requires: %{name}%{?_isa} = %{version}-%{release}
 Obsoletes: libcxgb4-rdmav2 < %{version}-%{release}
 Obsoletes: libefa-rdmav2 < %{version}-%{release}
 Obsoletes: libhfi1verbs-rdmav2 < %{version}-%{release}
+Obsoletes: libhns-rdmav2 < %{version}-%{release}
 Obsoletes: libipathverbs-rdmav2 < %{version}-%{release}
 Obsoletes: libmlx4-rdmav2 < %{version}-%{release}
 Obsoletes: libmlx5-rdmav2 < %{version}-%{release}
@@ -205,6 +209,7 @@ Obsoletes: libocrdma-rdmav2 < %{version}-%{release}
 Obsoletes: librxe-rdmav2 < %{version}-%{release}
 %if 0%{?dma_coherent}
 Requires: %{efa_lname} = %{version}-%{release}
+Requires: %{hns_lname} = %{version}-%{release}
 Requires: %{mlx4_lname} = %{version}-%{release}
 Requires: %{mlx5_lname} = %{version}-%{release}
 %endif
@@ -223,7 +228,7 @@ Device-specific plug-in ibverbs userspace drivers are included:
 - libcxgb4: Chelsio T4 iWARP HCA
 - libefa: Amazon Elastic Fabric Adapter
 - libhfi1: Intel Omni-Path HFI
-- libhns: HiSilicon Hip06 SoC
+- libhns: HiSilicon Hip08+ SoC
 - libipathverbs: QLogic InfiniPath HCA
 - libirdma: Intel Ethernet Connection RDMA
 - libmlx4: Mellanox ConnectX-3 InfiniBand HCA
@@ -250,6 +255,13 @@ Group: System/Libraries
 
 %description -n %efa_lname
 This package contains the efa runtime library.
+%package -n %hns_lname
+Summary: HNS runtime library
+Group: System/Libraries
+
+%description -n %hns_lname
+This package contains the hns runtime library.
+
 %package -n %mlx4_lname
 Summary: MLX4 runtime library
 Group: System/Libraries
 
@@ -493,6 +505,9 @@ rm -rf %{buildroot}/%{_sbindir}/srp_daemon.sh
 %post -n %efa_lname -p /sbin/ldconfig
 %postun -n %efa_lname -p /sbin/ldconfig
 
+%post -n %hns_lname -p /sbin/ldconfig
+%postun -n %hns_lname -p /sbin/ldconfig
+
 %post -n %mlx4_lname -p /sbin/ldconfig
 %postun -n %mlx4_lname -p /sbin/ldconfig
 
@@ -689,6 +704,10 @@ done
 %defattr(-,root,root)
 %{_libdir}/libefa*.so.*
 
+%files -n %hns_lname
+%defattr(-,root,root)
+%{_libdir}/libhns*.so.*
+
 %files -n %mlx4_lname
 %defattr(-,root,root)
 %{_libdir}/libmlx4*.so.*
-- 
2.30.0
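
Usage note (not part of the patch): a minimal sketch of how an application is expected to drive the two new direct verbs. The DCA pool sizes, CQ size and queue depths below are illustrative assumptions, not values taken from this series.

/*
 * Sketch only: open an HNS device with a DCA memory pool and create a
 * DCA-enabled RC QP.  Error unwinding is omitted for brevity.
 */
#include <infiniband/verbs.h>
#include <infiniband/hnsdv.h>

static struct ibv_qp *create_dca_qp(struct ibv_device *dev)
{
	struct hnsdv_context_attr ctx_attr = {
		.flags = HNSDV_CONTEXT_FLAGS_DCA,
		.comp_mask = HNSDV_CONTEXT_MASK_DCA_UNIT_SIZE |
			     HNSDV_CONTEXT_MASK_DCA_MAX_SIZE,
		.dca_unit_size = 16 * 4096,	 /* assumed unit: 16 pages */
		.dca_max_size = 4 * 1024 * 1024, /* assumed pool cap: 4 MB */
	};
	struct hnsdv_qp_init_attr hns_attr = {
		.comp_mask = HNSDV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS,
		.create_flags = HNSDV_QP_CREATE_ENABLE_DCA_MODE,
	};
	struct ibv_qp_init_attr_ex attr_ex = {};
	struct ibv_context *ctx;
	struct ibv_pd *pd;
	struct ibv_cq *cq;

	if (!hnsdv_is_supported(dev))
		return NULL;	/* not an hns device */

	/* The DCA pool parameters are passed at context creation time. */
	ctx = hnsdv_open_device(dev, &ctx_attr);
	if (!ctx)
		return NULL;

	pd = ibv_alloc_pd(ctx);
	cq = ibv_create_cq(ctx, 64, NULL, NULL, 0);
	if (!pd || !cq)
		return NULL;

	attr_ex.comp_mask = IBV_QP_INIT_ATTR_PD;
	attr_ex.pd = pd;
	attr_ex.send_cq = cq;
	attr_ex.recv_cq = cq;
	attr_ex.qp_type = IBV_QPT_RC;	/* DCA applies to RC/XRC_SEND QPs */
	attr_ex.cap.max_send_wr = 64;
	attr_ex.cap.max_recv_wr = 64;
	attr_ex.cap.max_send_sge = 1;
	attr_ex.cap.max_recv_sge = 1;

	/* WQE buffers of this QP are attached on demand from the DCA pool. */
	return hnsdv_create_qp(ctx, &attr_ex, &hns_attr);
}

The pool itself is only set up when hnsdv_open_device() sees HNSDV_CONTEXT_FLAGS_DCA, so HNSDV_QP_CREATE_ENABLE_DCA_MODE takes effect only on contexts opened this way; otherwise the QP falls back to a normally allocated WQE buffer.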