dpdk/0079-net-xsc-add-xsc-PMD.patch

From 7b0eefe88a023359f72dadc87a4d2449d765a38a Mon Sep 17 00:00:00 2001
From: Rong Qian <qianr@yunsilicon.com>
Date: Fri, 14 Mar 2025 14:17:42 +0800
Subject: [PATCH v4 1/1] net/xsc: add xsc PMD

(cherry picked from commit 56bf053e0ae8c5b90defec1bf06a9ded3f8525d7)
---
drivers/net/meson.build | 1 +
drivers/net/xsc/meson.build | 56 ++
drivers/net/xsc/xsc_cmd.h | 478 +++++++++++
drivers/net/xsc/xsc_compat.c | 15 +
drivers/net/xsc/xsc_compat.h | 46 ++
drivers/net/xsc/xsc_defs.h | 102 +++
drivers/net/xsc/xsc_dev.c | 413 ++++++++++
drivers/net/xsc/xsc_dev.h | 200 +++++
drivers/net/xsc/xsc_ethdev.c | 966 +++++++++++++++++++++++
drivers/net/xsc/xsc_ethdev.h | 63 ++
drivers/net/xsc/xsc_log.h | 48 ++
drivers/net/xsc/xsc_np.c | 489 ++++++++++++
drivers/net/xsc/xsc_np.h | 156 ++++
drivers/net/xsc/xsc_rdma.c | 1304 +++++++++++++++++++++++++++++++
drivers/net/xsc/xsc_rx.c | 518 ++++++++++++
drivers/net/xsc/xsc_rx.h | 65 ++
drivers/net/xsc/xsc_rxtx.h | 193 +++++
drivers/net/xsc/xsc_tx.c | 353 +++++++++
drivers/net/xsc/xsc_tx.h | 62 ++
drivers/net/xsc/xsc_vfio.c | 1128 ++++++++++++++++++++++++++
drivers/net/xsc/xsc_vfio_mbox.c | 691 ++++++++++++++++
drivers/net/xsc/xsc_vfio_mbox.h | 142 ++++
22 files changed, 7489 insertions(+)
create mode 100644 drivers/net/xsc/meson.build
create mode 100644 drivers/net/xsc/xsc_cmd.h
create mode 100644 drivers/net/xsc/xsc_compat.c
create mode 100644 drivers/net/xsc/xsc_compat.h
create mode 100644 drivers/net/xsc/xsc_defs.h
create mode 100644 drivers/net/xsc/xsc_dev.c
create mode 100644 drivers/net/xsc/xsc_dev.h
create mode 100644 drivers/net/xsc/xsc_ethdev.c
create mode 100644 drivers/net/xsc/xsc_ethdev.h
create mode 100644 drivers/net/xsc/xsc_log.h
create mode 100644 drivers/net/xsc/xsc_np.c
create mode 100644 drivers/net/xsc/xsc_np.h
create mode 100644 drivers/net/xsc/xsc_rdma.c
create mode 100644 drivers/net/xsc/xsc_rx.c
create mode 100644 drivers/net/xsc/xsc_rx.h
create mode 100644 drivers/net/xsc/xsc_rxtx.h
create mode 100644 drivers/net/xsc/xsc_tx.c
create mode 100644 drivers/net/xsc/xsc_tx.h
create mode 100644 drivers/net/xsc/xsc_vfio.c
create mode 100644 drivers/net/xsc/xsc_vfio_mbox.c
create mode 100644 drivers/net/xsc/xsc_vfio_mbox.h
diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index bd38b53..1c445f3 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -61,6 +61,7 @@ drivers = [
'vhost',
'virtio',
'vmxnet3',
+ 'xsc',
]
std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
std_deps += ['bus_pci'] # very many PMDs depend on PCI, so make std
diff --git a/drivers/net/xsc/meson.build b/drivers/net/xsc/meson.build
new file mode 100644
index 0000000..c72d001
--- /dev/null
+++ b/drivers/net/xsc/meson.build
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2025 Yunsilicon Technology Co., Ltd.
+
+if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
+ build = false
+ reason = 'only supported on 64bit Linux'
+endif
+
+sources = files(
+ 'xsc_ethdev.c',
+ 'xsc_dev.c',
+ 'xsc_vfio_mbox.c',
+ 'xsc_vfio.c',
+ 'xsc_np.c',
+ 'xsc_rx.c',
+ 'xsc_tx.c',
+ 'xsc_compat.c',
+)
+
+libnames = ['ibverbs']
+foreach libname:libnames
+ lib = dependency('lib' + libname, required: false, method : 'pkg-config')
+ if lib.found()
+ build_rdma = true
+ ext_deps += lib
+ else
+ build_rdma = false
+ reason = 'missing dependency, "' + libname + '"'
+ endif
+endforeach
+
+# libxscale is also required to build the RDMA data path
+lib = dependency('libxscale', required: false, method : 'pkg-config')
+if lib.found()
+ ext_deps += lib
+else
+ build_rdma = false
+ reason = 'missing dependency, "libxscale"'
+endif
+
+# the xscdv.h header controls whether the DV provider is available
+header_to_check = 'infiniband/xscdv.h'
+if cc.has_header(header_to_check)
+ cflags += '-DHAVE_XSC_DV_PROVIDER=1'
+ message(header_to_check + ' found, defining HAVE_XSC_DV_PROVIDER=1')
+else
+ build_rdma = false
+ cflags += '-DHAVE_XSC_DV_PROVIDER=0'
+ message(header_to_check + ' not found, defining HAVE_XSC_DV_PROVIDER=0')
+endif
+
+if build_rdma
+ sources += files('xsc_rdma.c')
+else
+ message('Some dependencies are missing, xsc_rdma.c will not be compiled.')
+endif
diff --git a/drivers/net/xsc/xsc_cmd.h b/drivers/net/xsc/xsc_cmd.h
new file mode 100644
index 0000000..b6b5021
--- /dev/null
+++ b/drivers/net/xsc/xsc_cmd.h
@@ -0,0 +1,478 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_CMD_H_
+#define _XSC_CMD_H_
+
+#include <sys/types.h>
+#include <unistd.h>
+#include <string.h>
+#include <dirent.h>
+#include <net/if.h>
+
+#include <rte_byteorder.h>
+
+#define XSC_BOARD_SN_LEN 32
+#define XSC_CMD_QUERY_HCA_CAP_V1 1
+
+enum xsc_cmd_opcode {
+ XSC_CMD_OP_QUERY_HCA_CAP = 0x100,
+ XSC_CMD_OP_FUNCTION_RESET = 0x10c,
+ XSC_CMD_OP_SET_QP_STATUS = 0x200,
+ XSC_CMD_OP_CREATE_CQ = 0x400,
+ XSC_CMD_OP_DESTROY_CQ = 0x401,
+ XSC_CMD_OP_CREATE_QP = 0x500,
+ XSC_CMD_OP_DESTROY_QP = 0x501,
+ XSC_CMD_OP_RTR2RTS_QP = 0x504,
+ XSC_CMD_OP_QP_2RST = 0x50A,
+ XSC_CMD_OP_CREATE_MULTI_QP = 0x515,
+ XSC_CMD_OP_MODIFY_NIC_HCA = 0x812,
+ XSC_CMD_OP_MODIFY_RAW_QP = 0x81f,
+ XSC_CMD_OP_QUERY_VPORT_STATE = 0x822,
+ XSC_CMD_OP_QUERY_EVENT_TYPE = 0x831,
+ XSC_CMD_OP_QUERY_LINK_INFO = 0x832,
+ XSC_CMD_OP_ENABLE_MSIX = 0x850,
+ XSC_CMD_OP_EXEC_NP = 0x900,
+ XSC_CMD_OP_SET_MTU = 0x1100,
+ XSC_CMD_OP_QUERY_ETH_MAC = 0x1101,
+ XSC_CMD_OP_SET_PORT_ADMIN_STATUS = 0x1801,
+ XSC_CMD_OP_MAX
+};
+
+enum xsc_cmd_status {
+ XSC_CMD_SUCC = 0,
+ XSC_CMD_FAIL,
+ XSC_CMD_TIMEOUT,
+};
+
+struct xsc_cmd_inbox_hdr {
+ rte_be16_t opcode;
+ uint8_t rsvd[4];
+ rte_be16_t ver;
+};
+
+struct xsc_cmd_outbox_hdr {
+ uint8_t status;
+ uint8_t rsvd[5];
+ rte_be16_t ver;
+};
+
+struct xsc_cmd_fw_version {
+ uint8_t major;
+ uint8_t minor;
+ rte_be16_t patch;
+ rte_be32_t tweak;
+ uint8_t extra_flag;
+ uint8_t rsv[7];
+};
+
+struct xsc_cmd_hca_cap {
+ uint8_t rsvd1[12];
+ uint8_t send_seg_num;
+ uint8_t send_wqe_shift;
+ uint8_t recv_seg_num;
+ uint8_t recv_wqe_shift;
+ uint8_t log_max_srq_sz;
+ uint8_t log_max_qp_sz;
+ uint8_t log_max_mtt;
+ uint8_t log_max_qp;
+ uint8_t log_max_strq_sz;
+ uint8_t log_max_srqs;
+ uint8_t rsvd2[2];
+ uint8_t log_max_tso;
+ uint8_t log_max_cq_sz;
+ uint8_t rsvd3;
+ uint8_t log_max_cq;
+ uint8_t log_max_eq_sz;
+ uint8_t log_max_mkey;
+ uint8_t log_max_msix;
+ uint8_t log_max_eq;
+ uint8_t max_indirection;
+ uint8_t log_max_mrw_sz;
+ uint8_t log_max_bsf_list_sz;
+ uint8_t log_max_klm_list_sz;
+ uint8_t rsvd4;
+ uint8_t log_max_ra_req_dc;
+ uint8_t rsvd5;
+ uint8_t log_max_ra_res_dc;
+ uint8_t rsvd6;
+ uint8_t log_max_ra_req_qp;
+ uint8_t log_max_qp_depth;
+ uint8_t log_max_ra_res_qp;
+ rte_be16_t max_vfs;
+ rte_be16_t raweth_qp_id_end;
+ rte_be16_t raw_tpe_qp_num;
+ rte_be16_t max_qp_count;
+ rte_be16_t raweth_qp_id_base;
+ uint8_t rsvd7;
+ uint8_t local_ca_ack_delay;
+ uint8_t max_num_eqs;
+ uint8_t num_ports;
+ uint8_t log_max_msg;
+ uint8_t mac_port;
+ rte_be16_t raweth_rss_qp_id_base;
+ rte_be16_t stat_rate_support;
+ uint8_t rsvd8[2];
+ rte_be64_t flags;
+ uint8_t rsvd9;
+ uint8_t uar_sz;
+ uint8_t rsvd10;
+ uint8_t log_pg_sz;
+ rte_be16_t bf_log_bf_reg_size;
+ rte_be16_t msix_base;
+ rte_be16_t msix_num;
+ rte_be16_t max_desc_sz_sq;
+ uint8_t rsvd11[2];
+ rte_be16_t max_desc_sz_rq;
+ uint8_t rsvd12[2];
+ rte_be16_t max_desc_sz_sq_dc;
+ uint8_t rsvd13[4];
+ rte_be16_t max_qp_mcg;
+ uint8_t rsvd14;
+ uint8_t log_max_mcg;
+ uint8_t rsvd15;
+ uint8_t log_max_pd;
+ uint8_t rsvd16;
+ uint8_t log_max_xrcd;
+ uint8_t rsvd17[40];
+ rte_be32_t uar_page_sz;
+ uint8_t rsvd18[8];
+ rte_be32_t hw_feature_flag;
+ rte_be16_t pf0_vf_funcid_base;
+ rte_be16_t pf0_vf_funcid_top;
+ rte_be16_t pf1_vf_funcid_base;
+ rte_be16_t pf1_vf_funcid_top;
+ rte_be16_t pcie0_pf_funcid_base;
+ rte_be16_t pcie0_pf_funcid_top;
+ rte_be16_t pcie1_pf_funcid_base;
+ rte_be16_t pcie1_pf_funcid_top;
+ uint8_t log_msx_atomic_size_qp;
+ uint8_t pcie_host;
+ uint8_t rsvd19;
+ uint8_t log_msx_atomic_size_dc;
+ uint8_t board_sn[XSC_BOARD_SN_LEN];
+ uint8_t max_tc;
+ uint8_t mac_bit;
+ rte_be16_t funcid_to_logic_port;
+ uint8_t rsvd20[6];
+ uint8_t nif_port_num;
+ uint8_t reg_mr_via_cmdq;
+ rte_be32_t hca_core_clock;
+ rte_be32_t max_rwq_indirection_tables;
+ rte_be32_t max_rwq_indirection_table_size;
+ rte_be32_t chip_ver_h;
+ rte_be32_t chip_ver_m;
+ rte_be32_t chip_ver_l;
+ rte_be32_t hotfix_num;
+ rte_be32_t feature_flag;
+ rte_be32_t rx_pkt_len_max;
+ rte_be32_t glb_func_id;
+ rte_be64_t tx_db;
+ rte_be64_t rx_db;
+ rte_be64_t complete_db;
+ rte_be64_t complete_reg;
+ rte_be64_t event_db;
+ rte_be32_t qp_rate_limit_min;
+ rte_be32_t qp_rate_limit_max;
+ struct xsc_cmd_fw_version fw_ver;
+ uint8_t lag_logic_port_ofst;
+ rte_be64_t max_mr_size;
+ rte_be16_t max_cmd_in_len;
+ rte_be16_t max_cmd_out_len;
+};
+
+struct xsc_cmd_query_hca_cap_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ rte_be16_t cpu_num;
+ uint8_t rsvd[6];
+};
+
+struct xsc_cmd_query_hca_cap_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint8_t rsvd[8];
+ struct xsc_cmd_hca_cap hca_cap;
+};
+
+struct xsc_cmd_cq_context {
+ uint16_t eqn;
+ uint16_t pa_num;
+ uint16_t glb_func_id;
+ uint8_t log_cq_sz;
+ uint8_t cq_type;
+};
+
+struct xsc_cmd_create_cq_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ struct xsc_cmd_cq_context ctx;
+ uint64_t pas[];
+};
+
+struct xsc_cmd_create_cq_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint32_t cqn;
+ uint8_t rsvd[4];
+};
+
+struct xsc_cmd_destroy_cq_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ uint32_t cqn;
+ uint8_t rsvd[4];
+};
+
+struct xsc_cmd_destroy_cq_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint8_t rsvd[8];
+};
+
+struct xsc_cmd_create_qp_request {
+ rte_be16_t input_qpn;
+ rte_be16_t pa_num;
+ uint8_t qp_type;
+ uint8_t log_sq_sz;
+ uint8_t log_rq_sz;
+ uint8_t dma_direct;
+ rte_be32_t pdn;
+ rte_be16_t cqn_send;
+ rte_be16_t cqn_recv;
+ rte_be16_t glb_funcid;
+ uint8_t page_shift;
+ uint8_t rsvd;
+ rte_be64_t pas[];
+};
+
+struct xsc_cmd_create_qp_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ struct xsc_cmd_create_qp_request req;
+};
+
+struct xsc_cmd_create_qp_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint32_t qpn;
+ uint8_t rsvd[4];
+};
+
+struct xsc_cmd_create_multiqp_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ rte_be16_t qp_num;
+ uint8_t qp_type;
+ uint8_t rsvd;
+ rte_be32_t req_len;
+ uint8_t data[];
+};
+
+struct xsc_cmd_create_multiqp_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ rte_be32_t qpn_base;
+};
+
+struct xsc_cmd_destroy_qp_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ rte_be32_t qpn;
+ uint8_t rsvd[4];
+};
+
+struct xsc_cmd_destroy_qp_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint8_t rsvd[8];
+};
+
+struct xsc_cmd_qp_context {
+ rte_be32_t remote_qpn;
+ rte_be32_t cqn_send;
+ rte_be32_t cqn_recv;
+ rte_be32_t next_send_psn;
+ rte_be32_t next_recv_psn;
+ rte_be32_t pdn;
+ rte_be16_t src_udp_port;
+ rte_be16_t path_id;
+ uint8_t mtu_mode;
+ uint8_t lag_sel;
+ uint8_t lag_sel_en;
+ uint8_t retry_cnt;
+ uint8_t rnr_retry;
+ uint8_t dscp;
+ uint8_t state;
+ uint8_t hop_limit;
+ uint8_t dmac[6];
+ uint8_t smac[6];
+ rte_be32_t dip[4];
+ rte_be32_t sip[4];
+ rte_be16_t ip_type;
+ rte_be16_t grp_id;
+ uint8_t vlan_valid;
+ uint8_t dci_cfi_prio_sl;
+ rte_be16_t vlan_id;
+ uint8_t qp_out_port;
+ uint8_t pcie_no;
+ rte_be16_t lag_id;
+ rte_be16_t func_id;
+ rte_be16_t rsvd;
+};
+
+struct xsc_cmd_modify_qp_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ rte_be32_t qpn;
+ struct xsc_cmd_qp_context ctx;
+ uint8_t no_need_wait;
+};
+
+struct xsc_cmd_modify_qp_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint8_t rsvd[8];
+};
+
+struct xsc_cmd_modify_raw_qp_request {
+ uint16_t qpn;
+ uint16_t lag_id;
+ uint16_t func_id;
+ uint8_t dma_direct;
+ uint8_t prio;
+ uint8_t qp_out_port;
+ uint8_t rsvd[7];
+};
+
+struct xsc_cmd_modify_raw_qp_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ uint8_t pcie_no;
+ uint8_t rsv[7];
+ struct xsc_cmd_modify_raw_qp_request req;
+};
+
+struct xsc_cmd_modify_raw_qp_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint8_t rsvd[8];
+};
+
+struct xsc_cmd_set_mtu_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ rte_be16_t mtu;
+ rte_be16_t rx_buf_sz_min;
+ uint8_t mac_port;
+ uint8_t rsvd;
+};
+
+struct xsc_cmd_set_mtu_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+};
+
+struct xsc_cmd_query_eth_mac_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ uint8_t index;
+};
+
+struct xsc_cmd_query_eth_mac_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint8_t mac[6];
+};
+
+struct xsc_cmd_nic_attr {
+ rte_be16_t caps;
+ rte_be16_t caps_mask;
+ uint8_t mac_addr[6];
+};
+
+struct xsc_cmd_rss_modify_attr {
+ uint8_t caps_mask;
+ uint8_t rss_en;
+ rte_be16_t rqn_base;
+ rte_be16_t rqn_num;
+ uint8_t hfunc;
+ rte_be32_t hash_tmpl;
+ uint8_t hash_key[52];
+};
+
+struct xsc_cmd_modify_nic_hca_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ struct xsc_cmd_nic_attr nic;
+ struct xsc_cmd_rss_modify_attr rss;
+};
+
+struct xsc_cmd_modify_nic_hca_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint8_t rsvd[4];
+};
+
+struct xsc_cmd_set_port_admin_status_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ uint16_t admin_status;
+};
+
+struct xsc_cmd_set_port_admin_status_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint32_t status;
+};
+
+struct xsc_cmd_linkinfo {
+ uint8_t status; /* link status: 0-down, 1-up */
+ uint8_t port;
+ uint8_t duplex;
+ uint8_t autoneg;
+ uint32_t linkspeed;
+ uint64_t supported;
+ uint64_t advertising;
+ uint64_t supported_fec;
+ uint64_t advertised_fec;
+ uint64_t supported_speed[2];
+ uint64_t advertising_speed[2];
+};
+
+struct xsc_cmd_query_linkinfo_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+};
+
+struct xsc_cmd_query_linkinfo_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ struct xsc_cmd_linkinfo ctx;
+};
+
+struct xsc_cmd_query_vport_state_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ uint32_t other_vport:1;
+ uint32_t vport_number:16;
+ uint32_t rsv:15;
+};
+
+struct xsc_cmd_query_vport_state_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint8_t admin_state:4;
+ uint8_t state:4;
+};
+
+struct xsc_cmd_event_resp {
+ uint8_t resp_event_type;
+};
+
+struct xsc_cmd_event_query_type_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ uint8_t rsvd[2];
+};
+
+struct xsc_cmd_event_query_type_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ struct xsc_cmd_event_resp ctx;
+};
+
+struct xsc_cmd_msix_table_info_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ uint16_t index;
+ uint8_t rsvd[6];
+};
+
+struct xsc_cmd_msix_table_info_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t data;
+};
+
+struct xsc_cmd_function_reset_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ rte_be16_t glb_func_id;
+ uint8_t rsvd[6];
+};
+
+struct xsc_cmd_function_reset_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint8_t rsvd[8];
+};
+
+#endif /* _XSC_CMD_H_ */
diff --git a/drivers/net/xsc/xsc_compat.c b/drivers/net/xsc/xsc_compat.c
new file mode 100644
index 0000000..4913ec9
--- /dev/null
+++ b/drivers/net/xsc/xsc_compat.c
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#include "xsc_compat.h"
+
+#if RTE_VERSION_NUM(22, 0, 0, 0) > RTE_VERSION
+uint16_t
+rte_eth_pkt_burst_dummy(void *queue __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/net/xsc/xsc_compat.h b/drivers/net/xsc/xsc_compat.h
new file mode 100644
index 0000000..a57d817
--- /dev/null
+++ b/drivers/net/xsc/xsc_compat.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_COMPAT_H_
+#define _XSC_COMPAT_H_
+
+#include <rte_mbuf_core.h>
+#include <rte_version.h>
+
+#if RTE_VERSION_NUM(22, 0, 0, 0) > RTE_VERSION
+#include <rte_bus_pci.h>
+#else
+#include <bus_pci_driver.h>
+#endif
+
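+/* Newer DPDK provides __rte_packed_begin/__rte_packed_end; emulate them with
+ * the classic __rte_packed attribute on older releases.
+ */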
+#if RTE_VERSION_NUM(25, 0, 0, 0) > RTE_VERSION
+#ifndef __rte_packed_begin
+#define __rte_packed_begin
+#endif
+
+#ifndef __rte_packed_end
+#define __rte_packed_end __rte_packed
+#endif
+#endif
+
+#if RTE_VERSION_NUM(22, 0, 0, 0) > RTE_VERSION
+/**
+ * @internal
+ * Dummy DPDK callback for Rx/Tx packet burst.
+ *
+ * @param queue
+ * Pointer to Rx/Tx queue
+ * @param pkts
+ * Packet array
+ * @param nb_pkts
+ * Number of packets in packet array
+ */
+__rte_internal
+uint16_t
+rte_eth_pkt_burst_dummy(void *queue __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused);
+#endif
+
+#endif /* _XSC_COMPAT_H_ */
diff --git a/drivers/net/xsc/xsc_defs.h b/drivers/net/xsc/xsc_defs.h
new file mode 100644
index 0000000..78e4154
--- /dev/null
+++ b/drivers/net/xsc/xsc_defs.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef XSC_DEFS_H_
+#define XSC_DEFS_H_
+
+#define XSC_PAGE_SIZE 4096
+#define XSC_PHY_PORT_NUM 1
+
+#define XSC_PCI_VENDOR_ID 0x1f67
+#define XSC_PCI_DEV_ID_MS 0x1111
+#define XSC_PCI_DEV_ID_MSVF 0x1112
+#define XSC_PCI_DEV_ID_MVH 0x1151
+#define XSC_PCI_DEV_ID_MVHVF 0x1152
+#define XSC_PCI_DEV_ID_MVS 0x1153
+
+#define XSC_VFREP_BASE_LOGICAL_PORT 1081
+#define XSC_MAX_MAC_ADDRESSES 3
+
+#define XSC_RSS_HASH_KEY_LEN 52
+#define XSC_RSS_HASH_BIT_IPV4_SIP (1ULL << 0)
+#define XSC_RSS_HASH_BIT_IPV4_DIP (1ULL << 1)
+#define XSC_RSS_HASH_BIT_IPV6_SIP (1ULL << 2)
+#define XSC_RSS_HASH_BIT_IPV6_DIP (1ULL << 3)
+#define XSC_RSS_HASH_BIT_IPV4_SPORT (1ULL << 4)
+#define XSC_RSS_HASH_BIT_IPV4_DPORT (1ULL << 5)
+#define XSC_RSS_HASH_BIT_IPV6_SPORT (1ULL << 6)
+#define XSC_RSS_HASH_BIT_IPV6_DPORT (1ULL << 7)
+#define XSC_RSS_HASH_BIT_TNL_ID (1ULL << 8)
+#define XSC_RSS_HASH_BIT_NXT_PRO (1ULL << 9)
+
+#define XSC_EPAT_VLD_FLAG (1ULL)
+#define XSC_EPAT_RX_QP_ID_OFST_FLAG (1ULL << 2)
+#define XSC_EPAT_QP_NUM_FLAG (1ULL << 3)
+#define XSC_EPAT_RSS_EN_FLAG (1ULL << 4)
+#define XSC_EPAT_RSS_HASH_TEMPLATE_FLAG (1ULL << 5)
+#define XSC_EPAT_RSS_HASH_FUNC_FLAG (1ULL << 6)
+#define XSC_EPAT_HAS_PPH_FLAG (1ULL << 9)
+
+#define XSC_MAX_DESC_NUMBER 1024
+#define XSC_SEND_WQE_DS 3
+#define XSC_ESEG_EXTRA_DATA_SIZE 48u
+
+#define XSC_PF_TX_DB_ADDR 0x4802000
+#define XSC_PF_RX_DB_ADDR 0x4804000
+#define XSC_PF_CQ_DB_ADDR 0x2120000
+
+#define XSC_VF_RX_DB_ADDR 0x8d4
+#define XSC_VF_TX_DB_ADDR 0x8d0
+#define XSC_VF_CQ_DB_ADDR 0x8c4
+
+#define XSC_HIF_CMDQM_VECTOR_ID_MEM_ADDR 0x1034000
+
+enum xsc_nic_mode {
+ XSC_NIC_MODE_LEGACY,
+ XSC_NIC_MODE_SWITCHDEV,
+ XSC_NIC_MODE_SOC,
+};
+
+enum xsc_pph_type {
+ XSC_PPH_NONE = 0,
+ XSC_RX_PPH = 0x1,
+ XSC_TX_PPH = 0x2,
+ XSC_VFREP_PPH = 0x4,
+ XSC_UPLINK_PPH = 0x8,
+};
+
+enum xsc_funcid_type {
+ XSC_FUNCID_TYPE_INVAL = 0x0,
+ XSC_EMU_FUNCID = 0x1,
+ XSC_PHYPORT_MAC_FUNCID = 0x2,
+ XSC_VF_IOCTL_FUNCID = 0x3,
+ XSC_PHYPORT_LAG_FUNCID = 0x4,
+ XSC_FUNCID_TYPE_UNKNOWN = 0x5,
+};
+
+enum xsc_port_type {
+ XSC_PORT_TYPE_NONE = 0,
+ XSC_PORT_TYPE_UPLINK,
+ XSC_PORT_TYPE_UPLINK_BOND,
+ XSC_PORT_TYPE_PFVF,
+ XSC_PORT_TYPE_PFHPF,
+ XSC_PORT_TYPE_UNKNOWN,
+};
+
+enum xsc_tbm_cap {
+ XSC_TBM_CAP_HASH_PPH = 0,
+ XSC_TBM_CAP_RSS,
+ XSC_TBM_CAP_PP_BYPASS,
+ XSC_TBM_CAP_PCT_DROP_CONFIG,
+};
+
+enum xsc_rss_hf {
+ XSC_RSS_HASH_KEY_UPDATE = 0,
+ XSC_RSS_HASH_TEMP_UPDATE,
+ XSC_RSS_HASH_FUNC_UPDATE,
+ XSC_RSS_RXQ_UPDATE,
+ XSC_RSS_RXQ_DROP,
+};
+
+#endif /* XSC_DEFS_H_ */
diff --git a/drivers/net/xsc/xsc_dev.c b/drivers/net/xsc/xsc_dev.c
new file mode 100644
index 0000000..0562241
--- /dev/null
+++ b/drivers/net/xsc/xsc_dev.c
@@ -0,0 +1,413 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sys/mman.h>
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_kvargs.h>
+#include <rte_eal_paging.h>
+#include <rte_bitops.h>
+
+#include "xsc_log.h"
+#include "xsc_defs.h"
+#include "xsc_dev.h"
+#include "xsc_cmd.h"
+
+#define XSC_DEV_DEF_FLOW_MODE 7
+
+TAILQ_HEAD(xsc_dev_ops_list, xsc_dev_ops);
+static struct xsc_dev_ops_list dev_ops_list = TAILQ_HEAD_INITIALIZER(dev_ops_list);
+
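+/* Backends (e.g. vfio, rdma) register a bitmask of the kernel drivers they
+ * support; the first registered backend matching the probing driver wins.
+ */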
+static const struct xsc_dev_ops *
+xsc_dev_ops_get(enum rte_pci_kernel_driver kdrv)
+{
+ const struct xsc_dev_ops *ops;
+
+ TAILQ_FOREACH(ops, &dev_ops_list, entry) {
+ if (ops->kdrv & (1 << kdrv))
+ return ops;
+ }
+
+ return NULL;
+}
+
+void
+xsc_dev_ops_register(struct xsc_dev_ops *new_ops)
+{
+ struct xsc_dev_ops *ops;
+
+ TAILQ_FOREACH(ops, &dev_ops_list, entry) {
+ if (ops->kdrv == new_ops->kdrv) {
+ PMD_DRV_LOG(ERR, "xsc dev ops exists, kdrv=%" PRIu64 "", new_ops->kdrv);
+ return;
+ }
+ }
+
+ TAILQ_INSERT_TAIL(&dev_ops_list, new_ops, entry);
+}
+
+int
+xsc_dev_mailbox_exec(struct xsc_dev *xdev, void *data_in,
+ int in_len, void *data_out, int out_len)
+{
+ return xdev->dev_ops->mailbox_exec(xdev, data_in, in_len,
+ data_out, out_len);
+}
+
+int
+xsc_dev_intr_event_get(struct xsc_dev *xdev)
+{
+ return xdev->dev_ops->intr_event_get(xdev);
+}
+
+int
+xsc_dev_intr_handler_install(struct xsc_dev *xdev, rte_intr_callback_fn cb, void *cb_arg)
+{
+ return xdev->dev_ops->intr_handler_install(xdev, cb, cb_arg);
+}
+
+int
+xsc_dev_intr_handler_uninstall(struct xsc_dev *xdev)
+{
+ return xdev->dev_ops->intr_handler_uninstall(xdev);
+}
+
+int
+xsc_dev_set_link_up(struct xsc_dev *xdev)
+{
+ if (xdev->dev_ops->set_link_up == NULL)
+ return -ENOTSUP;
+
+ return xdev->dev_ops->set_link_up(xdev);
+}
+
+int
+xsc_dev_set_link_down(struct xsc_dev *xdev)
+{
+ if (xdev->dev_ops->set_link_down == NULL)
+ return -ENOTSUP;
+
+ return xdev->dev_ops->set_link_down(xdev);
+}
+
+int
+xsc_dev_link_update(struct xsc_dev *xdev, int wait_to_complete)
+{
+ if (xdev->dev_ops->link_update == NULL)
+ return -ENOTSUP;
+
+ return xdev->dev_ops->link_update(xdev, wait_to_complete);
+}
+
+int
+xsc_dev_set_mtu(struct xsc_dev *xdev, uint16_t mtu)
+{
+ return xdev->dev_ops->set_mtu(xdev, mtu);
+}
+
+int
+xsc_dev_get_mac(struct xsc_dev *xdev, uint8_t *mac)
+{
+ return xdev->dev_ops->get_mac(xdev, mac);
+}
+
+int
+xsc_dev_destroy_qp(struct xsc_dev *xdev, void *qp)
+{
+ return xdev->dev_ops->destroy_qp(qp);
+}
+
+int
+xsc_dev_destroy_cq(struct xsc_dev *xdev, void *cq)
+{
+ return xdev->dev_ops->destroy_cq(cq);
+}
+
+int
+xsc_dev_modify_qp_status(struct xsc_dev *xdev, uint32_t qpn, int num, int opcode)
+{
+ return xdev->dev_ops->modify_qp_status(xdev, qpn, num, opcode);
+}
+
+int
+xsc_dev_modify_qp_qostree(struct xsc_dev *xdev, uint16_t qpn)
+{
+ return xdev->dev_ops->modify_qp_qostree(xdev, qpn);
+}
+
+int
+xsc_dev_rx_cq_create(struct xsc_dev *xdev, struct xsc_rx_cq_params *cq_params,
+ struct xsc_rx_cq_info *cq_info)
+{
+ return xdev->dev_ops->rx_cq_create(xdev, cq_params, cq_info);
+}
+
+int
+xsc_dev_tx_cq_create(struct xsc_dev *xdev, struct xsc_tx_cq_params *cq_params,
+ struct xsc_tx_cq_info *cq_info)
+{
+ return xdev->dev_ops->tx_cq_create(xdev, cq_params, cq_info);
+}
+
+int
+xsc_dev_tx_qp_create(struct xsc_dev *xdev, struct xsc_tx_qp_params *qp_params,
+ struct xsc_tx_qp_info *qp_info)
+{
+ return xdev->dev_ops->tx_qp_create(xdev, qp_params, qp_info);
+}
+
+int
+xsc_dev_close(struct xsc_dev *xdev, int repr_id)
+{
+ xsc_dev_clear_pct(xdev, repr_id);
+ return xdev->dev_ops->dev_close(xdev);
+}
+
+int
+xsc_dev_rss_key_modify(struct xsc_dev *xdev, uint8_t *rss_key, uint8_t rss_key_len)
+{
+ struct xsc_cmd_modify_nic_hca_mbox_in in = {};
+ struct xsc_cmd_modify_nic_hca_mbox_out out = {};
+ uint8_t rss_caps_mask = 0;
+ int ret, key_len = 0;
+
+ in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_MODIFY_NIC_HCA);
+
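+ /* The device key is at most XSC_RSS_HASH_KEY_LEN bytes; longer keys are truncated */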
+ key_len = RTE_MIN(rss_key_len, XSC_RSS_HASH_KEY_LEN);
+ rte_memcpy(in.rss.hash_key, rss_key, key_len);
+ rss_caps_mask |= RTE_BIT32(XSC_RSS_HASH_KEY_UPDATE);
+
+ in.rss.caps_mask = rss_caps_mask;
+ in.rss.rss_en = 1;
+ in.nic.caps_mask = rte_cpu_to_be_16(RTE_BIT32(XSC_TBM_CAP_RSS));
+ in.nic.caps = in.nic.caps_mask;
+
+ ret = xsc_dev_mailbox_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ if (ret != 0 || out.hdr.status != 0)
+ return -1;
+ return 0;
+}
+
+static int
+xsc_dev_alloc_vfos_info(struct xsc_dev *xdev)
+{
+ struct xsc_hwinfo *hwinfo;
+ int base_lp = 0;
+
+ if (xsc_dev_is_vf(xdev))
+ return 0;
+
+ hwinfo = &xdev->hwinfo;
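+ /* PFs on PCIe 1 are numbered after all PFs on PCIe 0 */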
+ if (hwinfo->pcie_no == 1) {
+ xdev->vfrep_offset = hwinfo->func_id -
+ hwinfo->pcie1_pf_funcid_base +
+ hwinfo->pcie0_pf_funcid_top -
+ hwinfo->pcie0_pf_funcid_base + 1;
+ } else {
+ xdev->vfrep_offset = hwinfo->func_id - hwinfo->pcie0_pf_funcid_base;
+ }
+
+ base_lp = XSC_VFREP_BASE_LOGICAL_PORT;
+ if (xdev->devargs.nic_mode == XSC_NIC_MODE_LEGACY)
+ base_lp += xdev->vfrep_offset;
+ xdev->vfos_logical_in_port = base_lp;
+ return 0;
+}
+
+static void
+xsc_dev_args_parse(struct xsc_dev *xdev, struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ struct xsc_devargs *xdevargs = &xdev->devargs;
+ const char *tmp;
+
+ /* Defaults apply when an arg is absent or no devargs were supplied */
+ xdevargs->pph_mode = XSC_PPH_NONE;
+ xdevargs->nic_mode = XSC_NIC_MODE_LEGACY;
+ xdevargs->flow_mode = XSC_DEV_DEF_FLOW_MODE;
+
+ if (devargs == NULL)
+ return;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return;
+
+ tmp = rte_kvargs_get(kvlist, XSC_PPH_MODE_ARG);
+ if (tmp != NULL)
+ xdevargs->pph_mode = atoi(tmp);
+
+ tmp = rte_kvargs_get(kvlist, XSC_NIC_MODE_ARG);
+ if (tmp != NULL)
+ xdevargs->nic_mode = atoi(tmp);
+
+ tmp = rte_kvargs_get(kvlist, XSC_FLOW_MODE_ARG);
+ if (tmp != NULL)
+ xdevargs->flow_mode = atoi(tmp);
+
+ rte_kvargs_free(kvlist);
+}
+
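+/* VFs always use QP set 0; PF representors map into sets [1, 511] */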
+int
+xsc_dev_qp_set_id_get(struct xsc_dev *xdev, int repr_id)
+{
+ if (xsc_dev_is_vf(xdev))
+ return 0;
+
+ return (repr_id % 511 + 1);
+}
+
+static void
+xsc_repr_info_init(struct xsc_dev *xdev, struct xsc_repr_info *info,
+ enum xsc_port_type port_type,
+ enum xsc_funcid_type funcid_type, int32_t repr_id)
+{
+ int qp_set_id, logical_port;
+ struct xsc_hwinfo *hwinfo = &xdev->hwinfo;
+
+ info->repr_id = repr_id;
+ info->port_type = port_type;
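+ /* The funcid type field occupies bits 14-16, see XSC_FUNCID_TYPE_MASK */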
+ if (port_type == XSC_PORT_TYPE_UPLINK_BOND) {
+ info->pf_bond = 1;
+ info->funcid = XSC_PHYPORT_LAG_FUNCID << 14;
+ } else if (port_type == XSC_PORT_TYPE_UPLINK) {
+ info->pf_bond = -1;
+ info->funcid = funcid_type << 14;
+ } else if (port_type == XSC_PORT_TYPE_PFVF) {
+ info->funcid = funcid_type << 14;
+ }
+
+ qp_set_id = xsc_dev_qp_set_id_get(xdev, repr_id);
+ if (xsc_dev_is_vf(xdev))
+ logical_port = xdev->hwinfo.func_id +
+ xdev->hwinfo.funcid_to_logic_port_off;
+ else
+ logical_port = xdev->vfos_logical_in_port + qp_set_id - 1;
+
+ info->logical_port = logical_port;
+ info->local_dstinfo = logical_port;
+ info->peer_logical_port = hwinfo->mac_phy_port;
+ info->peer_dstinfo = hwinfo->mac_phy_port;
+}
+
+int
+xsc_dev_repr_ports_probe(struct xsc_dev *xdev, int nb_repr_ports, int max_eth_ports)
+{
+ int funcid_type;
+ struct xsc_repr_port *repr_port;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ xdev->num_repr_ports = nb_repr_ports + XSC_PHY_PORT_NUM;
+ if (xdev->num_repr_ports > max_eth_ports) {
+ PMD_DRV_LOG(ERR, "Repr ports num %d, should be less than max %d",
+ xdev->num_repr_ports, max_eth_ports);
+ return -EINVAL;
+ }
+
+ xdev->repr_ports = rte_zmalloc(NULL,
+ sizeof(struct xsc_repr_port) * xdev->num_repr_ports,
+ RTE_CACHE_LINE_SIZE);
+ if (xdev->repr_ports == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for repr ports");
+ return -ENOMEM;
+ }
+
+ funcid_type = (xdev->devargs.nic_mode == XSC_NIC_MODE_SWITCHDEV) ?
+ XSC_VF_IOCTL_FUNCID : XSC_PHYPORT_MAC_FUNCID;
+
+ /* The PF representor uses the last repr_ports entry */
+ repr_port = &xdev->repr_ports[xdev->num_repr_ports - 1];
+ xsc_repr_info_init(xdev, &repr_port->info, XSC_PORT_TYPE_UPLINK,
+ XSC_PHYPORT_MAC_FUNCID, xdev->num_repr_ports - 1);
+ repr_port->info.ifindex = xdev->ifindex;
+ repr_port->xdev = xdev;
+ LIST_INIT(&repr_port->def_pct_list);
+
+ /* VF representors start from index 0 */
+ for (i = 0; i < nb_repr_ports; i++) {
+ repr_port = &xdev->repr_ports[i];
+ xsc_repr_info_init(xdev, &repr_port->info,
+ XSC_PORT_TYPE_PFVF, funcid_type, i);
+ repr_port->xdev = xdev;
+ LIST_INIT(&repr_port->def_pct_list);
+ }
+
+ return 0;
+}
+
+void
+xsc_dev_uninit(struct xsc_dev *xdev)
+{
+ PMD_INIT_FUNC_TRACE();
+ xsc_dev_pct_uninit();
+ xsc_dev_close(xdev, XSC_DEV_REPR_ID_INVALID);
+ rte_free(xdev);
+}
+
+int
+xsc_dev_init(struct rte_pci_device *pci_dev, struct xsc_dev **xdev)
+{
+ struct xsc_dev *d;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ d = rte_zmalloc(NULL, sizeof(*d), RTE_CACHE_LINE_SIZE);
+ if (d == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for xsc_dev");
+ return -ENOMEM;
+ }
+
+ d->dev_ops = xsc_dev_ops_get(pci_dev->kdrv);
+ if (d->dev_ops == NULL) {
+ PMD_DRV_LOG(ERR, "Could not get dev_ops, kdrv=%d", pci_dev->kdrv);
+ rte_free(d);
+ return -ENODEV;
+ }
+
+ d->pci_dev = pci_dev;
+
+ if (d->dev_ops->dev_init != NULL) {
+ ret = d->dev_ops->dev_init(d);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to init xsc dev, ret=%d", ret);
+ rte_free(d);
+ return ret;
+ }
+ }
+
+ xsc_dev_args_parse(d, pci_dev->device.devargs);
+
+ ret = xsc_dev_alloc_vfos_info(d);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to alloc vfos info");
+ ret = -EINVAL;
+ goto hwinfo_init_fail;
+ }
+
+ ret = xsc_dev_pct_init();
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to init xsc pct");
+ ret = -EINVAL;
+ goto hwinfo_init_fail;
+ }
+
+ *xdev = d;
+
+ return 0;
+
+hwinfo_init_fail:
+ xsc_dev_uninit(d);
+ return ret;
+}
+
+bool
+xsc_dev_is_vf(struct xsc_dev *xdev)
+{
+ uint16_t device_id = xdev->pci_dev->id.device_id;
+
+ if (device_id == XSC_PCI_DEV_ID_MSVF ||
+ device_id == XSC_PCI_DEV_ID_MVHVF)
+ return true;
+
+ return false;
+}
diff --git a/drivers/net/xsc/xsc_dev.h b/drivers/net/xsc/xsc_dev.h
new file mode 100644
index 0000000..b8651c8
--- /dev/null
+++ b/drivers/net/xsc/xsc_dev.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_DEV_H_
+#define _XSC_DEV_H_
+
+#include <rte_ethdev.h>
+#include <ethdev_driver.h>
+#include <rte_interrupts.h>
+#include <rte_bitmap.h>
+#include <rte_malloc.h>
+
+#include "xsc_defs.h"
+#include "xsc_log.h"
+#include "xsc_rxtx.h"
+#include "xsc_np.h"
+#include "xsc_compat.h"
+
+#define XSC_PPH_MODE_ARG "pph_mode"
+#define XSC_NIC_MODE_ARG "nic_mode"
+#define XSC_FLOW_MODE_ARG "flow_mode"
+
+#define XSC_FUNCID_TYPE_MASK 0x1c000
+#define XSC_FUNCID_MASK 0x3fff
+
+#define XSC_DEV_PCT_IDX_INVALID 0xFFFFFFFF
+#define XSC_DEV_REPR_ID_INVALID 0x7FFFFFFF
+
+enum xsc_queue_type {
+ XSC_QUEUE_TYPE_RDMA_RC = 0,
+ XSC_QUEUE_TYPE_RDMA_MAD = 1,
+ XSC_QUEUE_TYPE_RAW = 2,
+ XSC_QUEUE_TYPE_VIRTIO_NET = 3,
+ XSC_QUEUE_TYPE_VIRTIO_BLK = 4,
+ XSC_QUEUE_TYPE_RAW_TPE = 5,
+ XSC_QUEUE_TYPE_RAW_TSO = 6,
+ XSC_QUEUE_TYPE_RAW_TX = 7,
+ XSC_QUEUE_TYPE_INVALID = 0xFF,
+};
+
+struct xsc_hwinfo {
+ uint32_t pcie_no; /* PCIe number, 0 or 1 */
+ uint32_t func_id; /* pf glb func id */
+ uint32_t pcie_host; /* host pcie number */
+ uint32_t mac_phy_port; /* mac port */
+ uint32_t funcid_to_logic_port_off; /* port func id offset */
+ uint32_t chip_version;
+ uint32_t hca_core_clock;
+ uint16_t lag_id;
+ uint16_t raw_qp_id_base;
+ uint16_t raw_rss_qp_id_base;
+ uint16_t pf0_vf_funcid_base;
+ uint16_t pf0_vf_funcid_top;
+ uint16_t pf1_vf_funcid_base;
+ uint16_t pf1_vf_funcid_top;
+ uint16_t pcie0_pf_funcid_base;
+ uint16_t pcie0_pf_funcid_top;
+ uint16_t pcie1_pf_funcid_base;
+ uint16_t pcie1_pf_funcid_top;
+ uint16_t lag_port_start;
+ uint16_t raw_tpe_qp_num;
+ uint16_t msix_base;
+ uint16_t msix_num;
+ uint8_t send_seg_num;
+ uint8_t recv_seg_num;
+ uint8_t valid; /* 1: current phy info is valid, 0: invalid */
+ uint8_t on_chip_tbl_vld;
+ uint8_t dma_rw_tbl_vld;
+ uint8_t pct_compress_vld;
+ uint8_t mac_bit;
+ uint8_t esw_mode;
+};
+
+struct xsc_devargs {
+ int nic_mode;
+ int flow_mode;
+ int pph_mode;
+};
+
+struct xsc_repr_info {
+ int repr_id;
+ enum xsc_port_type port_type;
+ int pf_bond;
+
+ uint32_t ifindex;
+ const char *phys_dev_name;
+ uint32_t funcid;
+
+ uint16_t logical_port;
+ uint16_t local_dstinfo;
+ uint16_t peer_logical_port;
+ uint16_t peer_dstinfo;
+};
+
+struct xsc_repr_port {
+ struct xsc_dev *xdev;
+ struct xsc_repr_info info;
+ void *drv_data;
+ struct xsc_dev_pct_list def_pct_list;
+};
+
+struct xsc_dev_config {
+ uint8_t pph_flag;
+ uint8_t hw_csum;
+ uint8_t tso;
+ uint32_t tso_max_payload_sz;
+};
+
+struct xsc_dev {
+ struct rte_pci_device *pci_dev;
+ const struct xsc_dev_ops *dev_ops;
+ struct xsc_devargs devargs;
+ struct xsc_hwinfo hwinfo;
+ struct rte_eth_link pf_dev_link;
+ uint32_t link_speed_capa;
+ int vfos_logical_in_port;
+ int vfrep_offset;
+
+ struct rte_intr_handle *intr_handle;
+ struct xsc_repr_port *repr_ports;
+ int num_repr_ports; /* PF and VF representor ports num */
+ int ifindex;
+ int port_id; /* Probe dev */
+ void *dev_priv;
+ char name[PCI_PRI_STR_SIZE];
+ void *bar_addr;
+ void *jumbo_buffer_pa;
+ void *jumbo_buffer_va;
+ uint64_t bar_len;
+ int ctrl_fd;
+};
+
+enum xsc_intr_event_type {
+ XSC_EVENT_TYPE_NONE = 0x0,
+ XSC_EVENT_TYPE_CHANGE_LINK = 0x0001,
+ XSC_EVENT_TYPE_TEMP_WARN = 0x0002,
+ XSC_EVENT_TYPE_OVER_TEMP_PROTECTION = 0x0004,
+};
+
+struct xsc_dev_ops {
+ TAILQ_ENTRY(xsc_dev_ops) entry;
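+ /* Bitmask of rte_pci_kernel_driver values supported by this backend */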
+ uint64_t kdrv;
+ int (*dev_init)(struct xsc_dev *xdev);
+ int (*dev_close)(struct xsc_dev *xdev);
+ int (*get_mac)(struct xsc_dev *xdev, uint8_t *mac);
+ int (*set_link_up)(struct xsc_dev *xdev);
+ int (*set_link_down)(struct xsc_dev *xdev);
+ int (*link_update)(struct xsc_dev *xdev, int wait_to_complete);
+ int (*set_mtu)(struct xsc_dev *xdev, uint16_t mtu);
+ int (*destroy_qp)(void *qp);
+ int (*destroy_cq)(void *cq);
+ int (*modify_qp_status)(struct xsc_dev *xdev,
+ uint32_t qpn, int num, int opcode);
+ int (*modify_qp_qostree)(struct xsc_dev *xdev, uint16_t qpn);
+
+ int (*rx_cq_create)(struct xsc_dev *xdev, struct xsc_rx_cq_params *cq_params,
+ struct xsc_rx_cq_info *cq_info);
+ int (*tx_cq_create)(struct xsc_dev *xdev, struct xsc_tx_cq_params *cq_params,
+ struct xsc_tx_cq_info *cq_info);
+ int (*tx_qp_create)(struct xsc_dev *xdev, struct xsc_tx_qp_params *qp_params,
+ struct xsc_tx_qp_info *qp_info);
+ int (*mailbox_exec)(struct xsc_dev *xdev, void *data_in,
+ int in_len, void *data_out, int out_len);
+ int (*intr_event_get)(struct xsc_dev *xdev);
+ int (*intr_handler_install)(struct xsc_dev *xdev, rte_intr_callback_fn cb, void *cb_arg);
+ int (*intr_handler_uninstall)(struct xsc_dev *xdev);
+};
+
+int xsc_dev_mailbox_exec(struct xsc_dev *xdev, void *data_in,
+ int in_len, void *data_out, int out_len);
+int xsc_dev_intr_event_get(struct xsc_dev *xdev);
+int xsc_dev_intr_handler_install(struct xsc_dev *xdev,
+ rte_intr_callback_fn cb, void *cb_arg);
+int xsc_dev_intr_handler_uninstall(struct xsc_dev *xdev);
+void xsc_dev_ops_register(struct xsc_dev_ops *new_ops);
+int xsc_dev_set_link_up(struct xsc_dev *xdev);
+int xsc_dev_set_link_down(struct xsc_dev *xdev);
+int xsc_dev_link_update(struct xsc_dev *xdev, int wait_to_complete);
+int xsc_dev_destroy_qp(struct xsc_dev *xdev, void *qp);
+int xsc_dev_destroy_cq(struct xsc_dev *xdev, void *cq);
+int xsc_dev_modify_qp_status(struct xsc_dev *xdev, uint32_t qpn, int num, int opcode);
+int xsc_dev_modify_qp_qostree(struct xsc_dev *xdev, uint16_t qpn);
+int xsc_dev_rx_cq_create(struct xsc_dev *xdev, struct xsc_rx_cq_params *cq_params,
+ struct xsc_rx_cq_info *cq_info);
+int xsc_dev_tx_cq_create(struct xsc_dev *xdev, struct xsc_tx_cq_params *cq_params,
+ struct xsc_tx_cq_info *cq_info);
+int xsc_dev_tx_qp_create(struct xsc_dev *xdev, struct xsc_tx_qp_params *qp_params,
+ struct xsc_tx_qp_info *qp_info);
+int xsc_dev_init(struct rte_pci_device *pci_dev, struct xsc_dev **dev);
+void xsc_dev_uninit(struct xsc_dev *xdev);
+int xsc_dev_close(struct xsc_dev *xdev, int repr_id);
+int xsc_dev_repr_ports_probe(struct xsc_dev *xdev, int nb_repr_ports, int max_eth_ports);
+int xsc_dev_rss_key_modify(struct xsc_dev *xdev, uint8_t *rss_key, uint8_t rss_key_len);
+bool xsc_dev_is_vf(struct xsc_dev *xdev);
+int xsc_dev_qp_set_id_get(struct xsc_dev *xdev, int repr_id);
+int xsc_dev_set_mtu(struct xsc_dev *xdev, uint16_t mtu);
+int xsc_dev_get_mac(struct xsc_dev *xdev, uint8_t *mac);
+
+#endif /* _XSC_DEV_H_ */
diff --git a/drivers/net/xsc/xsc_ethdev.c b/drivers/net/xsc/xsc_ethdev.c
new file mode 100644
index 0000000..c63b603
--- /dev/null
+++ b/drivers/net/xsc/xsc_ethdev.c
@@ -0,0 +1,966 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#include <ethdev_pci.h>
+#include <rte_interrupts.h>
+
+#include "xsc_log.h"
+#include "xsc_defs.h"
+#include "xsc_ethdev.h"
+#include "xsc_rx.h"
+#include "xsc_tx.h"
+#include "xsc_dev.h"
+#include "xsc_cmd.h"
+
+static int
+xsc_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+
+ if (rss_conf->rss_key != NULL && rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)
+ memcpy(rss_conf->rss_key, priv->rss_conf.rss_key, priv->rss_conf.rss_key_len);
+
+ rss_conf->rss_key_len = priv->rss_conf.rss_key_len;
+ rss_conf->rss_hf = priv->rss_conf.rss_hf;
+ return 0;
+}
+
+static int
+xsc_ethdev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ uint8_t key_len;
+ int ret = 0;
+
+ /* A NULL key updates only the hash fields */
+ if (rss_conf->rss_key != NULL && rss_conf->rss_key_len > 0) {
+ key_len = RTE_MIN(rss_conf->rss_key_len, XSC_RSS_HASH_KEY_LEN);
+ ret = xsc_dev_rss_key_modify(priv->xdev, rss_conf->rss_key, key_len);
+ if (ret != 0)
+ return ret;
+ memcpy(priv->rss_conf.rss_key, rss_conf->rss_key, key_len);
+ priv->rss_conf.rss_key_len = key_len;
+ }
+ priv->rss_conf.rss_hf = rss_conf->rss_hf;
+
+ return ret;
+}
+
+static int
+xsc_ethdev_configure(struct rte_eth_dev *dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ int ret;
+ struct rte_eth_rss_conf *rss_conf;
+
+ priv->num_sq = dev->data->nb_tx_queues;
+ priv->num_rq = dev->data->nb_rx_queues;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+ if (priv->rss_conf.rss_key == NULL) {
+ priv->rss_conf.rss_key = rte_zmalloc(NULL, XSC_RSS_HASH_KEY_LEN,
+ RTE_CACHE_LINE_SIZE);
+ if (priv->rss_conf.rss_key == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc rss key");
+ rte_errno = ENOMEM;
+ ret = -rte_errno;
+ goto error;
+ }
+ priv->rss_conf.rss_key_len = XSC_RSS_HASH_KEY_LEN;
+ }
+
+ if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
+ rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+ ret = xsc_ethdev_rss_hash_update(dev, rss_conf);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Xsc pmd set rss key error!");
+ rte_errno = -ENOEXEC;
+ goto error;
+ }
+ }
+
+ priv->txqs = (void *)dev->data->tx_queues;
+ priv->rxqs = (void *)dev->data->rx_queues;
+ return 0;
+
+error:
+ return -rte_errno;
+}
+
+static void
+xsc_ethdev_txq_release(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ struct xsc_txq_data *txq_data = xsc_txq_get(priv, idx);
+
+ if (txq_data == NULL)
+ return;
+
+ xsc_dev_set_qpsetid(priv->xdev, txq_data->qpn, 0);
+ xsc_txq_obj_release(priv->xdev, txq_data);
+ rte_free(txq_data->fcqs);
+ txq_data->fcqs = NULL;
+ xsc_txq_elts_free(txq_data);
+ rte_free(txq_data);
+ (*priv->txqs)[idx] = NULL;
+
+ dev->data->tx_queues[idx] = NULL;
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+}
+
+static void
+xsc_ethdev_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ struct xsc_rxq_data *rxq_data = xsc_rxq_get(priv, idx);
+
+ if (rxq_data == NULL)
+ return;
+ xsc_rxq_rss_obj_release(priv->xdev, rxq_data);
+ xsc_rxq_elts_free(rxq_data);
+ rte_free(rxq_data);
+ (*priv->rxqs)[idx] = NULL;
+
+ dev->data->rx_queues[idx] = NULL;
+ dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+}
+
+static int
+xsc_ethdev_enable(struct rte_eth_dev *dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ struct xsc_hwinfo *hwinfo;
+ int peer_dstinfo = 0;
+ int peer_logicalport = 0;
+ int logical_port = 0;
+ int local_dstinfo = 0;
+ int pcie_logic_port = 0;
+ int qp_set_id;
+ int repr_id;
+ struct xsc_rxq_data *rxq;
+ uint16_t rx_qpn;
+ int i, vld;
+ struct xsc_txq_data *txq;
+ struct xsc_repr_port *repr;
+ struct xsc_repr_info *repr_info;
+
+ if (priv->funcid_type != XSC_PHYPORT_MAC_FUNCID)
+ return -ENODEV;
+
+ rxq = xsc_rxq_get(priv, 0);
+ if (rxq == NULL)
+ return -EINVAL;
+
+ rx_qpn = (uint16_t)rxq->qpn;
+ hwinfo = &priv->xdev->hwinfo;
+ repr_id = priv->representor_id;
+ repr = &priv->xdev->repr_ports[repr_id];
+ repr_info = &repr->info;
+
+ qp_set_id = xsc_dev_qp_set_id_get(priv->xdev, repr_id);
+ logical_port = repr_info->logical_port;
+ local_dstinfo = repr_info->local_dstinfo;
+ peer_logicalport = repr_info->peer_logical_port;
+ peer_dstinfo = repr_info->peer_dstinfo;
+
+ pcie_logic_port = hwinfo->pcie_no + 8;
+
+ for (i = 0; i < priv->num_sq; i++) {
+ txq = xsc_txq_get(priv, i);
+ if (txq == NULL)
+ return -EINVAL;
+ xsc_dev_modify_qp_status(priv->xdev, txq->qpn, 1, XSC_CMD_OP_RTR2RTS_QP);
+ xsc_dev_modify_qp_qostree(priv->xdev, txq->qpn);
+ xsc_dev_set_qpsetid(priv->xdev, txq->qpn, qp_set_id);
+ }
+
+ if (!xsc_dev_is_vf(priv->xdev)) {
+ xsc_dev_create_ipat(priv->xdev, logical_port, peer_dstinfo);
+ xsc_dev_create_vfos_baselp(priv->xdev);
+ xsc_dev_create_epat(priv->xdev, local_dstinfo, pcie_logic_port,
+ rx_qpn - hwinfo->raw_rss_qp_id_base,
+ priv->num_rq, &priv->rss_conf);
+ xsc_dev_create_pct(priv->xdev, repr_id, logical_port, peer_dstinfo);
+ xsc_dev_create_pct(priv->xdev, repr_id, peer_logicalport, local_dstinfo);
+ } else {
+ vld = xsc_dev_get_ipat_vld(priv->xdev, logical_port);
+ if (vld == 0)
+ xsc_dev_create_ipat(priv->xdev, logical_port, peer_dstinfo);
+ xsc_dev_vf_modify_epat(priv->xdev, local_dstinfo,
+ rx_qpn - hwinfo->raw_rss_qp_id_base,
+ priv->num_rq, &priv->rss_conf);
+ }
+
+ return 0;
+}
+
+static void
+xsc_rxq_stop(struct rte_eth_dev *dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ uint16_t i;
+
+ for (i = 0; i != priv->num_rq; ++i)
+ xsc_ethdev_rxq_release(dev, i);
+ priv->rxqs = NULL;
+ priv->flags &= ~XSC_FLAG_RX_QUEUE_INIT;
+}
+
+static void
+xsc_txq_stop(struct rte_eth_dev *dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ uint16_t i;
+
+ for (i = 0; i != priv->num_sq; ++i)
+ xsc_ethdev_txq_release(dev, i);
+ priv->txqs = NULL;
+ priv->flags &= ~XSC_FLAG_TX_QUEUE_INIT;
+}
+
+static int
+xsc_txq_start(struct xsc_ethdev_priv *priv)
+{
+ struct xsc_txq_data *txq_data;
+ struct rte_eth_dev *dev = priv->eth_dev;
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+ uint16_t i;
+ int ret;
+ size_t size;
+
+ if (priv->flags & XSC_FLAG_TX_QUEUE_INIT) {
+ for (i = 0; i != priv->num_sq; ++i)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+ }
+
+ for (i = 0; i != priv->num_sq; ++i) {
+ txq_data = xsc_txq_get(priv, i);
+ if (txq_data == NULL)
+ goto error;
+ xsc_txq_elts_alloc(txq_data);
+ ret = xsc_txq_obj_new(priv->xdev, txq_data, offloads, i);
+ if (ret < 0)
+ goto error;
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+ PMD_DRV_LOG(INFO, "Port %u create tx success", dev->data->port_id);
+
+ size = txq_data->cqe_s * sizeof(*txq_data->fcqs);
+ txq_data->fcqs = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (!txq_data->fcqs) {
+ PMD_DRV_LOG(ERR, "Port %u txq %u alloc fcqs memory failed",
+ dev->data->port_id, i);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ }
+
+ priv->flags |= XSC_FLAG_TX_QUEUE_INIT;
+ return 0;
+
+error:
+ /* Queue resources are released by the stop functions called from xsc_ethdev_start's error path */
+ return -rte_errno;
+}
+
+static int
+xsc_rxq_start(struct xsc_ethdev_priv *priv)
+{
+ struct xsc_rxq_data *rxq_data;
+ struct rte_eth_dev *dev = priv->eth_dev;
+ uint16_t i;
+ int ret;
+
+ if (priv->flags & XSC_FLAG_RX_QUEUE_INIT) {
+ for (i = 0; i != priv->num_rq; ++i)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+ return 0;
+ }
+
+ for (i = 0; i != priv->num_rq; ++i) {
+ rxq_data = xsc_rxq_get(priv, i);
+ if (rxq_data == NULL)
+ goto error;
+ if (dev->data->rx_queue_state[i] != RTE_ETH_QUEUE_STATE_STARTED) {
+ ret = xsc_rxq_elts_alloc(rxq_data);
+ if (ret != 0)
+ goto error;
+ }
+ }
+
+ ret = xsc_rxq_rss_obj_new(priv, priv->dev_data->port_id);
+ if (ret != 0)
+ goto error;
+
+ priv->flags |= XSC_FLAG_RX_QUEUE_INIT;
+ return 0;
+error:
+ /* Queue resources are released by the stop functions called from xsc_ethdev_start's error path */
+ return -rte_errno;
+}
+
+static int
+xsc_ethdev_start(struct rte_eth_dev *dev)
+{
+ int ret;
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+
+ ret = xsc_txq_start(priv);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Port %u txq start failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ goto error;
+ }
+
+ ret = xsc_rxq_start(priv);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Port %u Rx queue start failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ goto error;
+ }
+
+ dev->data->dev_started = 1;
+
+ dev->rx_pkt_burst = xsc_rx_burst;
+ dev->tx_pkt_burst = xsc_tx_burst;
+
+ ret = xsc_ethdev_enable(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to enable port: %u",
+ dev->data->port_id);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ dev->data->dev_started = 0;
+ xsc_txq_stop(dev);
+ xsc_rxq_stop(dev);
+ return -rte_errno;
+}
+
+static int
+xsc_ethdev_stop(struct rte_eth_dev *dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ uint16_t i;
+
+ PMD_DRV_LOG(DEBUG, "Port %u stopping", dev->data->port_id);
+ dev->data->dev_started = 0;
+ dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+ dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
+ rte_wmb();
+
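+ /* Give each Rx queue 1 ms for in-flight bursts to drain */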
+ rte_delay_us_sleep(1000 * priv->num_rq);
+ for (i = 0; i < priv->num_rq; ++i)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ for (i = 0; i < priv->num_sq; ++i)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static int
+xsc_ethdev_close(struct rte_eth_dev *dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+
+ PMD_DRV_LOG(DEBUG, "Port %u closing", dev->data->port_id);
+ dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+ dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
+ rte_wmb();
+
+ xsc_txq_stop(dev);
+ xsc_rxq_stop(dev);
+
+ rte_free(priv->rss_conf.rss_key);
+ xsc_dev_close(priv->xdev, priv->representor_id);
+ dev->data->mac_addrs = NULL;
+ return 0;
+}
+
+static int
+xsc_ethdev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ struct xsc_dev *xdev = priv->xdev;
+
+ return xsc_dev_set_link_up(xdev);
+}
+
+static int
+xsc_ethdev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ struct xsc_dev *xdev = priv->xdev;
+
+ return xsc_dev_set_link_down(xdev);
+}
+
+static int
+xsc_ethdev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ struct xsc_dev *xdev = priv->xdev;
+ int ret = 0;
+
+ ret = xsc_dev_link_update(xdev, wait_to_complete);
+ if (ret == 0) {
+ dev->data->dev_link = xdev->pf_dev_link;
+ dev->data->dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ RTE_ETH_LINK_SPEED_FIXED);
+ }
+ return ret;
+}
+
+static uint64_t
+xsc_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ struct xsc_dev_config *config = &priv->config;
+ uint64_t offloads = 0;
+
+ if (config->hw_csum)
+ offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
+
+ return offloads;
+}
+
+static uint64_t
+xsc_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ uint64_t offloads = 0;
+ struct xsc_dev_config *config = &priv->config;
+
+ if (config->hw_csum)
+ offloads |= (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM);
+ if (config->tso)
+ offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
+ return offloads;
+}
+
+static int
+xsc_ethdev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+
+ info->min_rx_bufsize = 64;
+ info->max_rx_pktlen = 65536;
+ info->max_lro_pkt_size = 0;
+ info->max_rx_queues = 256;
+ info->max_tx_queues = 1024;
+ info->rx_desc_lim.nb_max = 4096;
+ info->rx_desc_lim.nb_min = 16;
+ info->tx_desc_lim.nb_max = 8192;
+ info->tx_desc_lim.nb_min = 128;
+
+ info->rx_queue_offload_capa = xsc_get_rx_queue_offloads(dev);
+ info->rx_offload_capa = info->rx_queue_offload_capa;
+ info->tx_offload_capa = xsc_get_tx_port_offloads(dev);
+
+ info->if_index = priv->ifindex;
+ info->speed_capa = priv->xdev->link_speed_capa;
+ info->hash_key_size = XSC_RSS_HASH_KEY_LEN;
+ info->tx_desc_lim.nb_seg_max = 8;
+ info->tx_desc_lim.nb_mtu_seg_max = 8;
+ info->switch_info.name = dev->data->name;
+ info->switch_info.port_id = priv->representor_id;
+ return 0;
+}
+
+static int
+xsc_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ uint32_t socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ struct xsc_rxq_data *rxq_data = NULL;
+ uint16_t desc_n;
+ uint16_t rx_free_thresh;
+ uint64_t offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
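+ /* Clamp the descriptor count and round it up to a power of two */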
+ desc = (desc > XSC_MAX_DESC_NUMBER) ? XSC_MAX_DESC_NUMBER : desc;
+ desc_n = desc;
+
+ if (!rte_is_power_of_2(desc))
+ desc_n = 1 << rte_log2_u32(desc);
+
+ rxq_data = rte_malloc_socket(NULL, sizeof(*rxq_data) + desc_n * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE, socket);
+ if (rxq_data == NULL) {
+ PMD_DRV_LOG(ERR, "Port %u create rxq idx %d failure",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ rxq_data->idx = idx;
+ rxq_data->priv = priv;
+ (*priv->rxqs)[idx] = rxq_data;
+
+ rx_free_thresh = (conf->rx_free_thresh) ? conf->rx_free_thresh : XSC_RX_FREE_THRESH;
+ rxq_data->rx_free_thresh = rx_free_thresh;
+
+ rxq_data->elts = (struct rte_mbuf *(*)[desc_n])(rxq_data + 1);
+ rxq_data->mp = mp;
+ rxq_data->socket = socket;
+
+ rxq_data->csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
+ rxq_data->hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
+ rxq_data->crc_present = 0;
+
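+ /* wqe_n: log2 of ring size, wqe_s: ring size, wqe_m: index mask */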
+ rxq_data->wqe_n = rte_log2_u32(desc_n);
+ rxq_data->wqe_s = desc_n;
+ rxq_data->wqe_m = desc_n - 1;
+
+ rxq_data->port_id = dev->data->port_id;
+ dev->data->rx_queues[idx] = rxq_data;
+ return 0;
+}
+
+static int
+xsc_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ uint32_t socket, const struct rte_eth_txconf *conf)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ struct xsc_txq_data *txq;
+ uint16_t desc_n;
+
+ desc = (desc > XSC_MAX_DESC_NUMBER) ? XSC_MAX_DESC_NUMBER : desc;
+ desc_n = desc;
+
+ if (!rte_is_power_of_2(desc))
+ desc_n = 1 << rte_log2_u32(desc);
+
+ txq = rte_malloc_socket(NULL, sizeof(*txq) + desc_n * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE, socket);
+ if (txq == NULL) {
+ PMD_DRV_LOG(ERR, "Port %u create txq idx %d failure",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ txq->offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
+ txq->priv = priv;
+ txq->socket = socket;
+
+ txq->elts_n = rte_log2_u32(desc_n);
+ txq->elts_s = desc_n;
+ txq->elts_m = desc_n - 1;
+ txq->port_id = dev->data->port_id;
+ txq->idx = idx;
+
+ (*priv->txqs)[idx] = txq;
+ return 0;
+}
+
+static int
+xsc_ethdev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ int ret = 0;
+
+ if (priv->eth_type != RTE_ETH_REPRESENTOR_PF) {
+ priv->mtu = mtu;
+ return 0;
+ }
+
+ ret = xsc_dev_set_mtu(priv->xdev, mtu);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Mtu set to %u failure", mtu);
+ return -EAGAIN;
+ }
+
+ priv->mtu = mtu;
+ return 0;
+}
+
+static int
+xsc_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ uint32_t rxqs_n = priv->num_rq;
+ uint32_t txqs_n = priv->num_sq;
+ uint32_t i, idx;
+ struct xsc_rxq_data *rxq;
+ struct xsc_txq_data *txq;
+
+ for (i = 0; i < rxqs_n; ++i) {
+ rxq = xsc_rxq_get(priv, i);
+ if (unlikely(rxq == NULL))
+ continue;
+
+ idx = rxq->idx;
+ if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[idx] += rxq->stats.rx_pkts;
+ stats->q_ibytes[idx] += rxq->stats.rx_bytes;
+ stats->q_errors[idx] += rxq->stats.rx_errors +
+ rxq->stats.rx_nombuf;
+ }
+ stats->ipackets += rxq->stats.rx_pkts;
+ stats->ibytes += rxq->stats.rx_bytes;
+ stats->ierrors += rxq->stats.rx_errors;
+ stats->rx_nombuf += rxq->stats.rx_nombuf;
+ }
+
+ for (i = 0; i < txqs_n; ++i) {
+ txq = xsc_txq_get(priv, i);
+ if (unlikely(txq == NULL))
+ continue;
+
+ idx = txq->idx;
+ if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[idx] += txq->stats.tx_pkts;
+ stats->q_obytes[idx] += txq->stats.tx_bytes;
+ stats->q_errors[idx] += txq->stats.tx_errors;
+ }
+ stats->opackets += txq->stats.tx_pkts;
+ stats->obytes += txq->stats.tx_bytes;
+ stats->oerrors += txq->stats.tx_errors;
+ }
+
+ return 0;
+}
+
+static int
+xsc_ethdev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(dev);
+ uint32_t rxqs_n = priv->num_rq;
+ uint32_t txqs_n = priv->num_sq;
+ uint32_t i;
+ struct xsc_rxq_data *rxq;
+ struct xsc_txq_data *txq;
+
+ for (i = 0; i < rxqs_n; ++i) {
+ rxq = xsc_rxq_get(priv, i);
+ if (unlikely(rxq == NULL))
+ continue;
+ memset(&rxq->stats, 0, sizeof(struct xsc_rxq_stats));
+ }
+ for (i = 0; i < txqs_n; ++i) {
+ txq = xsc_txq_get(priv, i);
+ if (unlikely(txq == NULL))
+ continue;
+ memset(&txq->stats, 0, sizeof(struct xsc_txq_stats));
+ }
+
+ return 0;
+}
+
+static int
+xsc_ethdev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac, uint32_t index)
+{
+ int i;
+
+ rte_errno = EINVAL;
+ if (index >= XSC_MAX_MAC_ADDRESSES)
+ return -rte_errno;
+
+ if (rte_is_zero_ether_addr(mac))
+ return -rte_errno;
+
+ for (i = 0; i != XSC_MAX_MAC_ADDRESSES; ++i) {
+ if (i == (int)index)
+ continue;
+ if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac)) != 0)
+ continue;
+ /* Address already configured elsewhere, return with error */
+ rte_errno = EADDRINUSE;
+ return -rte_errno;
+ }
+
+ dev->data->mac_addrs[index] = *mac;
+ return 0;
+}
+
+const struct eth_dev_ops xsc_eth_dev_ops = {
+ .dev_configure = xsc_ethdev_configure,
+ .dev_start = xsc_ethdev_start,
+ .dev_stop = xsc_ethdev_stop,
+ .dev_set_link_up = xsc_ethdev_set_link_up,
+ .dev_set_link_down = xsc_ethdev_set_link_down,
+ .dev_close = xsc_ethdev_close,
+ .link_update = xsc_ethdev_link_update,
+ .stats_get = xsc_ethdev_stats_get,
+ .stats_reset = xsc_ethdev_stats_reset,
+ .dev_infos_get = xsc_ethdev_infos_get,
+ .rx_queue_setup = xsc_ethdev_rx_queue_setup,
+ .tx_queue_setup = xsc_ethdev_tx_queue_setup,
+ .rx_queue_release = xsc_ethdev_rxq_release,
+ .tx_queue_release = xsc_ethdev_txq_release,
+ .mtu_set = xsc_ethdev_set_mtu,
+ .rss_hash_update = xsc_ethdev_rss_hash_update,
+ .rss_hash_conf_get = xsc_ethdev_rss_hash_conf_get,
+};
+
+static int
+xsc_ethdev_init_one_representor(struct rte_eth_dev *eth_dev, void *init_params)
+{
+ int ret;
+ struct xsc_repr_port *repr_port = (struct xsc_repr_port *)init_params;
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(eth_dev);
+ struct xsc_dev_config *config = &priv->config;
+ struct rte_ether_addr mac;
+
+ priv->repr_port = repr_port;
+ repr_port->drv_data = eth_dev;
+ priv->xdev = repr_port->xdev;
+ priv->mtu = RTE_ETHER_MTU;
+ priv->funcid_type = (repr_port->info.funcid & XSC_FUNCID_TYPE_MASK) >> 14;
+ priv->funcid = repr_port->info.funcid & XSC_FUNCID_MASK;
+ if (repr_port->info.port_type == XSC_PORT_TYPE_UPLINK ||
+ repr_port->info.port_type == XSC_PORT_TYPE_UPLINK_BOND)
+ priv->eth_type = RTE_ETH_REPRESENTOR_PF;
+ else
+ priv->eth_type = RTE_ETH_REPRESENTOR_VF;
+ priv->representor_id = repr_port->info.repr_id;
+ priv->dev_data = eth_dev->data;
+ priv->ifindex = repr_port->info.ifindex;
+
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+ eth_dev->data->mac_addrs = priv->mac;
+ if (rte_is_zero_ether_addr(eth_dev->data->mac_addrs)) {
+ ret = xsc_dev_get_mac(priv->xdev, mac.addr_bytes);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Port %u cannot get MAC address",
+ eth_dev->data->port_id);
+ return -ENODEV;
+ }
+ /* Only add the address once fetched; mac is otherwise uninitialized */
+ xsc_ethdev_mac_addr_add(eth_dev, &mac, 0);
+ }
+
+ config->hw_csum = 1;
+ config->pph_flag = priv->xdev->devargs.pph_mode;
+ if ((config->pph_flag & XSC_TX_PPH) != 0) {
+ config->tso = 0;
+ } else {
+ config->tso = 1;
+ config->tso_max_payload_sz = 1500;
+ }
+
+ priv->is_representor = 1;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ eth_dev->data->representor_id = priv->representor_id;
+ eth_dev->data->backer_port_id = eth_dev->data->port_id;
+
+ eth_dev->dev_ops = &xsc_eth_dev_ops;
+ eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+ eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
+
+ rte_eth_dev_probing_finish(eth_dev);
+
+ return 0;
+}
+
+static int
+xsc_ethdev_init_representors(struct rte_eth_dev *eth_dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(eth_dev);
+ struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
+ struct rte_device *dev;
+ struct xsc_dev *xdev;
+ struct xsc_repr_port *repr_port;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ int i;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev = &priv->pci_dev->device;
+ if (dev->devargs != NULL) {
+#if RTE_VERSION_NUM(24, 0, 0, 0) > RTE_VERSION
+ ret = rte_eth_devargs_parse(dev->devargs->args, &eth_da);
+#else
+ ret = rte_eth_devargs_parse(dev->devargs->args, &eth_da, 1);
+#endif
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to parse device arguments: %s",
+ dev->devargs->args);
+ return -EINVAL;
+ }
+ }
+
+ xdev = priv->xdev;
+ ret = xsc_dev_repr_ports_probe(xdev, eth_da.nb_representor_ports, RTE_MAX_ETHPORTS);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to probe %d xsc device representors",
+ eth_da.nb_representor_ports);
+ return ret;
+ }
+
+ /* PF rep init */
+ repr_port = &xdev->repr_ports[xdev->num_repr_ports - 1];
+ ret = xsc_ethdev_init_one_representor(eth_dev, repr_port);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to init backing representor");
+ return ret;
+ }
+
+ /* VF rep init */
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ repr_port = &xdev->repr_ports[i];
+ snprintf(name, sizeof(name), "%s_rep_%d",
+ xdev->name, repr_port->info.repr_id);
+ ret = rte_eth_dev_create(dev,
+ name,
+ sizeof(struct xsc_ethdev_priv),
+ NULL, NULL,
+ xsc_ethdev_init_one_representor,
+ repr_port);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to create representor: %d", i);
+ goto destroy_reprs;
+ }
+ }
+
+ ret = xsc_ethdev_set_link_up(eth_dev);
+ if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to set link up");
+ goto destroy_reprs;
+ }
+
+ return 0;
+
+destroy_reprs:
+ /* Destroy vf reprs */
+	while (i-- > 0) {
+ repr_port = &xdev->repr_ports[i];
+ rte_eth_dev_destroy((struct rte_eth_dev *)repr_port->drv_data, NULL);
+ }
+
+ /* Destroy pf repr */
+ repr_port = &xdev->repr_ports[xdev->num_repr_ports - 1];
+ rte_eth_dev_destroy((struct rte_eth_dev *)repr_port->drv_data, NULL);
+ return ret;
+}
+
+static void
+xsc_ethdev_intr_handler(void *param)
+{
+ struct rte_eth_dev *eth_dev = param;
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(eth_dev);
+ int event_type;
+
+ event_type = xsc_dev_intr_event_get(priv->xdev);
+ switch (event_type) {
+ case XSC_EVENT_TYPE_CHANGE_LINK:
+ PMD_DRV_LOG(DEBUG, "Get intr event type=%04x", event_type);
+ xsc_ethdev_link_update(eth_dev, 0);
+ rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+xsc_ethdev_init(struct rte_eth_dev *eth_dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(eth_dev);
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv->eth_dev = eth_dev;
+ priv->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ ret = xsc_dev_init(priv->pci_dev, &priv->xdev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to initialize xsc device");
+ return ret;
+ }
+ priv->xdev->port_id = eth_dev->data->port_id;
+
+ ret = xsc_ethdev_init_representors(eth_dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to initialize representors");
+ goto uninit_xsc_dev;
+ }
+
+ ret = xsc_dev_intr_handler_install(priv->xdev, xsc_ethdev_intr_handler, eth_dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to install intr handler");
+ goto uninit_xsc_dev;
+ }
+
+ return 0;
+
+uninit_xsc_dev:
+ xsc_dev_uninit(priv->xdev);
+ return ret;
+}
+
+static int
+xsc_ethdev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct xsc_ethdev_priv *priv = TO_XSC_ETHDEV_PRIV(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+	xsc_dev_intr_handler_uninstall(priv->xdev);
+	xsc_dev_uninit(priv->xdev);
+
+ return 0;
+}
+
+static int
+xsc_ethdev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct xsc_ethdev_priv),
+ xsc_ethdev_init);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to probe ethdev: %s", pci_dev->name);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+xsc_ethdev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = rte_eth_dev_pci_generic_remove(pci_dev, xsc_ethdev_uninit);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Could not remove ethdev: %s", pci_dev->name);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct rte_pci_id xsc_ethdev_pci_id_map[] = {
+ { RTE_PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_PCI_DEV_ID_MS) },
+ { RTE_PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_PCI_DEV_ID_MSVF) },
+ { RTE_PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_PCI_DEV_ID_MVH) },
+ { RTE_PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_PCI_DEV_ID_MVHVF) },
+ { RTE_PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_PCI_DEV_ID_MVS) },
+ { RTE_PCI_DEVICE(0, 0) },
+};
+
+static struct rte_pci_driver xsc_ethdev_pci_driver = {
+ .id_table = xsc_ethdev_pci_id_map,
+ .probe = xsc_ethdev_pci_probe,
+ .remove = xsc_ethdev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_xsc, xsc_ethdev_pci_driver);
+RTE_PMD_REGISTER_PCI_TABLE(net_xsc, xsc_ethdev_pci_id_map);
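+/*
+ * Devargs usage sketch (illustrative values only):
+ *   dpdk-testpmd -a <pci_bdf>,pph_mode=<x>,nic_mode=<x>,flow_mode=<x> -- -i
+ */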
+RTE_PMD_REGISTER_PARAM_STRING(net_xsc,
+ XSC_PPH_MODE_ARG "=<x>"
+ XSC_NIC_MODE_ARG "=<x>"
+ XSC_FLOW_MODE_ARG "=<x>");
+
+RTE_LOG_REGISTER_SUFFIX(xsc_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(xsc_logtype_driver, driver, NOTICE);
diff --git a/drivers/net/xsc/xsc_ethdev.h b/drivers/net/xsc/xsc_ethdev.h
new file mode 100644
index 0000000..0b307c2
--- /dev/null
+++ b/drivers/net/xsc/xsc_ethdev.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_ETHDEV_H_
+#define _XSC_ETHDEV_H_
+
+#include "xsc_dev.h"
+
+#define XSC_FLAG_RX_QUEUE_INIT 0x1
+#define XSC_FLAG_TX_QUEUE_INIT 0x2
+
+struct xsc_ethdev_priv {
+ struct rte_eth_dev *eth_dev;
+ struct rte_pci_device *pci_dev;
+ struct xsc_dev *xdev;
+ struct xsc_repr_port *repr_port;
+ struct xsc_dev_config config;
+ struct rte_eth_dev_data *dev_data;
+ struct rte_ether_addr mac[XSC_MAX_MAC_ADDRESSES];
+ struct rte_eth_rss_conf rss_conf;
+
+ int representor_id;
+ uint32_t ifindex;
+ uint16_t mtu;
+ uint8_t isolated;
+ uint8_t is_representor;
+
+ uint32_t mode:7;
+ uint32_t member_bitmap:8;
+ uint32_t funcid_type:3;
+ uint32_t funcid:14;
+
+ uint16_t eth_type;
+ uint16_t qp_set_id;
+
+ uint16_t num_sq;
+ uint16_t num_rq;
+
+ uint16_t flags;
+ struct xsc_txq_data *(*txqs)[];
+ struct xsc_rxq_data *(*rxqs)[];
+};
+
+#define TO_XSC_ETHDEV_PRIV(dev) ((struct xsc_ethdev_priv *)(dev)->data->dev_private)
+
+static __rte_always_inline struct xsc_txq_data *
+xsc_txq_get(struct xsc_ethdev_priv *priv, uint16_t idx)
+{
+ if (priv->txqs != NULL && (*priv->txqs)[idx] != NULL)
+ return (*priv->txqs)[idx];
+ return NULL;
+}
+
+static __rte_always_inline struct xsc_rxq_data *
+xsc_rxq_get(struct xsc_ethdev_priv *priv, uint16_t idx)
+{
+ if (priv->rxqs != NULL && (*priv->rxqs)[idx] != NULL)
+ return (*priv->rxqs)[idx];
+ return NULL;
+}
+
+#endif /* _XSC_ETHDEV_H_ */
diff --git a/drivers/net/xsc/xsc_log.h b/drivers/net/xsc/xsc_log.h
new file mode 100644
index 0000000..7fd16e2
--- /dev/null
+++ b/drivers/net/xsc/xsc_log.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_LOG_H_
+#define _XSC_LOG_H_
+
+#include <rte_log.h>
+#include <rte_version.h>
+
+extern int xsc_logtype_init;
+extern int xsc_logtype_driver;
+#define RTE_LOGTYPE_XSC_INIT xsc_logtype_init
+#define RTE_LOGTYPE_XSC_DRV xsc_logtype_driver
+
+#if RTE_VERSION_NUM(25, 0, 0, 0) > RTE_VERSION
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_XSC_INIT, "%s(): " fmt "\n", \
+ __func__, ##args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_XSC_DRV, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#else
+
+#define PMD_INIT_LOG(level, ...) \
+ RTE_LOG_LINE_PREFIX(level, XSC_INIT, "%s(): ", __func__, __VA_ARGS__)
+
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+
+#define PMD_DRV_LOG_RAW(level, ...) \
+ RTE_LOG_LINE_PREFIX(level, XSC_DRV, "%s(): ", __func__, __VA_ARGS__)
+
+#define PMD_DRV_LOG(level, ...) \
+ PMD_DRV_LOG_RAW(level, __VA_ARGS__)
+
+#endif
+
+#endif /* _XSC_LOG_H_ */
diff --git a/drivers/net/xsc/xsc_np.c b/drivers/net/xsc/xsc_np.c
new file mode 100644
index 0000000..f96797b
--- /dev/null
+++ b/drivers/net/xsc/xsc_np.c
@@ -0,0 +1,489 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#include <rte_bitmap.h>
+#include <rte_malloc.h>
+
+#include "xsc_log.h"
+#include "xsc_defs.h"
+#include "xsc_np.h"
+#include "xsc_cmd.h"
+#include "xsc_dev.h"
+
+#define XSC_RSS_HASH_FUNC_TOEPLITZ 0x1
+#define XSC_LOGIC_PORT_MASK 0x07FF
+
+#define XSC_DEV_DEF_PCT_IDX_MIN 128
+#define XSC_DEV_DEF_PCT_IDX_MAX 138
+
+/* Each board has a PCT manager */
+static struct xsc_dev_pct_mgr xsc_pct_mgr;
+
+enum xsc_np_type {
+ XSC_NP_IPAT = 0,
+ XSC_NP_PCT_V4 = 4,
+ XSC_NP_EPAT = 19,
+ XSC_NP_VFOS = 31,
+ XSC_NP_PG_QP_SET_ID = 41,
+ XSC_NP_MAX
+};
+
+enum xsc_np_opcode {
+ XSC_NP_OP_ADD,
+ XSC_NP_OP_DEL,
+ XSC_NP_OP_GET,
+ XSC_NP_OP_CLR,
+ XSC_NP_OP_MOD,
+ XSC_NP_OP_MAX
+};
+
+struct xsc_np_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ rte_be16_t len;
+ rte_be16_t rsvd;
+ uint8_t data[];
+};
+
+struct xsc_np_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ rte_be32_t error;
+ rte_be16_t len;
+ rte_be16_t rsvd;
+ uint8_t data[];
+};
+
+struct xsc_np_data_tl {
+ uint16_t table;
+ uint16_t opmod;
+ uint16_t length;
+ uint16_t rsvd;
+};
+
+enum xsc_hash_tmpl {
+ XSC_HASH_TMPL_IDX_IP_PORTS_IP6_PORTS = 0,
+ XSC_HASH_TMPL_IDX_IP_IP6,
+ XSC_HASH_TMPL_IDX_IP_PORTS_IP6,
+ XSC_HASH_TMPL_IDX_IP_IP6_PORTS,
+ XSC_HASH_TMPL_IDX_MAX,
+};
+
+static const int xsc_rss_hash_template[XSC_HASH_TMPL_IDX_MAX] = {
+ XSC_RSS_HASH_BIT_IPV4_SIP | XSC_RSS_HASH_BIT_IPV4_DIP |
+ XSC_RSS_HASH_BIT_IPV6_SIP | XSC_RSS_HASH_BIT_IPV6_DIP |
+ XSC_RSS_HASH_BIT_IPV4_SPORT | XSC_RSS_HASH_BIT_IPV4_DPORT |
+ XSC_RSS_HASH_BIT_IPV6_SPORT | XSC_RSS_HASH_BIT_IPV6_DPORT,
+
+ XSC_RSS_HASH_BIT_IPV4_SIP | XSC_RSS_HASH_BIT_IPV4_DIP |
+ XSC_RSS_HASH_BIT_IPV6_SIP | XSC_RSS_HASH_BIT_IPV6_DIP,
+
+ XSC_RSS_HASH_BIT_IPV4_SIP | XSC_RSS_HASH_BIT_IPV4_DIP |
+ XSC_RSS_HASH_BIT_IPV6_SIP | XSC_RSS_HASH_BIT_IPV6_DIP |
+ XSC_RSS_HASH_BIT_IPV4_SPORT | XSC_RSS_HASH_BIT_IPV4_DPORT,
+
+ XSC_RSS_HASH_BIT_IPV4_SIP | XSC_RSS_HASH_BIT_IPV4_DIP |
+ XSC_RSS_HASH_BIT_IPV6_SIP | XSC_RSS_HASH_BIT_IPV6_DIP |
+ XSC_RSS_HASH_BIT_IPV6_SPORT | XSC_RSS_HASH_BIT_IPV6_DPORT,
+};
+
+static uint8_t
+xsc_rss_hash_template_get(struct rte_eth_rss_conf *rss_conf)
+{
+ int rss_hf = 0;
+ int i = 0;
+ uint8_t idx = 0;
+ uint8_t outer = 1;
+
+ if (rss_conf->rss_hf & RTE_ETH_RSS_IP) {
+ rss_hf |= XSC_RSS_HASH_BIT_IPV4_SIP;
+ rss_hf |= XSC_RSS_HASH_BIT_IPV4_DIP;
+ rss_hf |= XSC_RSS_HASH_BIT_IPV6_SIP;
+ rss_hf |= XSC_RSS_HASH_BIT_IPV6_DIP;
+ }
+
+ if ((rss_conf->rss_hf & RTE_ETH_RSS_UDP) ||
+ (rss_conf->rss_hf & RTE_ETH_RSS_TCP)) {
+ rss_hf |= XSC_RSS_HASH_BIT_IPV4_SPORT;
+ rss_hf |= XSC_RSS_HASH_BIT_IPV4_DPORT;
+ rss_hf |= XSC_RSS_HASH_BIT_IPV6_SPORT;
+ rss_hf |= XSC_RSS_HASH_BIT_IPV6_DPORT;
+ }
+
+ if (rss_conf->rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) {
+ rss_hf |= XSC_RSS_HASH_BIT_IPV4_SIP;
+ rss_hf |= XSC_RSS_HASH_BIT_IPV6_SIP;
+ rss_hf &= ~XSC_RSS_HASH_BIT_IPV4_DIP;
+ rss_hf &= ~XSC_RSS_HASH_BIT_IPV6_DIP;
+ }
+
+ if (rss_conf->rss_hf & RTE_ETH_RSS_L3_DST_ONLY) {
+ rss_hf |= XSC_RSS_HASH_BIT_IPV4_DIP;
+ rss_hf |= XSC_RSS_HASH_BIT_IPV6_DIP;
+ rss_hf &= ~XSC_RSS_HASH_BIT_IPV4_SIP;
+ rss_hf &= ~XSC_RSS_HASH_BIT_IPV6_SIP;
+ }
+
+ if (rss_conf->rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) {
+ rss_hf |= XSC_RSS_HASH_BIT_IPV4_SPORT;
+ rss_hf |= XSC_RSS_HASH_BIT_IPV6_SPORT;
+ rss_hf &= ~XSC_RSS_HASH_BIT_IPV4_DPORT;
+ rss_hf &= ~XSC_RSS_HASH_BIT_IPV6_DPORT;
+ }
+
+ if (rss_conf->rss_hf & RTE_ETH_RSS_L4_DST_ONLY) {
+ rss_hf |= XSC_RSS_HASH_BIT_IPV4_DPORT;
+ rss_hf |= XSC_RSS_HASH_BIT_IPV6_DPORT;
+ rss_hf &= ~XSC_RSS_HASH_BIT_IPV4_SPORT;
+ rss_hf &= ~XSC_RSS_HASH_BIT_IPV6_SPORT;
+ }
+
+ if (rss_conf->rss_hf & RTE_ETH_RSS_LEVEL_INNERMOST)
+ outer = 0;
+
+ for (i = 0; i < XSC_HASH_TMPL_IDX_MAX; i++) {
+		if (xsc_rss_hash_template[i] == rss_hf) {
+ idx = i;
+ break;
+ }
+ }
+
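+	/* Bit 0 selects outer (1) or inner (0) headers; the template index sits above it */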
+ idx = (idx << 1) | outer;
+ return idx;
+}
+
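+/*
+ * Wrap a table command in a TL (table/opmod/length) header and execute it
+ * through the firmware mailbox channel.
+ */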
+static int
+xsc_dev_np_exec(struct xsc_dev *xdev, void *cmd, int len, int table, int opmod)
+{
+ struct xsc_np_data_tl *tl;
+ struct xsc_np_mbox_in *in;
+ struct xsc_np_mbox_out *out;
+ int in_len;
+ int out_len;
+ int data_len;
+ int cmd_len;
+ int ret;
+ void *cmd_buf;
+
+ data_len = sizeof(struct xsc_np_data_tl) + len;
+ in_len = sizeof(struct xsc_np_mbox_in) + data_len;
+ out_len = sizeof(struct xsc_np_mbox_out) + data_len;
+ cmd_len = RTE_MAX(in_len, out_len);
+ cmd_buf = malloc(cmd_len);
+ if (cmd_buf == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc np cmd memory");
+ return -rte_errno;
+ }
+
+ in = cmd_buf;
+ memset(in, 0, cmd_len);
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_EXEC_NP);
+ in->len = rte_cpu_to_be_16(data_len);
+
+ tl = (struct xsc_np_data_tl *)in->data;
+ tl->length = len;
+ tl->table = table;
+ tl->opmod = opmod;
+ if (cmd && len)
+ memcpy(tl + 1, cmd, len);
+
+ out = cmd_buf;
+ ret = xsc_dev_mailbox_exec(xdev, in, in_len, out, out_len);
+
+ free(cmd_buf);
+ return ret;
+}
+
+int
+xsc_dev_create_pct(struct xsc_dev *xdev, int repr_id,
+ uint16_t logical_in_port, uint16_t dst_info)
+{
+ int ret;
+ struct xsc_np_pct_v4_add add;
+ struct xsc_repr_port *repr = &xdev->repr_ports[repr_id];
+ struct xsc_dev_pct_list *pct_list = &repr->def_pct_list;
+
+ memset(&add, 0, sizeof(add));
+ add.key.logical_in_port = logical_in_port & XSC_LOGIC_PORT_MASK;
+ add.mask.logical_in_port = XSC_LOGIC_PORT_MASK;
+ add.action.dst_info = dst_info;
+ add.pct_idx = xsc_dev_pct_idx_alloc();
+ if (add.pct_idx == XSC_DEV_PCT_IDX_INVALID)
+ return -1;
+
+ ret = xsc_dev_np_exec(xdev, &add, sizeof(add), XSC_NP_PCT_V4, XSC_NP_OP_ADD);
+ if (unlikely(ret != 0)) {
+ xsc_dev_pct_idx_free(add.pct_idx);
+ return -1;
+ }
+
+ xsc_dev_pct_entry_insert(pct_list, add.key.logical_in_port, add.pct_idx);
+ return 0;
+}
+
+int
+xsc_dev_destroy_pct(struct xsc_dev *xdev, uint16_t logical_in_port, uint32_t pct_idx)
+{
+ struct xsc_np_pct_v4_del del;
+
+ memset(&del, 0, sizeof(del));
+ del.key.logical_in_port = logical_in_port & XSC_LOGIC_PORT_MASK;
+ del.mask.logical_in_port = XSC_LOGIC_PORT_MASK;
+ del.pct_idx = pct_idx;
+ return xsc_dev_np_exec(xdev, &del, sizeof(del), XSC_NP_PCT_V4, XSC_NP_OP_DEL);
+}
+
+void
+xsc_dev_clear_pct(struct xsc_dev *xdev, int repr_id)
+{
+ struct xsc_repr_port *repr;
+ struct xsc_dev_pct_entry *pct_entry;
+ struct xsc_dev_pct_list *pct_list;
+
+ if (repr_id == XSC_DEV_REPR_ID_INVALID)
+ return;
+
+ repr = &xdev->repr_ports[repr_id];
+ pct_list = &repr->def_pct_list;
+
+ while ((pct_entry = xsc_dev_pct_first_get(pct_list)) != NULL) {
+ xsc_dev_destroy_pct(xdev, pct_entry->logic_port, pct_entry->pct_idx);
+ xsc_dev_pct_entry_remove(pct_entry);
+ }
+}
+
+int
+xsc_dev_create_ipat(struct xsc_dev *xdev, uint16_t logic_in_port, uint16_t dst_info)
+{
+ struct xsc_np_ipat add;
+
+ memset(&add, 0, sizeof(add));
+ add.key.logical_in_port = logic_in_port;
+ add.action.dst_info = dst_info;
+ add.action.vld = 1;
+ return xsc_dev_np_exec(xdev, &add, sizeof(add), XSC_NP_IPAT, XSC_NP_OP_ADD);
+}
+
+int
+xsc_dev_get_ipat_vld(struct xsc_dev *xdev, uint16_t logic_in_port)
+{
+ int ret;
+ struct xsc_np_ipat get;
+
+ memset(&get, 0, sizeof(get));
+ get.key.logical_in_port = logic_in_port;
+
+ ret = xsc_dev_np_exec(xdev, &get, sizeof(get), XSC_NP_IPAT, XSC_NP_OP_GET);
+ if (ret != 0)
+ PMD_DRV_LOG(ERR, "Get ipat vld failed, logic in port=%u", logic_in_port);
+
+ return get.action.vld;
+}
+
+int
+xsc_dev_destroy_ipat(struct xsc_dev *xdev, uint16_t logic_in_port)
+{
+ struct xsc_ipat_key del;
+
+ memset(&del, 0, sizeof(del));
+ del.logical_in_port = logic_in_port;
+ return xsc_dev_np_exec(xdev, &del, sizeof(del), XSC_NP_IPAT, XSC_NP_OP_DEL);
+}
+
+int
+xsc_dev_create_epat(struct xsc_dev *xdev, uint16_t dst_info, uint8_t dst_port,
+ uint16_t qpn_ofst, uint8_t qp_num, struct rte_eth_rss_conf *rss_conf)
+{
+ struct xsc_np_epat_add add;
+
+ memset(&add, 0, sizeof(add));
+ add.key.dst_info = dst_info;
+ add.action.dst_port = dst_port;
+ add.action.vld = 1;
+ add.action.rx_qp_id_ofst = qpn_ofst;
+ add.action.qp_num = qp_num - 1;
+ add.action.rss_en = 1;
+	add.action.rss_hash_func = XSC_RSS_HASH_FUNC_TOEPLITZ;
+ add.action.rss_hash_template = xsc_rss_hash_template_get(rss_conf);
+
+ return xsc_dev_np_exec(xdev, &add, sizeof(add), XSC_NP_EPAT, XSC_NP_OP_ADD);
+}
+
+int
+xsc_dev_vf_modify_epat(struct xsc_dev *xdev, uint16_t dst_info, uint16_t qpn_ofst,
+ uint8_t qp_num, struct rte_eth_rss_conf *rss_conf)
+{
+ struct xsc_np_epat_mod mod;
+
+ memset(&mod, 0, sizeof(mod));
+ mod.flags = XSC_EPAT_VLD_FLAG | XSC_EPAT_RX_QP_ID_OFST_FLAG |
+ XSC_EPAT_QP_NUM_FLAG | XSC_EPAT_HAS_PPH_FLAG |
+ XSC_EPAT_RSS_EN_FLAG | XSC_EPAT_RSS_HASH_TEMPLATE_FLAG |
+ XSC_EPAT_RSS_HASH_FUNC_FLAG;
+
+ mod.key.dst_info = dst_info;
+ mod.action.vld = 1;
+ mod.action.rx_qp_id_ofst = qpn_ofst;
+ mod.action.qp_num = qp_num - 1;
+ mod.action.rss_en = 1;
+	mod.action.rss_hash_func = XSC_RSS_HASH_FUNC_TOEPLITZ;
+ mod.action.rss_hash_template = xsc_rss_hash_template_get(rss_conf);
+
+ return xsc_dev_np_exec(xdev, &mod, sizeof(mod), XSC_NP_EPAT, XSC_NP_OP_MOD);
+}
+
+int
+xsc_dev_set_qpsetid(struct xsc_dev *xdev, uint32_t txqpn, uint16_t qp_set_id)
+{
+ int ret;
+ struct xsc_pg_set_id add;
+ uint16_t qp_id_base = xdev->hwinfo.raw_qp_id_base;
+
+ memset(&add, 0, sizeof(add));
+ add.key.qp_id = txqpn - qp_id_base;
+ add.action.qp_set_id = qp_set_id;
+
+ ret = xsc_dev_np_exec(xdev, &add, sizeof(add), XSC_NP_PG_QP_SET_ID, XSC_NP_OP_ADD);
+ if (ret != 0)
+ PMD_DRV_LOG(ERR, "Failed to set qp %u setid %u", txqpn, qp_set_id);
+
+ return ret;
+}
+
+int
+xsc_dev_destroy_epat(struct xsc_dev *xdev, uint16_t dst_info)
+{
+ struct xsc_epat_key del;
+
+ memset(&del, 0, sizeof(del));
+
+ del.dst_info = dst_info;
+ return xsc_dev_np_exec(xdev, &del, sizeof(del), XSC_NP_EPAT, XSC_NP_OP_DEL);
+}
+
+int
+xsc_dev_create_vfos_baselp(struct xsc_dev *xdev)
+{
+ int ret;
+ struct xsc_np_vfso add;
+
+ memset(&add, 0, sizeof(add));
+ add.key.src_port = xdev->vfrep_offset;
+ add.action.ofst = xdev->vfos_logical_in_port;
+
+ ret = xsc_dev_np_exec(xdev, &add, sizeof(add), XSC_NP_VFOS, XSC_NP_OP_ADD);
+ if (ret != 0)
+ PMD_DRV_LOG(ERR, "Failed to set vfos, port=%u, offset=%u",
+ add.key.src_port, add.action.ofst);
+
+ return ret;
+}
+
+void
+xsc_dev_pct_uninit(void)
+{
+	rte_free(xsc_pct_mgr.bmp_mem);
+	xsc_pct_mgr.bmp_mem = NULL;
+}
+
+int
+xsc_dev_pct_init(void)
+{
+ int ret;
+ uint8_t *bmp_mem;
+ uint32_t pos, pct_sz, bmp_sz;
+
+ if (xsc_pct_mgr.bmp_mem != NULL)
+ return 0;
+
+ pct_sz = XSC_DEV_DEF_PCT_IDX_MAX - XSC_DEV_DEF_PCT_IDX_MIN + 1;
+ bmp_sz = rte_bitmap_get_memory_footprint(pct_sz);
+ bmp_mem = rte_zmalloc(NULL, bmp_sz, RTE_CACHE_LINE_SIZE);
+ if (bmp_mem == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc pct bitmap memory");
+ ret = -ENOMEM;
+ goto pct_init_fail;
+ }
+
+ xsc_pct_mgr.bmp_mem = bmp_mem;
+ xsc_pct_mgr.bmp_pct = rte_bitmap_init(pct_sz, bmp_mem, bmp_sz);
+ if (xsc_pct_mgr.bmp_pct == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to init pct bitmap");
+ ret = -EINVAL;
+ goto pct_init_fail;
+ }
+
+ /* Mark all pct bitmap available */
+ for (pos = 0; pos < pct_sz; pos++)
+ rte_bitmap_set(xsc_pct_mgr.bmp_pct, pos);
+
+ return 0;
+
+pct_init_fail:
+ xsc_dev_pct_uninit();
+ return ret;
+}
+
+uint32_t
+xsc_dev_pct_idx_alloc(void)
+{
+ int ret;
+ uint64_t slab = 0;
+ uint32_t pos = 0;
+
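+	/* rte_bitmap_scan() returns 1 when a slab with set bits is found */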
+ ret = rte_bitmap_scan(xsc_pct_mgr.bmp_pct, &pos, &slab);
+ if (ret != 0) {
+ pos += rte_bsf64(slab);
+ rte_bitmap_clear(xsc_pct_mgr.bmp_pct, pos);
+ return (pos + XSC_DEV_DEF_PCT_IDX_MIN);
+ }
+
+ PMD_DRV_LOG(ERR, "Failed to alloc xsc pct idx");
+ return XSC_DEV_PCT_IDX_INVALID;
+}
+
+void
+xsc_dev_pct_idx_free(uint32_t pct_idx)
+{
+ rte_bitmap_set(xsc_pct_mgr.bmp_pct, pct_idx - XSC_DEV_DEF_PCT_IDX_MIN);
+}
+
+int
+xsc_dev_pct_entry_insert(struct xsc_dev_pct_list *pct_list,
+ uint32_t logic_port, uint32_t pct_idx)
+{
+ struct xsc_dev_pct_entry *pct_entry;
+
+ pct_entry = rte_zmalloc(NULL, sizeof(struct xsc_dev_pct_entry), RTE_CACHE_LINE_SIZE);
+ if (pct_entry == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc pct entry memory");
+ return -ENOMEM;
+ }
+
+ pct_entry->logic_port = logic_port;
+ pct_entry->pct_idx = pct_idx;
+ LIST_INSERT_HEAD(pct_list, pct_entry, next);
+
+ return 0;
+}
+
+struct xsc_dev_pct_entry *
+xsc_dev_pct_first_get(struct xsc_dev_pct_list *pct_list)
+{
+ struct xsc_dev_pct_entry *pct_entry;
+
+ pct_entry = LIST_FIRST(pct_list);
+ return pct_entry;
+}
+
+int
+xsc_dev_pct_entry_remove(struct xsc_dev_pct_entry *pct_entry)
+{
+ if (pct_entry == NULL)
+ return -1;
+
+ xsc_dev_pct_idx_free(pct_entry->pct_idx);
+ LIST_REMOVE(pct_entry, next);
+ rte_free(pct_entry);
+
+ return 0;
+}
diff --git a/drivers/net/xsc/xsc_np.h b/drivers/net/xsc/xsc_np.h
new file mode 100644
index 0000000..02383b2
--- /dev/null
+++ b/drivers/net/xsc/xsc_np.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_NP_H_
+#define _XSC_NP_H_
+
+#include <rte_byteorder.h>
+#include <rte_ethdev.h>
+
+#include "xsc_compat.h"
+
+struct xsc_dev;
+
+struct __rte_packed_begin xsc_ipat_key {
+ uint16_t logical_in_port:11;
+ uint16_t rsv:5;
+} __rte_packed_end;
+
+struct __rte_packed_begin xsc_ipat_action {
+ uint64_t rsv0;
+ uint64_t rsv1:9;
+ uint64_t dst_info:11;
+ uint64_t rsv2:34;
+ uint64_t vld:1;
+ uint64_t rsv:1;
+} __rte_packed_end;
+
+struct xsc_np_ipat {
+ struct xsc_ipat_key key;
+ struct xsc_ipat_action action;
+};
+
+struct __rte_packed_begin xsc_epat_key {
+ uint16_t dst_info:11;
+ uint16_t rsv:5;
+} __rte_packed_end;
+
+struct __rte_packed_begin xsc_epat_action {
+ uint8_t rsv0[14];
+ uint8_t rsv1:4;
+ uint8_t dst_port:4;
+ uint8_t rss_hash_func:2;
+ uint8_t rss_hash_template:5;
+ uint8_t rss_en:1;
+ uint8_t qp_num;
+ uint16_t rx_qp_id_ofst:12;
+ uint16_t rsv3:4;
+ uint8_t rsv4:7;
+ uint8_t vld:1;
+} __rte_packed_end;
+
+struct xsc_np_epat_add {
+ struct xsc_epat_key key;
+ struct xsc_epat_action action;
+};
+
+struct xsc_np_epat_mod {
+ uint64_t flags;
+ struct xsc_epat_key key;
+ struct xsc_epat_action action;
+};
+
+struct __rte_packed_begin xsc_pct_v4_key {
+ uint16_t rsv0[20];
+ uint32_t rsv1:13;
+ uint32_t logical_in_port:11;
+ uint32_t rsv2:8;
+} __rte_packed_end;
+
+struct __rte_packed_begin xsc_pct_action {
+ uint64_t rsv0:29;
+ uint64_t dst_info:11;
+ uint64_t rsv1:8;
+} __rte_packed_end;
+
+struct xsc_np_pct_v4_add {
+ struct xsc_pct_v4_key key;
+ struct xsc_pct_v4_key mask;
+ struct xsc_pct_action action;
+ uint32_t pct_idx;
+};
+
+struct xsc_np_pct_v4_del {
+ struct xsc_pct_v4_key key;
+ struct xsc_pct_v4_key mask;
+ uint32_t pct_idx;
+};
+
+struct __rte_packed_begin xsc_pg_qp_set_id_key {
+ uint16_t qp_id:13;
+ uint16_t rsv:3;
+} __rte_packed_end;
+
+struct __rte_packed_begin xsc_pg_qp_set_id_action {
+ uint16_t qp_set_id:9;
+ uint16_t rsv:7;
+} __rte_packed_end;
+
+struct xsc_pg_set_id {
+ struct xsc_pg_qp_set_id_key key;
+ struct xsc_pg_qp_set_id_action action;
+};
+
+struct __rte_packed_begin xsc_vfos_key {
+ uint16_t src_port:11;
+ uint16_t rsv:5;
+} __rte_packed_end;
+
+struct __rte_packed_begin xsc_vfos_start_ofst_action {
+ uint16_t ofst:11;
+ uint16_t rsv:5;
+} __rte_packed_end;
+
+struct xsc_np_vfso {
+ struct xsc_vfos_key key;
+ struct xsc_vfos_start_ofst_action action;
+};
+
+struct xsc_dev_pct_mgr {
+ uint8_t *bmp_mem;
+ struct rte_bitmap *bmp_pct;
+};
+
+struct xsc_dev_pct_entry {
+ LIST_ENTRY(xsc_dev_pct_entry) next;
+ uint32_t logic_port;
+ uint32_t pct_idx;
+};
+
+LIST_HEAD(xsc_dev_pct_list, xsc_dev_pct_entry);
+
+int xsc_dev_create_pct(struct xsc_dev *xdev, int repr_id,
+ uint16_t logical_in_port, uint16_t dst_info);
+int xsc_dev_destroy_pct(struct xsc_dev *xdev, uint16_t logical_in_port, uint32_t pct_idx);
+void xsc_dev_clear_pct(struct xsc_dev *xdev, int repr_id);
+int xsc_dev_create_ipat(struct xsc_dev *xdev, uint16_t logic_in_port, uint16_t dst_info);
+int xsc_dev_get_ipat_vld(struct xsc_dev *xdev, uint16_t logic_in_port);
+int xsc_dev_destroy_ipat(struct xsc_dev *xdev, uint16_t logic_in_port);
+int xsc_dev_create_epat(struct xsc_dev *xdev, uint16_t dst_info, uint8_t dst_port,
+ uint16_t qpn_ofst, uint8_t qp_num, struct rte_eth_rss_conf *rss_conf);
+int xsc_dev_vf_modify_epat(struct xsc_dev *xdev, uint16_t dst_info, uint16_t qpn_ofst,
+ uint8_t qp_num, struct rte_eth_rss_conf *rss_conf);
+int xsc_dev_destroy_epat(struct xsc_dev *xdev, uint16_t dst_info);
+int xsc_dev_set_qpsetid(struct xsc_dev *xdev, uint32_t txqpn, uint16_t qp_set_id);
+int xsc_dev_create_vfos_baselp(struct xsc_dev *xdev);
+void xsc_dev_pct_uninit(void);
+int xsc_dev_pct_init(void);
+uint32_t xsc_dev_pct_idx_alloc(void);
+void xsc_dev_pct_idx_free(uint32_t pct_idx);
+int xsc_dev_pct_entry_insert(struct xsc_dev_pct_list *pct_list,
+ uint32_t logic_port, uint32_t pct_idx);
+struct xsc_dev_pct_entry *xsc_dev_pct_first_get(struct xsc_dev_pct_list *pct_list);
+int xsc_dev_pct_entry_remove(struct xsc_dev_pct_entry *pct_entry);
+
+#endif /* _XSC_NP_H_ */
diff --git a/drivers/net/xsc/xsc_rdma.c b/drivers/net/xsc/xsc_rdma.c
new file mode 100644
index 0000000..956a01d
--- /dev/null
+++ b/drivers/net/xsc/xsc_rdma.c
@@ -0,0 +1,1304 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <stdalign.h>
+
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_eal_paging.h>
+#include <rte_ethdev.h>
+
+#include <infiniband/xscdv.h>
+#include <infiniband/verbs.h>
+#include "xsc_log.h"
+#include "xsc_defs.h"
+#include "xsc_ethdev.h"
+#include "xsc_rxtx.h"
+#include "xsc_np.h"
+#include "xsc_cmd.h"
+
+struct xsc_ioctl_attr {
+ uint16_t opcode; /* ioctl cmd */
+ uint16_t length; /* data length */
+ uint32_t error; /* ioctl error info */
+ uint16_t ver;
+ uint16_t rsvd;
+ uint8_t data[]; /* specific table info */
+};
+
+struct xsc_ioctl_hdr {
+ uint32_t check_field;
+ uint32_t domain;
+ uint32_t bus;
+ uint32_t devfn;
+ struct xsc_ioctl_attr attr;
+};
+
+enum xsc_ioctl_opmod {
+ XSC_IOCTL_OP_GET_LOCAL,
+};
+
+struct xsc_ioctl_data_tl {
+ uint16_t table;
+ uint16_t opmod;
+ uint16_t length;
+ uint16_t rsvd;
+};
+
+struct xsc_ioctl_get_hwinfo {
+ uint32_t domain;
+ uint32_t bus;
+ uint32_t devfn;
+ uint32_t pcie_no;
+ uint32_t func_id;
+ uint32_t pcie_host;
+ uint32_t mac_phy_port;
+ uint32_t funcid_to_logic_port_off;
+ uint16_t lag_id;
+ uint16_t raw_qp_id_base;
+ uint16_t raw_rss_qp_id_base;
+ uint16_t pf0_vf_funcid_base;
+ uint16_t pf0_vf_funcid_top;
+ uint16_t pf1_vf_funcid_base;
+ uint16_t pf1_vf_funcid_top;
+ uint16_t pcie0_pf_funcid_base;
+ uint16_t pcie0_pf_funcid_top;
+ uint16_t pcie1_pf_funcid_base;
+ uint16_t pcie1_pf_funcid_top;
+ uint16_t lag_port_start;
+ uint16_t raw_tpe_qp_num;
+ int send_seg_num;
+ int recv_seg_num;
+ uint8_t on_chip_tbl_vld;
+ uint8_t dma_rw_tbl_vld;
+ uint8_t pct_compress_vld;
+ uint32_t chip_version;
+ uint32_t hca_core_clock;
+ uint8_t mac_bit;
+ uint8_t esw_mode;
+};
+
+struct xsc_ioctl_qp_range {
+ uint16_t opcode;
+ int num;
+ uint32_t qpn;
+};
+
+#define XSC_IOCTL_CHECK_FIELD 0x01234567
+#define XSC_IOCTL_MAGIC 0x1b
+#define XSC_IOCTL_CMDQ _IOWR(XSC_IOCTL_MAGIC, 1, struct xsc_ioctl_hdr)
+#define XSC_IOCTL_DRV_GET _IOR(XSC_IOCTL_MAGIC, 2, struct xsc_ioctl_hdr)
+#define XSC_IOCTL_CMDQ_RAW _IOWR(XSC_IOCTL_MAGIC, 5, struct xsc_ioctl_hdr)
+
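+/* Per-port control device node exposed by the xsc kernel driver */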
+#define XSC_DEV_CTRL_FILE_FMT "/dev/yunsilicon/port_ctrl_" PCI_PRI_FMT
+
+#ifndef ETHTOOL_GLINKSETTINGS
+struct ethtool_link_settings {
+ uint32_t cmd;
+ uint32_t speed;
+ uint8_t duplex;
+ uint8_t port;
+ uint8_t phy_address;
+ uint8_t autoneg;
+ uint8_t mdio_support;
+ uint8_t eth_to_mdix;
+ uint8_t eth_tp_mdix_ctrl;
+ int8_t link_mode_masks_nwords;
+ uint32_t reserved[8];
+ uint32_t link_mode_masks[];
+};
+
+/* The kernel values can be found in /include/uapi/linux/ethtool.h */
+#define ETHTOOL_GLINKSETTINGS 0x0000004c
+#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
+#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
+#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
+#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
+#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
+#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
+#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
+#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
+#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
+#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
+#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
+#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
+#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
+#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
+#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
+#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_25G
+#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
+#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
+#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_50G
+#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
+#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_100G
+#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
+#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
+#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
+#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_200G
+#define ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT 62
+#define ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT 63
+#define ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT 64
+#define ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT 65
+#define ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT 66
+#endif
+
+#define XSC_CHECK_BIT(val, bit_shift) ((val) & (1UL << (bit_shift)))
+
+#if !HAVE_XSC_DV_PROVIDER
+enum xscdv_obj_type {
+ XSCDV_OBJ_QP = 1 << 0,
+ XSCDV_OBJ_CQ = 1 << 1,
+ XSCDV_OBJ_SRQ = 1 << 2,
+ XSCDV_OBJ_RWQ = 1 << 3,
+ XSCDV_OBJ_DM = 1 << 4,
+ XSCDV_OBJ_AH = 1 << 5,
+ XSCDV_OBJ_PD = 1 << 6,
+};
+
+enum xsc_qp_create_flags {
+	XSC_QP_CREATE_RAWPACKET_TSO = 1 << 0,
+ XSC_QP_CREATE_RAWPACKET_TX = 1 << 1,
+};
+
+struct xscdv_cq_init_attr {
+ uint64_t comp_mask; /* Use enum xscdv_cq_init_attr_mask */
+ uint8_t cqe_comp_res_format; /* Use enum xscdv_cqe_comp_res_format */
+ uint32_t flags;
+ uint16_t cqe_size; /* When XSCDV_CQ_INIT_ATTR_MASK_CQE_SIZE set */
+};
+
+struct xscdv_obj {
+ struct {
+ struct ibv_qp *in;
+ struct xscdv_qp *out;
+ } qp;
+ struct {
+ struct ibv_cq *in;
+ struct xscdv_cq *out;
+ } cq;
+};
+
+struct xscdv_qp {
+ rte_le32_t *dbrec;
+ struct {
+ void *buf;
+ uint32_t wqe_cnt;
+ uint32_t stride;
+ rte_le32_t *db;
+ } sq;
+ struct {
+ void *buf;
+ uint32_t wqe_cnt;
+ uint32_t stride;
+ rte_le32_t *db;
+ } rq;
+ uint64_t comp_mask;
+ uint32_t tirn;
+ uint32_t tisn;
+ uint32_t rqn;
+ uint32_t sqn;
+};
+
+struct xscdv_cq {
+ void *buf;
+ rte_le32_t *dbrec;
+ rte_le32_t *db;
+ uint32_t cqe_cnt;
+ uint32_t cqe_size;
+ uint32_t cqn;
+ uint64_t comp_mask;
+};
+
+#endif
+
+struct xsc_rdma_priv {
+ struct ibv_context *ibv_ctx;
+ struct ibv_pd *ibv_pd;
+};
+
+static int
+xsc_ioctl(struct xsc_dev *xdev, int cmd, int opcode,
+ void *data_in, int in_len, void *data_out, int out_len)
+{
+ struct xsc_ioctl_hdr *hdr;
+ int data_len = RTE_MAX(in_len, out_len);
+ int alloc_len = sizeof(struct xsc_ioctl_hdr) + data_len;
+ int ret = 0;
+
+	hdr = malloc(alloc_len);
+	if (hdr == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate xsc ioctl cmd memory");
+		return -ENOMEM;
+	}
+	memset(hdr, 0, alloc_len);
+
+ hdr->check_field = XSC_IOCTL_CHECK_FIELD;
+ hdr->attr.opcode = opcode;
+ hdr->attr.length = data_len;
+ hdr->attr.error = 0;
+
+ if (data_in != NULL && in_len > 0)
+ memcpy(hdr + 1, data_in, in_len);
+
+ ret = ioctl(xdev->ctrl_fd, cmd, hdr);
+ if (ret == 0) {
+ if (hdr->attr.error != 0)
+ ret = hdr->attr.error;
+ else if (data_out != NULL && out_len > 0)
+ memcpy(data_out, hdr + 1, out_len);
+ }
+
+ free(hdr);
+ return ret;
+}
+
+static int
+xsc_rdma_mailbox_exec(struct xsc_dev *xdev, void *data_in,
+ int in_len, void *data_out, int out_len)
+{
+ return xsc_ioctl(xdev, XSC_IOCTL_CMDQ_RAW, 0, data_in, in_len, data_out, out_len);
+}
+
+static int
+xsc_rdma_fork_init(void)
+{
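+	/* Keep ibv_fork_init() effective when buffers live in huge pages */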
+ setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
+
+ if (RTE_CACHE_LINE_SIZE == 128)
+ setenv("XSC_CQE_SIZE", "128", 0);
+
+ setenv("XSC_DEVICE_FATAL_CLEANUP", "1", 1);
+
+ return ibv_fork_init();
+}
+
+static int
+xsc_rdma_hwinfo_init(struct xsc_dev *xdev)
+{
+ struct {
+ struct xsc_ioctl_data_tl tl;
+ struct xsc_ioctl_get_hwinfo hwinfo;
+ } data;
+ struct xsc_ioctl_get_hwinfo *info = &data.hwinfo;
+ int data_len;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ data_len = sizeof(data);
+ data.tl.opmod = XSC_IOCTL_OP_GET_LOCAL;
+ ret = xsc_ioctl(xdev, XSC_IOCTL_DRV_GET, XSC_CMD_OP_QUERY_HCA_CAP,
+ &data, data_len, &data, data_len);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to get hardware info");
+ return ret;
+ }
+
+ xdev->hwinfo.valid = 1;
+ xdev->hwinfo.pcie_no = info->pcie_no;
+ xdev->hwinfo.func_id = info->func_id;
+ xdev->hwinfo.pcie_host = info->pcie_host;
+ xdev->hwinfo.mac_phy_port = info->mac_phy_port;
+ xdev->hwinfo.funcid_to_logic_port_off = info->funcid_to_logic_port_off;
+ xdev->hwinfo.lag_id = info->lag_id;
+ xdev->hwinfo.raw_qp_id_base = info->raw_qp_id_base;
+ xdev->hwinfo.raw_rss_qp_id_base = info->raw_rss_qp_id_base;
+ xdev->hwinfo.pf0_vf_funcid_base = info->pf0_vf_funcid_base;
+ xdev->hwinfo.pf0_vf_funcid_top = info->pf0_vf_funcid_top;
+ xdev->hwinfo.pf1_vf_funcid_base = info->pf1_vf_funcid_base;
+ xdev->hwinfo.pf1_vf_funcid_top = info->pf1_vf_funcid_top;
+ xdev->hwinfo.pcie0_pf_funcid_base = info->pcie0_pf_funcid_base;
+ xdev->hwinfo.pcie0_pf_funcid_top = info->pcie0_pf_funcid_top;
+ xdev->hwinfo.pcie1_pf_funcid_base = info->pcie1_pf_funcid_base;
+ xdev->hwinfo.pcie1_pf_funcid_top = info->pcie1_pf_funcid_top;
+ xdev->hwinfo.lag_port_start = info->lag_port_start;
+ xdev->hwinfo.raw_tpe_qp_num = info->raw_tpe_qp_num;
+ xdev->hwinfo.send_seg_num = info->send_seg_num;
+ xdev->hwinfo.recv_seg_num = info->recv_seg_num;
+ xdev->hwinfo.on_chip_tbl_vld = info->on_chip_tbl_vld;
+ xdev->hwinfo.dma_rw_tbl_vld = info->dma_rw_tbl_vld;
+ xdev->hwinfo.pct_compress_vld = info->pct_compress_vld;
+ xdev->hwinfo.chip_version = info->chip_version;
+ xdev->hwinfo.hca_core_clock = info->hca_core_clock;
+ xdev->hwinfo.mac_bit = info->mac_bit;
+ xdev->hwinfo.esw_mode = info->esw_mode;
+
+ return 0;
+}
+
+static int
+xsc_get_ibdev_pci_addr(const char *dev_path, struct rte_pci_addr *pci_addr)
+{
+ FILE *file;
+ char *line = NULL;
+ size_t len = 0;
+ char path[PATH_MAX];
+	int ret;
+
+	ret = snprintf(path, sizeof(path), "%s/device/uevent", dev_path);
+	if (ret < 0 || ret >= (int)sizeof(path)) {
+		rte_errno = ENAMETOOLONG;
+		return -rte_errno;
+	}
+
+	file = fopen(path, "rb");
+	if (file == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to open file: (%s)", path);
+		return -ENOENT;
+	}
+
+	/* Return -ENOENT unless a PCI_SLOT_NAME line is parsed below */
+	ret = -ENOENT;
+
+ while (getline(&line, &len, file) != -1) {
+ if (sscanf(line,
+ "PCI_SLOT_NAME=%04x:%hhx:%hhx.%hhx",
+ &pci_addr->domain,
+ &pci_addr->bus,
+ &pci_addr->devid,
+ &pci_addr->function) == 4) {
+ ret = 0;
+ break;
+ }
+ }
+
+ free(line);
+ fclose(file);
+ return ret;
+}
+
+static struct ibv_device *
+xsc_rdma_get_ibv_device(const struct rte_pci_addr *addr)
+{
+ int ibv_num, i;
+ struct ibv_device **ibv_list;
+ struct ibv_device *ibv_match = NULL;
+ struct rte_pci_addr ibv_pci_addr;
+
+ ibv_list = ibv_get_device_list(&ibv_num);
+ if (ibv_list == NULL)
+ return NULL;
+
+ for (i = 0; i < ibv_num; i++) {
+ if (xsc_get_ibdev_pci_addr(ibv_list[i]->ibdev_path, &ibv_pci_addr) != 0)
+ continue;
+ if (rte_pci_addr_cmp(addr, &ibv_pci_addr) != 0)
+ continue;
+ ibv_match = ibv_list[i];
+		PMD_DRV_LOG(DEBUG, "Matched device \"name:%s, %s, path:%s, %s\"",
+ ibv_list[i]->name, ibv_list[i]->dev_name,
+ ibv_list[i]->dev_path, ibv_list[i]->ibdev_path);
+ break;
+ }
+ ibv_free_device_list(ibv_list);
+
+ if (ibv_match == NULL) {
+ PMD_DRV_LOG(WARNING,
+ "No verbs device matches PCI device " PCI_PRI_FMT,
+ addr->domain, addr->bus, addr->devid, addr->function);
+ }
+
+ return ibv_match;
+}
+
+#if HAVE_XSC_DV_PROVIDER
+static void *
+xsc_rdma_verbs_buf_alloc(size_t size, void *data)
+{
+ struct rte_device *dev = data;
+ size_t alignment = rte_mem_page_size();
+ void *addr;
+
+ if (size == 0)
+ return NULL;
+
+ if (alignment == (size_t)-1) {
+ PMD_DRV_LOG(ERR, "Failed to get mem page size");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ addr = rte_malloc_socket(NULL, size, alignment, dev->numa_node);
+ if (addr == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Alloc verbs buf memory failed");
+ return NULL;
+ }
+
+ return addr;
+}
+
+static void
+xsc_rdma_verbs_buf_free(void *ptr, void *data __rte_unused)
+{
+ rte_free(ptr);
+}
+#endif
+
+static void
+xsc_rdma_context_attr_set(struct rte_device *dev, struct ibv_context *ctx)
+{
+#if HAVE_XSC_DV_PROVIDER
+ struct xscdv_ctx_allocators allocator = {
+ .alloc = &xsc_rdma_verbs_buf_alloc,
+ .free = &xsc_rdma_verbs_buf_free,
+ .data = dev,
+ };
+
+ /* Hint libxsc to use PMD allocator for data plane resources */
+ xscdv_set_context_attr(ctx, XSCDV_CTX_ATTR_BUF_ALLOCATORS,
+ (void *)((uintptr_t)&allocator));
+#else
+ (void)dev;
+ (void)ctx;
+ PMD_DRV_LOG(WARNING, "Huge page memory registration is not supported");
+#endif
+}
+
+static int
+xsc_rdma_dev_open(struct xsc_dev *xdev)
+{
+ struct ibv_device *ib_dev;
+ struct rte_pci_device *pci_dev = xdev->pci_dev;
+ struct rte_pci_addr *pci_addr = &xdev->pci_dev->addr;
+	struct xsc_rdma_priv *priv;
+ int ret;
+
+ priv = rte_zmalloc(NULL, sizeof(struct xsc_rdma_priv), RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc xsc rdma priv");
+ return -ENOMEM;
+ }
+
+ ib_dev = xsc_rdma_get_ibv_device(pci_addr);
+ if (ib_dev == NULL) {
+ PMD_DRV_LOG(ERR, "Could not get ibv device");
+ ret = -ENODEV;
+		goto alloc_priv_fail;
+ }
+
+ priv->ibv_ctx = ibv_open_device(ib_dev);
+ if (priv->ibv_ctx == NULL) {
+ PMD_DRV_LOG(ERR, "Could not open ibv device: %s", ib_dev->name);
+ ret = -ENODEV;
+ goto aloc_priv_fail;
+		goto alloc_priv_fail;
+
+ xsc_rdma_context_attr_set(&pci_dev->device, priv->ibv_ctx);
+
+ priv->ibv_pd = ibv_alloc_pd(priv->ibv_ctx);
+ if (priv->ibv_pd == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to create pd:%s", ib_dev->name);
+ ret = -EINVAL;
+ goto alloc_pd_fail;
+ }
+
+	rte_strscpy(xdev->name, ib_dev->name, sizeof(xdev->name));
+
+ xdev->dev_priv = priv;
+ return 0;
+
+alloc_pd_fail:
+ ibv_close_device(priv->ibv_ctx);
+alloc_priv_fail:
+ rte_free(priv);
+ return ret;
+}
+
+static int
+xsc_rdma_bar_init(struct xsc_dev *xdev)
+{
+ struct rte_pci_addr *pci_addr = &xdev->pci_dev->addr;
+ char ctrl_file[PATH_MAX];
+ int ret;
+
+ snprintf(ctrl_file, PATH_MAX, XSC_DEV_CTRL_FILE_FMT,
+ pci_addr->domain, pci_addr->bus, pci_addr->devid, pci_addr->function);
+
+ ret = open(ctrl_file, O_RDWR);
+ if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to open file: (%s)", ctrl_file);
+ return ret;
+ }
+ xdev->ctrl_fd = ret;
+
+ xdev->bar_len = xdev->pci_dev->mem_resource[0].len;
+ xdev->bar_addr = mmap(NULL, xdev->bar_len, PROT_READ | PROT_WRITE,
+ MAP_SHARED, xdev->ctrl_fd, 0);
+ if (xdev->bar_addr == MAP_FAILED) {
+		PMD_DRV_LOG(ERR, "Failed to mmap file: (%s)", ctrl_file);
+ ret = -EINVAL;
+ goto mmap_fail;
+ }
+
+ return 0;
+
+mmap_fail:
+ close(xdev->ctrl_fd);
+
+ return ret;
+}
+
+static int
+xsc_ifreq_by_ifname(const char *ifname, int req, struct ifreq *ifr)
+{
+ int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
+ int ret = 0;
+
+ if (sock == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ rte_strscpy(ifr->ifr_name, ifname, sizeof(ifr->ifr_name));
+ ret = ioctl(sock, req, ifr);
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ close(sock);
+ return 0;
+error:
+ close(sock);
+ return -rte_errno;
+}
+
+static int
+xsc_rdma_link_process(uint32_t ifindex, unsigned int flags)
+{
+ struct ifreq request;
+ struct ifreq *ifr = &request;
+ char ifname[sizeof(ifr->ifr_name)];
+ int ret;
+ unsigned int keep = ~IFF_UP;
+
+	if (if_indextoname(ifindex, ifname) == NULL) {
+		rte_errno = errno;
+		return -rte_errno;
+	}
+
+ ret = xsc_ifreq_by_ifname(ifname, SIOCGIFFLAGS, &request);
+ if (ret)
+ return ret;
+
+ request.ifr_flags &= keep;
+ request.ifr_flags |= flags & ~keep;
+
+ return xsc_ifreq_by_ifname(ifname, SIOCSIFFLAGS, &request);
+}
+
+static int
+xsc_rdma_set_link_up(struct xsc_dev *xdev)
+{
+ return xsc_rdma_link_process(xdev->ifindex, IFF_UP);
+}
+
+static int
+xsc_rdma_set_link_down(struct xsc_dev *xdev)
+{
+ return xsc_rdma_link_process(xdev->ifindex, ~IFF_UP);
+}
+
+static int
+xsc_rdma_link_status_get(struct xsc_dev *xdev, struct rte_eth_link *link, uint32_t *capa)
+{
+ struct rte_eth_link dev_link;
+ struct ethtool_link_settings settings = { .cmd = ETHTOOL_GLINKSETTINGS };
+ struct ifreq request;
+ uint64_t flags;
+ uint32_t link_speed_capa;
+ char ifname[sizeof(request.ifr_name)];
+ int ret;
+
+	if (if_indextoname(xdev->ifindex, ifname) == NULL) {
+		rte_errno = errno;
+		return -rte_errno;
+	}
+
+ ret = xsc_ifreq_by_ifname(ifname, SIOCGIFFLAGS, &request);
+ if (ret)
+ return ret;
+
+ if ((request.ifr_flags & IFF_UP) && (request.ifr_flags & IFF_RUNNING))
+ dev_link.link_status = 1;
+ else
+ dev_link.link_status = 0;
+
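+	/* The first ETHTOOL_GLINKSETTINGS call only probes the mask size */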
+ request.ifr_data = (void *)&settings;
+ ret = xsc_ifreq_by_ifname(ifname, SIOCETHTOOL, &request);
+ if (ret)
+ return ret;
+
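+	/* The kernel reports the required mask size as a negated nwords value */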
+ settings.link_mode_masks_nwords = -settings.link_mode_masks_nwords;
+
+ alignas(struct ethtool_link_settings)
+ uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
+ sizeof(uint32_t) * settings.link_mode_masks_nwords * 3];
+ struct ethtool_link_settings *ecmd = (void *)data;
+
+ *ecmd = settings;
+ request.ifr_data = (void *)ecmd;
+ ret = xsc_ifreq_by_ifname(ifname, SIOCETHTOOL, &request);
+ if (ret)
+ return ret;
+
+ dev_link.link_speed = (ecmd->speed == UINT32_MAX) ?
+ RTE_ETH_SPEED_NUM_UNKNOWN : ecmd->speed;
+
+ dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
+ RTE_ETH_LINK_HALF_DUPLEX : RTE_ETH_LINK_FULL_DUPLEX);
+
+ link_speed_capa = 0;
+ flags = ecmd->link_mode_masks[0] |
+ ((uint64_t)ecmd->link_mode_masks[1] << 32);
+ if (XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_1000baseT_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT))
+ link_speed_capa |= RTE_ETH_LINK_SPEED_1G;
+
+ if (XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT))
+ link_speed_capa |= RTE_ETH_LINK_SPEED_10G;
+
+ if (XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT))
+ link_speed_capa |= RTE_ETH_LINK_SPEED_20G;
+
+ if (XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT))
+ link_speed_capa |= RTE_ETH_LINK_SPEED_40G;
+
+ if (XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT))
+ link_speed_capa |= RTE_ETH_LINK_SPEED_56G;
+
+ if (XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT))
+ link_speed_capa |= RTE_ETH_LINK_SPEED_25G;
+
+ if (XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT))
+ link_speed_capa |= RTE_ETH_LINK_SPEED_50G;
+
+ if (XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))
+ link_speed_capa |= RTE_ETH_LINK_SPEED_100G;
+
+ if (XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT))
+ link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
+
+ flags = ecmd->link_mode_masks[2] |
+ ((uint64_t)ecmd->link_mode_masks[3] << 32);
+ if (XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT - 64) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT - 64) ||
+ XSC_CHECK_BIT(flags, ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT - 64))
+ link_speed_capa |= RTE_ETH_LINK_SPEED_200G;
+
+
+ *link = dev_link;
+ *capa = link_speed_capa;
+ return 0;
+}
+
+static int
+xsc_rdma_link_update(struct xsc_dev *xdev, int wait_to_complete)
+{
+ int ret = 0;
+ struct rte_eth_link dev_link;
+ time_t start_time = time(NULL);
+ int retry = 3;
+ uint32_t link_speed_capa = 0;
+
+ do {
+ ret = xsc_rdma_link_status_get(xdev, &dev_link, &link_speed_capa);
+ if (ret == 0)
+ break;
+ /* Handle wait to complete situation. */
+ if ((wait_to_complete || retry) && ret == -EAGAIN) {
+ if (abs((int)difftime(time(NULL), start_time)) <
+ 10) {
+ usleep(0);
+ continue;
+ } else {
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+ } else if (ret < 0) {
+ return ret;
+ }
+ } while (wait_to_complete || retry-- > 0);
+ ret = !memcmp(&xdev->pf_dev_link, &dev_link,
+ sizeof(struct rte_eth_link));
+ xdev->pf_dev_link = dev_link;
+ xdev->link_speed_capa = link_speed_capa;
+ return ret;
+}
+
+static int
+xsc_rdma_event_get(struct xsc_dev *xdev)
+{
+ struct xsc_rdma_priv *priv = (struct xsc_rdma_priv *)xdev->dev_priv;
+ struct ibv_context *ibv_ctx = priv->ibv_ctx;
+ struct ibv_async_event event;
+
+ if (ibv_get_async_event(ibv_ctx, &event))
+ return XSC_EVENT_TYPE_NONE;
+
+ if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
+ event.event_type == IBV_EVENT_PORT_ERR)) {
+ ibv_ack_async_event(&event);
+ return XSC_EVENT_TYPE_CHANGE_LINK;
+ }
+
+ ibv_ack_async_event(&event);
+ return XSC_EVENT_TYPE_NONE;
+}
+
+static int
+xsc_rdma_intr_handler_install(struct xsc_dev *xdev, rte_intr_callback_fn cb, void *cb_arg)
+{
+ int ret;
+ int flags;
+ struct xsc_rdma_priv *priv = (struct xsc_rdma_priv *)xdev->dev_priv;
+ struct ibv_context *ibv_ctx = priv->ibv_ctx;
+
+ xdev->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+ if (xdev->intr_handle == NULL) {
+ rte_errno = ENOMEM;
+ return -ENOMEM;
+ }
+
+ rte_intr_fd_set(xdev->intr_handle, -1);
+
+ flags = fcntl(ibv_ctx->async_fd, F_GETFL);
+ ret = fcntl(ibv_ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
+ if (ret == 0) {
+ rte_intr_fd_set(xdev->intr_handle, ibv_ctx->async_fd);
+ rte_intr_type_set(xdev->intr_handle, RTE_INTR_HANDLE_EXT);
+ ret = rte_intr_callback_register(xdev->intr_handle, cb, cb_arg);
+ if (ret != 0)
+ rte_intr_fd_set(xdev->intr_handle, -1);
+ }
+
+ return ret;
+}
+
+static int
+xsc_rdma_intr_handler_uninstall(struct xsc_dev *xdev)
+{
+ rte_intr_instance_free(xdev->intr_handle);
+ return 0;
+}
+
+static int
+xsc_rdma_dev_close(struct xsc_dev *xdev)
+{
+ struct xsc_rdma_priv *rdma_priv = (struct xsc_rdma_priv *)xdev->dev_priv;
+
+ munmap(xdev->bar_addr, xdev->bar_len);
+ close(xdev->ctrl_fd);
+ ibv_close_device(rdma_priv->ibv_ctx);
+ rte_free(rdma_priv);
+
+ return 0;
+}
+
+static int
+xsc_rdma_init_obj(struct xscdv_obj *obj, uint64_t obj_type)
+{
+#if HAVE_XSC_DV_PROVIDER
+ return xscdv_init_obj(obj, obj_type);
+#else
+ (void)obj;
+ (void)obj_type;
+ return 0;
+#endif
+}
+
+static int
+xsc_rdma_destroy_qp(void *qp)
+{
+ return ibv_destroy_qp(qp);
+}
+
+static int
+xsc_rdma_destroy_cq(void *cq)
+{
+ return ibv_destroy_cq(cq);
+}
+
+static int
+xsc_set_mtu(uint16_t mtu, uint32_t ifindex)
+{
+ struct ifreq request = { .ifr_mtu = mtu, };
+ struct ifreq *ifr = &request;
+ char ifname[sizeof(ifr->ifr_name)];
+
+	if (if_indextoname(ifindex, ifname) == NULL) {
+		rte_errno = errno;
+		return -rte_errno;
+	}
+
+ return xsc_ifreq_by_ifname(ifname, SIOCSIFMTU, &request);
+}
+
+static int
+xsc_get_mtu(uint16_t *mtu, uint32_t ifindex)
+{
+ struct ifreq request;
+ struct ifreq *ifr = &request;
+ char ifname[sizeof(ifr->ifr_name)];
+ int ret;
+
+	if (if_indextoname(ifindex, ifname) == NULL) {
+		rte_errno = errno;
+		return -rte_errno;
+	}
+
+ ret = xsc_ifreq_by_ifname(ifname, SIOCGIFMTU, &request);
+ if (ret)
+ return ret;
+ *mtu = request.ifr_mtu;
+ return 0;
+}
+
+static int
+xsc_rdma_set_mtu(struct xsc_dev *xdev, uint16_t mtu)
+{
+ uint16_t get_mtu = 0;
+ int ret = 0;
+
+ ret = xsc_set_mtu(mtu, xdev->ifindex);
+ if (ret)
+ return ret;
+
+ ret = xsc_get_mtu(&get_mtu, xdev->ifindex);
+ if (ret)
+ return ret;
+
+ if (get_mtu != mtu) {
+		PMD_DRV_LOG(ERR, "Failed to set MTU to %u", mtu);
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int
+xsc_rdma_get_mac(struct xsc_dev *xdev, uint8_t *mac)
+{
+ struct ifreq request;
+ struct ifreq *ifr = &request;
+ char ifname[sizeof(ifr->ifr_name)];
+ int ret;
+
+	if (if_indextoname(xdev->ifindex, ifname) == NULL) {
+		rte_errno = errno;
+		return -rte_errno;
+	}
+
+ ret = xsc_ifreq_by_ifname(ifname, SIOCGIFHWADDR, &request);
+ if (ret)
+ return ret;
+
+ memcpy(mac, request.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
+ return 0;
+}
+
+static int
+xsc_rdma_rx_cq_create(struct xsc_dev *xdev, struct xsc_rx_cq_params *cq_params,
+ struct xsc_rx_cq_info *cq_info)
+{
+ struct xscdv_obj obj;
+ struct xscdv_cq cq = {0};
+ struct xsc_rdma_priv *priv = (struct xsc_rdma_priv *)xdev->dev_priv;
+ uint32_t cqe_s = cq_params->wqe_s;
+ int ret;
+
+ cq_info->cq = ibv_create_cq(priv->ibv_ctx, cqe_s, NULL, NULL, 0);
+ if (cq_info->cq == NULL) {
+ rte_errno = errno;
+ goto error;
+ }
+
+ obj.cq.in = cq_info->cq;
+ obj.cq.out = &cq;
+
+ ret = xsc_rdma_init_obj(&obj, XSCDV_OBJ_CQ);
+ if (ret) {
+ rte_errno = errno;
+ goto error;
+ }
+
+ cq_info->cqe_n = rte_log2_u32(cq.cqe_cnt);
+ cq_info->cqes = (void *)(uintptr_t)cq.buf;
+ cq_info->cq_db = cq.db;
+ cq_info->cqn = cq.cqn;
+
+ return 0;
+
+error:
+	if (cq_info->cq != NULL) {
+		ibv_destroy_cq(cq_info->cq);
+		cq_info->cq = NULL;
+	}
+	return -1;
+}
+
+static int
+xsc_rdma_tx_cq_create(struct xsc_dev *xdev, struct xsc_tx_cq_params *cq_params,
+ struct xsc_tx_cq_info *cq_info)
+{
+ struct xsc_rdma_priv *priv = (struct xsc_rdma_priv *)xdev->dev_priv;
+ const int cqe_n = 1 << cq_params->elts_n;
+ struct xscdv_obj obj = { };
+ struct xscdv_cq cq = { };
+ int ret;
+
+ cq_info->cq = ibv_create_cq(priv->ibv_ctx, cqe_n, NULL, NULL, 0);
+ if (cq_info->cq == NULL) {
+ rte_errno = errno;
+ goto error;
+ }
+
+ obj.cq.in = cq_info->cq;
+ obj.cq.out = (struct xscdv_cq *)&cq;
+ ret = xsc_rdma_init_obj(&obj, XSCDV_OBJ_CQ);
+ if (ret != 0) {
+ rte_errno = errno;
+ goto error;
+ }
+
+ cq_info->cqe_n = rte_log2_u32(cq.cqe_cnt);
+ cq_info->cqe_s = 1 << cq_info->cqe_n;
+ /* cqe doorbell */
+ cq_info->cq_db = cq.db;
+ cq_info->cqn = cq.cqn;
+ cq_info->cqes = cq.buf;
+
+ return 0;
+
+error:
+	if (cq_info->cq != NULL) {
+		ibv_destroy_cq(cq_info->cq);
+		cq_info->cq = NULL;
+	}
+	return -1;
+}
+
+static struct ibv_qp *
+xsc_txq_ibv_qp_create(struct xsc_dev *xdev, struct xsc_tx_qp_params *qp_params,
+ struct xsc_tx_qp_info *qp_info)
+{
+ struct ibv_qp *qp_obj = NULL;
+ struct ibv_qp_init_attr_ex qp_attr = { 0 };
+ struct xsc_rdma_priv *priv = (struct xsc_rdma_priv *)xdev->dev_priv;
+ const int desc = 1 << qp_params->elts_n;
+
+ qp_attr.send_cq = qp_params->cq;
+ qp_attr.recv_cq = qp_params->cq;
+ qp_attr.cap.max_send_wr = desc;
+ qp_attr.cap.max_recv_wr = 0;
+ qp_attr.cap.max_send_sge = 1;
+ qp_attr.qp_type = IBV_QPT_RAW_PACKET;
+ qp_attr.pd = priv->ibv_pd;
+ qp_attr.sq_sig_all = 0;
+
+ if (qp_params->tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
+ qp_attr.create_flags = XSC_QP_CREATE_RAWPACKET_TSO;
+ qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_CREATE_FLAGS;
+ qp_info->tso_en = 1;
+ PMD_DRV_LOG(DEBUG, "Port txq %u, create tso qp",
+ qp_params->qp_id);
+ } else {
+ qp_attr.create_flags = XSC_QP_CREATE_RAWPACKET_TX;
+ qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_CREATE_FLAGS;
+ PMD_DRV_LOG(DEBUG, "Port txq %u, create non-tso qp",
+ qp_params->qp_id);
+ }
+
+ qp_obj = ibv_create_qp_ex(priv->ibv_ctx, &qp_attr);
+ if (qp_obj == NULL) {
+ PMD_DRV_LOG(ERR, "Port txq %u, create %s qp fail, errno=%d",
+ qp_params->qp_id,
+ qp_attr.create_flags & XSC_QP_CREATE_RAWPACKET_TSO ?
+ "tso" : "non-tso", errno);
+
+ if (!(qp_params->tx_offloads & (RTE_ETH_TX_OFFLOAD_TCP_TSO))) {
+ qp_attr.create_flags = XSC_QP_CREATE_RAWPACKET_TSO;
+ qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_CREATE_FLAGS;
+ PMD_DRV_LOG(DEBUG, "Port txq %u, recreate tso qp",
+ qp_params->qp_id);
+
+ qp_obj = ibv_create_qp_ex(priv->ibv_ctx, &qp_attr);
+ if (qp_obj == NULL)
+ PMD_DRV_LOG(ERR, "Port txq %u, recreate tso qp fail, errno=%d",
+ qp_params->qp_id, errno);
+ else
+ qp_info->tso_en = 1;
+ }
+ }
+
+ return qp_obj;
+}
+
+static int
+xsc_rdma_tx_qp_create(struct xsc_dev *xdev, struct xsc_tx_qp_params *qp_params,
+ struct xsc_tx_qp_info *qp_info)
+{
+ struct xscdv_obj obj = { };
+ struct xscdv_qp qp = { };
+ int ret;
+
+ qp_info->qp = xsc_txq_ibv_qp_create(xdev, qp_params, qp_info);
+ if (qp_info->qp == NULL) {
+ rte_errno = errno;
+ goto error;
+ }
+
+ obj.qp.in = qp_info->qp;
+ obj.qp.out = (struct xscdv_qp *)&qp;
+ ret = xsc_rdma_init_obj(&obj, XSCDV_OBJ_QP);
+ if (ret != 0) {
+ rte_errno = errno;
+ goto error;
+ }
+
+ qp_info->qpn = ((struct ibv_qp *)qp_info->qp)->qp_num;
+ qp_info->wqes = qp.sq.buf;
+ qp_info->wqe_n = rte_log2_u32(qp.sq.wqe_cnt);
+
+ qp_info->qp_db = qp.sq.db;
+ return 0;
+
+error:
+	if (qp_info->qp != NULL) {
+		ibv_destroy_qp(qp_info->qp);
+		qp_info->qp = NULL;
+	}
+	return -1;
+}
+
+static int
+xsc_get_ifname_by_pci_addr(struct rte_pci_addr *addr, char *ifname)
+{
+ DIR *dir;
+ struct dirent *dent;
+ unsigned int dev_type = 0;
+ unsigned int dev_port_prev = ~0u;
+ char match[IF_NAMESIZE] = "";
+ char net_path[PATH_MAX];
+
+ snprintf(net_path, sizeof(net_path), "%s/" PCI_PRI_FMT "/net",
+ rte_pci_get_sysfs_path(), addr->domain, addr->bus,
+ addr->devid, addr->function);
+
+ dir = opendir(net_path);
+ if (dir == NULL) {
+ PMD_DRV_LOG(ERR, "Could not open %s", net_path);
+ return -ENOENT;
+ }
+
+ while ((dent = readdir(dir)) != NULL) {
+ char *name = dent->d_name;
+ FILE *file;
+ unsigned int dev_port;
+ int r;
+ char path[PATH_MAX];
+
+ if ((name[0] == '.') &&
+ ((name[1] == '\0') ||
+ ((name[1] == '.') && (name[2] == '\0'))))
+ continue;
+
+ snprintf(path, sizeof(path), "%s/%s/%s",
+ net_path, name, (dev_type ? "dev_id" : "dev_port"));
+
+ file = fopen(path, "rb");
+ if (file == NULL) {
+ if (errno != ENOENT)
+ continue;
+ /*
+ * Switch to dev_id when dev_port does not exist as
+ * is the case with Linux kernel versions < 3.15.
+ */
+try_dev_id:
+ match[0] = '\0';
+ if (dev_type)
+ break;
+ dev_type = 1;
+ dev_port_prev = ~0u;
+ rewinddir(dir);
+ continue;
+ }
+ r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
+ fclose(file);
+ if (r != 1)
+ continue;
+ /*
+ * Switch to dev_id when dev_port returns the same value for
+ * all ports. May happen when using a MOFED release older than
+ * 3.0 with a Linux kernel >= 3.15.
+ */
+ if (dev_port == dev_port_prev)
+ goto try_dev_id;
+ dev_port_prev = dev_port;
+ if (dev_port == 0)
+ snprintf(match, IF_NAMESIZE, "%s", name);
+ }
+ closedir(dir);
+ if (match[0] == '\0')
+ return -ENOENT;
+
+ snprintf(ifname, IF_NAMESIZE, "%s", match);
+ return 0;
+}
+
+static int
+xsc_get_ifindex_by_ifname(const char *ifname, int *ifindex)
+{
+ struct ifreq ifr;
+ int sockfd;
+
+ sockfd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sockfd == -1)
+ return -EINVAL;
+
+	rte_strscpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+ if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) {
+ close(sockfd);
+ return -EINVAL;
+ }
+
+ *ifindex = ifr.ifr_ifindex;
+
+ close(sockfd);
+ return 0;
+}
+
+static int
+xsc_rdma_ifindex_init(struct xsc_dev *xdev)
+{
+ char ifname[IF_NAMESIZE];
+ struct rte_pci_addr *addr = &xdev->pci_dev->addr;
+ int *ifindex = &xdev->ifindex;
+ int ret;
+
+ ret = xsc_get_ifname_by_pci_addr(addr, ifname);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Could not get ifname by pci address:" PCI_PRI_FMT,
+ addr->domain, addr->bus, addr->devid, addr->function);
+ return ret;
+ }
+
+ ret = xsc_get_ifindex_by_ifname(ifname, ifindex);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Could not get ifindex by ifname:%s", ifname);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+xsc_rdma_dev_init(struct xsc_dev *xdev)
+{
+ int ret;
+
+ ret = xsc_rdma_fork_init();
+ if (ret != 0)
+ goto init_fail;
+
+ ret = xsc_rdma_dev_open(xdev);
+ if (ret != 0)
+ goto init_fail;
+
+ ret = xsc_rdma_bar_init(xdev);
+ if (ret != 0)
+ goto init_fail;
+
+ ret = xsc_rdma_hwinfo_init(xdev);
+ if (ret != 0)
+ goto init_fail;
+
+ ret = xsc_rdma_ifindex_init(xdev);
+ if (ret != 0)
+ goto init_fail;
+
+ return 0;
+
+init_fail:
+ xsc_rdma_dev_close(xdev);
+ return -1;
+}
+
+static int
+xsc_rdma_modify_qp_status(struct xsc_dev *xdev, uint32_t qpn, int num, int opcode)
+{
+ struct {
+ struct xsc_ioctl_data_tl tl;
+ struct xsc_ioctl_qp_range info;
+ } data_info;
+
+	int ret;
+
+	memset(&data_info, 0, sizeof(data_info));
+	data_info.tl.opmod = XSC_CMD_OP_SET_QP_STATUS;
+ data_info.info.opcode = opcode;
+ data_info.info.qpn = qpn;
+ data_info.info.num = num;
+
+ ret = xsc_ioctl(xdev, XSC_IOCTL_DRV_GET, XSC_CMD_OP_SET_QP_STATUS,
+ &data_info, sizeof(data_info), NULL, 0);
+ if (ret != 0) {
+ rte_errno = ret;
+ PMD_DRV_LOG(ERR, "Modify qp status fail, ret = %d", ret);
+ }
+
+ return ret;
+}
+
+static int
+xsc_rdma_modify_qp_qostree(struct xsc_dev *xdev, uint16_t qpn)
+{
+ int ret;
+ struct xsc_cmd_modify_raw_qp_request qos_request = { };
+
+ qos_request.prio = 0;
+ qos_request.qp_out_port = -1;
+ qos_request.lag_id = rte_cpu_to_be_16(xdev->hwinfo.lag_id);
+ qos_request.func_id = rte_cpu_to_be_16(xdev->hwinfo.func_id);
+ qos_request.dma_direct = 0;
+ qos_request.qpn = rte_cpu_to_be_16(qpn);
+ ret = xsc_ioctl(xdev, XSC_IOCTL_CMDQ, XSC_CMD_OP_MODIFY_RAW_QP,
+ &qos_request, sizeof(qos_request), NULL, 0);
+ if (ret != 0) {
+ rte_errno = ret;
+ PMD_DRV_LOG(ERR, "Modify qp qos fail, qpn=%u, ret=%d", qpn, ret);
+ }
+ return ret;
+}
+
+static struct xsc_dev_ops *xsc_rdma_ops = &(struct xsc_dev_ops) {
+ .kdrv = 1 << RTE_PCI_KDRV_UNKNOWN,
+ .dev_init = xsc_rdma_dev_init,
+ .dev_close = xsc_rdma_dev_close,
+ .set_mtu = xsc_rdma_set_mtu,
+ .get_mac = xsc_rdma_get_mac,
+ .set_link_up = xsc_rdma_set_link_up,
+ .set_link_down = xsc_rdma_set_link_down,
+ .link_update = xsc_rdma_link_update,
+ .destroy_qp = xsc_rdma_destroy_qp,
+ .destroy_cq = xsc_rdma_destroy_cq,
+ .modify_qp_status = xsc_rdma_modify_qp_status,
+ .modify_qp_qostree = xsc_rdma_modify_qp_qostree,
+ .rx_cq_create = xsc_rdma_rx_cq_create,
+ .tx_cq_create = xsc_rdma_tx_cq_create,
+ .tx_qp_create = xsc_rdma_tx_qp_create,
+ .mailbox_exec = xsc_rdma_mailbox_exec,
+ .intr_event_get = xsc_rdma_event_get,
+ .intr_handler_install = xsc_rdma_intr_handler_install,
+ .intr_handler_uninstall = xsc_rdma_intr_handler_uninstall,
+};
+
+RTE_INIT(xsc_rdma_ops_reg)
+{
+ xsc_dev_ops_register(xsc_rdma_ops);
+}
diff --git a/drivers/net/xsc/xsc_rx.c b/drivers/net/xsc/xsc_rx.c
new file mode 100644
index 0000000..c63d658
--- /dev/null
+++ b/drivers/net/xsc/xsc_rx.c
@@ -0,0 +1,518 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#include <rte_io.h>
+
+#include "xsc_log.h"
+#include "xsc_defs.h"
+#include "xsc_dev.h"
+#include "xsc_ethdev.h"
+#include "xsc_cmd.h"
+#include "xsc_rx.h"
+
+#define XSC_MAX_RECV_LEN 9800
+
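+/*
+ * The CQE appears to reuse its vni field to carry the RSS hash result
+ * for raw ethernet queues; a non-zero value marks the mbuf with
+ * RTE_MBUF_F_RX_RSS_HASH.
+ */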
+static inline void
+xsc_cq_to_mbuf(struct xsc_rxq_data *rxq, struct rte_mbuf *pkt,
+ volatile struct xsc_cqe *cqe)
+{
+ uint32_t rss_hash_res = 0;
+
+ pkt->port = rxq->port_id;
+ if (rxq->rss_hash) {
+ rss_hash_res = rte_be_to_cpu_32(cqe->vni);
+ if (rss_hash_res) {
+ pkt->hash.rss = rss_hash_res;
+ pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
+ }
+ }
+}
+
+static inline int
+xsc_rx_poll_len(struct xsc_rxq_data *rxq, volatile struct xsc_cqe *cqe)
+{
+ int len;
+
+	do {
+		int ret;
+
+		len = 0;
+
+ ret = xsc_check_cqe_own(cqe, rxq->cqe_n, rxq->cq_ci);
+ if (unlikely(ret != XSC_CQE_OWNER_SW)) {
+ if (unlikely(ret == XSC_CQE_OWNER_ERR))
+ ++rxq->stats.rx_errors;
+ else
+ return 0;
+ }
+
+ rxq->cq_ci += 1;
+ len = rte_le_to_cpu_32(cqe->msg_len);
+ return len;
+ } while (1);
+}
+
+static __rte_always_inline void
+xsc_pkt_info_sync(struct rte_mbuf *rep, struct rte_mbuf *seg)
+{
+ if (rep != NULL && seg != NULL) {
+ rep->data_len = seg->data_len;
+ rep->pkt_len = seg->pkt_len;
+ rep->data_off = seg->data_off;
+ rep->port = seg->port;
+ }
+}
+
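+/*
+ * Receive burst. CQEs are fetched two at a time through the xsc_cqe_u64
+ * view when the CQ index is even and more than one packet is requested;
+ * every consumed WQE slot is refilled with a freshly allocated mbuf, and
+ * the CQ/RQ doorbells are rung once per rx_free_thresh packets.
+ */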
+uint16_t
+xsc_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct xsc_rxq_data *rxq = dpdk_rxq;
+ const uint32_t wqe_m = rxq->wqe_m;
+ const uint32_t cqe_m = rxq->cqe_m;
+ const uint32_t sge_n = rxq->sge_n;
+ struct rte_mbuf *pkt = NULL;
+ struct rte_mbuf *seg = NULL;
+ volatile struct xsc_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_m];
+ uint32_t nb_pkts = 0;
+ uint64_t nb_bytes = 0;
+ uint32_t rq_ci = rxq->rq_ci;
+ int len = 0;
+ uint32_t cq_ci_two = 0;
+ int valid_cqe_num = 0;
+ int cqe_msg_len = 0;
+ volatile struct xsc_cqe_u64 *cqe_u64 = NULL;
+ struct rte_mbuf *rep;
+
+ while (pkts_n) {
+ uint32_t idx = rq_ci & wqe_m;
+ volatile struct xsc_wqe_data_seg *wqe =
+ &((volatile struct xsc_wqe_data_seg *)rxq->wqes)[idx << sge_n];
+
+ seg = (*rxq->elts)[idx];
+ rte_prefetch0(cqe);
+ rte_prefetch0(wqe);
+
+ rep = rte_mbuf_raw_alloc(seg->pool);
+ if (unlikely(rep == NULL)) {
+ ++rxq->stats.rx_nombuf;
+ break;
+ }
+
+ if (!pkt) {
+ if (valid_cqe_num) {
+ cqe = cqe + 1;
+ len = cqe_msg_len;
+ valid_cqe_num = 0;
+ } else if ((rxq->cq_ci % 2 == 0) && (pkts_n > 1)) {
+ cq_ci_two = (rxq->cq_ci & rxq->cqe_m) / 2;
+ cqe_u64 = &(*rxq->cqes_u64)[cq_ci_two];
+ cqe = (volatile struct xsc_cqe *)cqe_u64;
+ len = xsc_rx_poll_len(rxq, cqe);
+ if (len > 0) {
+ cqe_msg_len = xsc_rx_poll_len(rxq, cqe + 1);
+ if (cqe_msg_len > 0)
+ valid_cqe_num = 1;
+ }
+ } else {
+ cqe = &(*rxq->cqes)[rxq->cq_ci & rxq->cqe_m];
+ len = xsc_rx_poll_len(rxq, cqe);
+ }
+
+ if (!len) {
+ rte_mbuf_raw_free(rep);
+ break;
+ }
+
+ if (len > rte_pktmbuf_data_len(seg)) {
+ rte_mbuf_raw_free(rep);
+ pkt = NULL;
+ ++rq_ci;
+ continue;
+ }
+
+ pkt = seg;
+ pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
+ xsc_cq_to_mbuf(rxq, pkt, cqe);
+
+ if (rxq->crc_present)
+ len -= RTE_ETHER_CRC_LEN;
+ rte_pktmbuf_pkt_len(pkt) = len;
+ }
+
+ xsc_pkt_info_sync(rep, seg);
+ (*rxq->elts)[idx] = rep;
+
+ /* Fill wqe */
+ wqe->va = rte_cpu_to_le_64(rte_pktmbuf_iova(rep));
+ rte_pktmbuf_data_len(seg) = len;
+ nb_bytes += rte_pktmbuf_pkt_len(pkt);
+
+ *(pkts++) = pkt;
+ pkt = NULL;
+ --pkts_n;
+ ++nb_pkts;
+ ++rq_ci;
+ }
+
+ if (unlikely(nb_pkts == 0 && rq_ci == rxq->rq_ci))
+ return 0;
+
+ rxq->rq_ci = rq_ci;
+ rxq->nb_rx_hold += nb_pkts;
+
+ if (rxq->nb_rx_hold >= rxq->rx_free_thresh) {
+ union xsc_cq_doorbell cq_db = {
+ .cq_data = 0
+ };
+ cq_db.next_cid = rxq->cq_ci;
+ cq_db.cq_num = rxq->cqn;
+
+ union xsc_recv_doorbell rq_db = {
+ .recv_data = 0
+ };
+ rq_db.next_pid = (rxq->rq_ci << sge_n);
+ rq_db.qp_num = rxq->qpn;
+
+ rte_write32(rte_cpu_to_le_32(cq_db.cq_data), rxq->cq_db);
+ rte_write32(rte_cpu_to_le_32(rq_db.recv_data), rxq->rq_db);
+ rxq->nb_rx_hold = 0;
+ }
+
+ rxq->stats.rx_pkts += nb_pkts;
+ rxq->stats.rx_bytes += nb_bytes;
+
+ return nb_pkts;
+}
+
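+/*
+ * Post the initial receive WQEs. With a single dseg per WQE the whole
+ * XSC_MAX_RECV_LEN is credited to the mbuf; with multiple dsegs the
+ * second dseg points at a shared jumbo bounce buffer. Note the swapped
+ * naming below: jumbo_buffer_pa holds the pointer returned by
+ * rte_malloc() while jumbo_buffer_va holds its iova.
+ */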
+static void
+xsc_rxq_initialize(struct xsc_dev *xdev, struct xsc_rxq_data *rxq_data)
+{
+ const uint32_t wqe_n = rxq_data->wqe_s;
+ uint32_t i;
+ uint32_t seg_len = 0;
+ struct xsc_hwinfo *hwinfo = &xdev->hwinfo;
+ uint32_t rx_ds_num = hwinfo->recv_seg_num;
+ uint32_t log2ds = rte_log2_u32(rx_ds_num);
+ uintptr_t addr;
+ struct rte_mbuf *mbuf;
+ void *jumbo_buffer_pa = xdev->jumbo_buffer_pa;
+ void *jumbo_buffer_va = xdev->jumbo_buffer_va;
+ volatile struct xsc_wqe_data_seg *seg;
+ volatile struct xsc_wqe_data_seg *seg_next;
+
+ for (i = 0; (i != wqe_n); ++i) {
+ mbuf = (*rxq_data->elts)[i];
+ seg = &((volatile struct xsc_wqe_data_seg *)rxq_data->wqes)[i * rx_ds_num];
+ addr = (uintptr_t)rte_pktmbuf_iova(mbuf);
+ if (rx_ds_num == 1)
+ seg_len = XSC_MAX_RECV_LEN;
+ else
+ seg_len = rte_pktmbuf_data_len(mbuf);
+ *seg = (struct xsc_wqe_data_seg){
+ .va = rte_cpu_to_le_64(addr),
+ .seg_len = rte_cpu_to_le_32(seg_len),
+ .lkey = 0,
+ };
+
+ if (rx_ds_num != 1) {
+ seg_next = seg + 1;
+ if (jumbo_buffer_va == NULL) {
+ jumbo_buffer_pa = rte_malloc(NULL, XSC_MAX_RECV_LEN, 0);
+ if (jumbo_buffer_pa == NULL) {
+ /* Rely on mtu */
+ seg->seg_len = XSC_MAX_RECV_LEN;
+ PMD_DRV_LOG(ERR, "Failed to malloc jumbo_buffer");
+ continue;
+ } else {
+ jumbo_buffer_va =
+ (void *)rte_malloc_virt2iova(jumbo_buffer_pa);
+ if ((rte_iova_t)jumbo_buffer_va == RTE_BAD_IOVA) {
+ seg->seg_len = XSC_MAX_RECV_LEN;
+ PMD_DRV_LOG(ERR, "Failed to turn jumbo_buffer");
+ continue;
+ }
+ }
+ xdev->jumbo_buffer_pa = jumbo_buffer_pa;
+ xdev->jumbo_buffer_va = jumbo_buffer_va;
+ }
+ *seg_next = (struct xsc_wqe_data_seg){
+ .va = rte_cpu_to_le_64((uint64_t)jumbo_buffer_va),
+ .seg_len = rte_cpu_to_le_32(XSC_MAX_RECV_LEN - seg_len),
+ .lkey = 0,
+ };
+ }
+ }
+
+ rxq_data->rq_ci = wqe_n;
+ rxq_data->sge_n = log2ds;
+
+ union xsc_recv_doorbell recv_db = {
+ .recv_data = 0
+ };
+
+ recv_db.next_pid = wqe_n << log2ds;
+ recv_db.qp_num = rxq_data->qpn;
+ rte_write32(rte_cpu_to_le_32(recv_db.recv_data), rxq_data->rq_db);
+}
+
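+/*
+ * Create all RSS RQs with a single XSC_CMD_OP_CREATE_MULTI_QP mailbox
+ * command: one xsc_cmd_create_qp_request per queue, each trailed by the
+ * iovas of its WQE pages; firmware answers with a contiguous QPN range
+ * starting at qpn_base.
+ */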
+static int
+xsc_rss_qp_create(struct xsc_ethdev_priv *priv, int port_id)
+{
+ struct xsc_cmd_create_multiqp_mbox_in *in;
+ struct xsc_cmd_create_qp_request *req;
+ struct xsc_cmd_create_multiqp_mbox_out *out;
+ uint8_t log_ele;
+ uint64_t iova;
+ int wqe_n;
+ int in_len, out_len, cmd_len;
+ int entry_total_len, entry_len;
+ uint8_t log_rq_sz, log_sq_sz = 0;
+ uint32_t wqe_total_len;
+ int j, ret;
+ uint16_t i, pa_num;
+ int rqn_base;
+ struct xsc_rxq_data *rxq_data;
+ struct xsc_dev *xdev = priv->xdev;
+ struct xsc_hwinfo *hwinfo = &xdev->hwinfo;
+ char name[RTE_ETH_NAME_MAX_LEN] = { 0 };
+ void *cmd_buf;
+
+ rxq_data = xsc_rxq_get(priv, 0);
+ if (rxq_data == NULL)
+ return -EINVAL;
+
+ log_ele = rte_log2_u32(sizeof(struct xsc_wqe_data_seg));
+ wqe_n = rxq_data->wqe_s;
+ log_rq_sz = rte_log2_u32(wqe_n * hwinfo->recv_seg_num);
+ wqe_total_len = 1 << (log_rq_sz + log_sq_sz + log_ele);
+
+ pa_num = (wqe_total_len + XSC_PAGE_SIZE - 1) / XSC_PAGE_SIZE;
+ entry_len = sizeof(struct xsc_cmd_create_qp_request) + sizeof(uint64_t) * pa_num;
+ entry_total_len = entry_len * priv->num_rq;
+
+ in_len = sizeof(struct xsc_cmd_create_multiqp_mbox_in) + entry_total_len;
+ out_len = sizeof(struct xsc_cmd_create_multiqp_mbox_out) + entry_total_len;
+ cmd_len = RTE_MAX(in_len, out_len);
+ cmd_buf = malloc(cmd_len);
+ if (cmd_buf == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Alloc rss qp create cmd memory failed");
+ goto error;
+ }
+
+ in = cmd_buf;
+ memset(in, 0, cmd_len);
+ in->qp_num = rte_cpu_to_be_16((uint16_t)priv->num_rq);
+ in->qp_type = XSC_QUEUE_TYPE_RAW;
+ in->req_len = rte_cpu_to_be_32(cmd_len);
+
+ for (i = 0; i < priv->num_rq; i++) {
+ rxq_data = xsc_rxq_get(priv, i);
+		if (rxq_data == NULL) {
+			rte_errno = EINVAL;
+			goto error;
+		}
+ req = (struct xsc_cmd_create_qp_request *)(&in->data[0] + entry_len * i);
+ req->input_qpn = rte_cpu_to_be_16(0); /* useless for eth */
+ req->pa_num = rte_cpu_to_be_16(pa_num);
+ req->qp_type = XSC_QUEUE_TYPE_RAW;
+ req->log_rq_sz = log_rq_sz;
+ req->cqn_recv = rte_cpu_to_be_16((uint16_t)rxq_data->cqn);
+ req->cqn_send = req->cqn_recv;
+ req->glb_funcid = rte_cpu_to_be_16((uint16_t)hwinfo->func_id);
+ /* Alloc pas addr */
+ snprintf(name, sizeof(name), "wqe_mem_rx_%d_%d", port_id, i);
+ rxq_data->rq_pas = rte_memzone_reserve_aligned(name,
+ (XSC_PAGE_SIZE * pa_num),
+ SOCKET_ID_ANY,
+ 0, XSC_PAGE_SIZE);
+ if (rxq_data->rq_pas == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Alloc rxq pas memory failed");
+ goto error;
+ }
+
+ iova = rxq_data->rq_pas->iova;
+ for (j = 0; j < pa_num; j++)
+ req->pas[j] = rte_cpu_to_be_64(iova + j * XSC_PAGE_SIZE);
+ }
+
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_CREATE_MULTI_QP);
+ out = cmd_buf;
+ ret = xsc_dev_mailbox_exec(xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR,
+ "Create rss rq failed, port id=%d, qp_num=%d, ret=%d, out.status=%u",
+ port_id, priv->num_rq, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ goto error;
+ }
+ rqn_base = rte_be_to_cpu_32(out->qpn_base) & 0xffffff;
+
+ for (i = 0; i < priv->num_rq; i++) {
+ rxq_data = xsc_rxq_get(priv, i);
+		if (rxq_data == NULL) {
+			rte_errno = EINVAL;
+			goto error;
+		}
+ rxq_data->wqes = rxq_data->rq_pas->addr;
+ if (!xsc_dev_is_vf(xdev))
+ rxq_data->rq_db = (uint32_t *)((uint8_t *)xdev->bar_addr +
+ XSC_PF_RX_DB_ADDR);
+ else
+ rxq_data->rq_db = (uint32_t *)((uint8_t *)xdev->bar_addr +
+ XSC_VF_RX_DB_ADDR);
+
+ rxq_data->qpn = rqn_base + i;
+ xsc_dev_modify_qp_status(xdev, rxq_data->qpn, 1, XSC_CMD_OP_RTR2RTS_QP);
+ xsc_rxq_initialize(xdev, rxq_data);
+ rxq_data->cq_ci = 0;
+ priv->dev_data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+ PMD_DRV_LOG(INFO, "Port %d create rx qp, wqe_s:%d, wqe_n:%d, qp_db=%p, qpn:%u",
+ port_id,
+ rxq_data->wqe_s, rxq_data->wqe_n,
+ rxq_data->rq_db, rxq_data->qpn);
+ }
+
+ free(cmd_buf);
+ return 0;
+
+error:
+ free(cmd_buf);
+ return -rte_errno;
+}
+
+int
+xsc_rxq_rss_obj_new(struct xsc_ethdev_priv *priv, uint16_t port_id)
+{
+ int ret;
+ uint32_t i;
+ struct xsc_dev *xdev = priv->xdev;
+ struct xsc_rxq_data *rxq_data;
+ struct xsc_rx_cq_params cq_params = {0};
+ struct xsc_rx_cq_info cq_info = {0};
+
+ /* Create CQ */
+ for (i = 0; i < priv->num_rq; ++i) {
+ rxq_data = xsc_rxq_get(priv, i);
+ if (rxq_data == NULL)
+ return -EINVAL;
+
+ memset(&cq_params, 0, sizeof(cq_params));
+ memset(&cq_info, 0, sizeof(cq_info));
+ cq_params.port_id = rxq_data->port_id;
+ cq_params.qp_id = rxq_data->idx;
+ cq_params.wqe_s = rxq_data->wqe_s;
+
+ ret = xsc_dev_rx_cq_create(xdev, &cq_params, &cq_info);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Port %u rxq %u create cq fail", port_id, i);
+ rte_errno = errno;
+ goto error;
+ }
+
+ rxq_data->cq = cq_info.cq;
+ rxq_data->cqe_n = cq_info.cqe_n;
+ rxq_data->cqe_s = 1 << rxq_data->cqe_n;
+ rxq_data->cqe_m = rxq_data->cqe_s - 1;
+ rxq_data->cqes = cq_info.cqes;
+ rxq_data->cq_db = cq_info.cq_db;
+ rxq_data->cqn = cq_info.cqn;
+
+ PMD_DRV_LOG(INFO, "Port %u create rx cq, cqe_s:%d, cqe_n:%d, cq_db=%p, cqn:%u",
+ port_id,
+ rxq_data->cqe_s, rxq_data->cqe_n,
+ rxq_data->cq_db, rxq_data->cqn);
+ }
+
+ ret = xsc_rss_qp_create(priv, port_id);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Port %u rss rxq create fail", port_id);
+ goto error;
+ }
+ return 0;
+
+error:
+ return -rte_errno;
+}
+
+int
+xsc_rxq_elts_alloc(struct xsc_rxq_data *rxq_data)
+{
+ uint32_t elts_s = rxq_data->wqe_s;
+ struct rte_mbuf *mbuf;
+ uint32_t i;
+
+ for (i = 0; (i != elts_s); ++i) {
+ mbuf = rte_pktmbuf_alloc(rxq_data->mp);
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(ERR, "Port %u rxq %u empty mbuf pool",
+ rxq_data->port_id, rxq_data->idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+
+ mbuf->port = rxq_data->port_id;
+ mbuf->nb_segs = 1;
+ rte_pktmbuf_data_len(mbuf) = mbuf->buf_len - mbuf->data_off;
+ rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);
+ (*rxq_data->elts)[i] = mbuf;
+ }
+
+ return 0;
+error:
+ elts_s = i;
+ for (i = 0; (i != elts_s); ++i) {
+ if ((*rxq_data->elts)[i] != NULL)
+ rte_pktmbuf_free_seg((*rxq_data->elts)[i]);
+ (*rxq_data->elts)[i] = NULL;
+ }
+
+ PMD_DRV_LOG(ERR, "Port %u rxq %u start failed, free elts",
+ rxq_data->port_id, rxq_data->idx);
+
+ return -rte_errno;
+}
+
+void
+xsc_rxq_elts_free(struct xsc_rxq_data *rxq_data)
+{
+ uint16_t i;
+
+ if (rxq_data->elts == NULL)
+ return;
+ for (i = 0; i != rxq_data->wqe_s; ++i) {
+ if ((*rxq_data->elts)[i] != NULL)
+ rte_pktmbuf_free_seg((*rxq_data->elts)[i]);
+ (*rxq_data->elts)[i] = NULL;
+ }
+
+ PMD_DRV_LOG(DEBUG, "Port %u rxq %u free elts", rxq_data->port_id, rxq_data->idx);
+}
+
+void
+xsc_rxq_rss_obj_release(struct xsc_dev *xdev, struct xsc_rxq_data *rxq_data)
+{
+ struct xsc_cmd_destroy_qp_mbox_in in = { .hdr = { 0 } };
+ struct xsc_cmd_destroy_qp_mbox_out out = { .hdr = { 0 } };
+ int ret, in_len, out_len;
+ uint32_t qpn = rxq_data->qpn;
+
+ xsc_dev_modify_qp_status(xdev, qpn, 1, XSC_CMD_OP_QP_2RST);
+
+ in_len = sizeof(struct xsc_cmd_destroy_qp_mbox_in);
+ out_len = sizeof(struct xsc_cmd_destroy_qp_mbox_out);
+ in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_DESTROY_QP);
+ in.qpn = rte_cpu_to_be_32(rxq_data->qpn);
+
+ ret = xsc_dev_mailbox_exec(xdev, &in, in_len, &out, out_len);
+ if (ret != 0 || out.hdr.status != 0) {
+ PMD_DRV_LOG(ERR,
+ "Release rss rq failed, port id=%d, qid=%d, err=%d, out.status=%u",
+ rxq_data->port_id, rxq_data->idx, ret, out.hdr.status);
+ rte_errno = ENOEXEC;
+ return;
+ }
+
+ rte_memzone_free(rxq_data->rq_pas);
+
+ if (rxq_data->cq != NULL)
+ xsc_dev_destroy_cq(xdev, rxq_data->cq);
+ rxq_data->cq = NULL;
+}
diff --git a/drivers/net/xsc/xsc_rx.h b/drivers/net/xsc/xsc_rx.h
new file mode 100644
index 0000000..90fbb89
--- /dev/null
+++ b/drivers/net/xsc/xsc_rx.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_RX_H_
+#define _XSC_RX_H_
+
+#define XSC_RX_FREE_THRESH 32
+
+struct xsc_rxq_stats {
+ uint64_t rx_pkts; /* Total number of rx packets */
+ uint64_t rx_bytes; /* Total number of rx bytes */
+ uint64_t rx_errors; /* Total number of rx error packets */
+ uint64_t rx_nombuf; /* Total number of rx mbuf alloc failed */
+};
+
+struct __rte_cache_aligned xsc_rxq_data {
+	uint16_t idx; /* QP idx */
+ uint16_t port_id;
+ void *cq; /* CQ pointer */
+ void *qp; /* QP pointer */
+ uint32_t cqn; /* CQ serial number */
+ uint32_t qpn; /* QP serial number */
+ uint16_t wqe_s; /* Number of WQE */
+ uint16_t wqe_m; /* Mask of WQE number */
+ uint16_t cqe_s; /* Number of CQE */
+ uint16_t cqe_m; /* Mask of CQE number */
+ uint16_t wqe_n:4; /* Log 2 of WQE number */
+ uint16_t sge_n:4; /* Log 2 of each WQE DS number */
+ uint16_t cqe_n:4; /* Log 2 of CQE number */
+ uint16_t rsv0:4;
+ volatile uint32_t *rq_db;
+ volatile uint32_t *cq_db;
+ uint32_t rq_ci;
+ uint32_t rq_pi;
+ uint16_t cq_ci;
+ uint16_t rx_free_thresh;
+ uint16_t nb_rx_hold;
+ volatile void *wqes;
+ union {
+ volatile struct xsc_cqe(*cqes)[];
+ volatile struct xsc_cqe_u64(*cqes_u64)[];
+ };
+ struct rte_mbuf *(*elts)[]; /* Record the mbuf of wqe addr */
+ struct rte_mempool *mp;
+ const struct rte_memzone *rq_pas; /* Palist memory */
+ uint32_t socket;
+ struct xsc_ethdev_priv *priv;
+ struct xsc_rxq_stats stats;
+ /* attr */
+ uint16_t csum:1; /* Checksum offloading enable */
+ uint16_t hw_timestamp:1;
+ uint16_t vlan_strip:1;
+ uint16_t crc_present:1; /* CRC flag */
+ uint16_t rss_hash:1; /* RSS hash enabled */
+ uint16_t rsv1:11;
+};
+
+uint16_t xsc_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
+int xsc_rxq_elts_alloc(struct xsc_rxq_data *rxq_data);
+int xsc_rxq_rss_obj_new(struct xsc_ethdev_priv *priv, uint16_t port_id);
+void xsc_rxq_rss_obj_release(struct xsc_dev *xdev, struct xsc_rxq_data *rxq_data);
+void xsc_rxq_elts_free(struct xsc_rxq_data *rxq_data);
+
+#endif /* _XSC_RX_H_ */
diff --git a/drivers/net/xsc/xsc_rxtx.h b/drivers/net/xsc/xsc_rxtx.h
new file mode 100644
index 0000000..0b5a19a
--- /dev/null
+++ b/drivers/net/xsc/xsc_rxtx.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_RXTX_H_
+#define _XSC_RXTX_H_
+
+#include <rte_byteorder.h>
+
+#include "xsc_compat.h"
+
+#define XSC_CQE_OWNER_MASK 0x1
+#define XSC_CQE_OWNER_HW 0x2
+#define XSC_CQE_OWNER_SW 0x4
+#define XSC_CQE_OWNER_ERR 0x8
+#define XSC_OPCODE_RAW 0x7
+
+struct __rte_packed_begin xsc_send_wqe_ctrl_seg {
+ rte_le32_t msg_opcode:8;
+ rte_le32_t with_immdt:1;
+ rte_le32_t csum_en:2;
+ rte_le32_t ds_data_num:5;
+ rte_le32_t wqe_id:16;
+ rte_le32_t msg_len;
+ union {
+ rte_le32_t opcode_data;
+ struct {
+ rte_le16_t has_pph:1;
+ rte_le16_t so_type:1;
+ rte_le16_t so_data_size:14;
+ rte_le16_t rsv1:8;
+ rte_le16_t so_hdr_len:8;
+ };
+ struct {
+ rte_le16_t desc_id;
+ rte_le16_t is_last_wqe:1;
+ rte_le16_t dst_qp_id:15;
+ };
+ };
+ rte_le32_t se:1;
+ rte_le32_t ce:1;
+ rte_le32_t rsv2:30;
+} __rte_packed_end;
+
+struct __rte_packed_begin xsc_wqe_data_seg {
+ union {
+ struct {
+ uint8_t in_line:1;
+ uint8_t rsv0:7;
+ };
+ struct {
+ rte_le32_t rsv1:1;
+ rte_le32_t seg_len:31;
+ rte_le32_t lkey;
+ rte_le64_t va;
+ };
+ struct {
+ uint8_t rsv2:1;
+ uint8_t len:7;
+ uint8_t in_line_data[15];
+ };
+ };
+} __rte_packed_end;
+
+struct __rte_packed_begin xsc_wqe {
+ union {
+ struct xsc_send_wqe_ctrl_seg cseg;
+ uint32_t ctrl[4];
+ };
+ union {
+ struct xsc_wqe_data_seg dseg[XSC_SEND_WQE_DS];
+ uint8_t data[XSC_ESEG_EXTRA_DATA_SIZE];
+ };
+} __rte_packed_end;
+
+struct __rte_packed_begin xsc_cqe {
+ union {
+ uint8_t msg_opcode;
+ struct {
+ uint8_t error_code:7;
+ uint8_t is_error:1;
+ };
+ };
+ rte_le16_t qp_id:15;
+ rte_le16_t rsv:1;
+ uint8_t se:1;
+ uint8_t has_pph:1;
+ uint8_t type:1;
+ uint8_t with_immdt:1;
+ uint8_t csum_err:4;
+ rte_le32_t imm_data;
+ rte_le32_t msg_len;
+ rte_le32_t vni;
+ rte_le32_t tsl;
+ rte_le32_t tsh:16;
+ rte_le32_t wqe_id:16;
+ rte_le16_t rsv2[3];
+ rte_le16_t rsv3:15;
+ rte_le16_t owner:1;
+} __rte_packed_end;
+
+struct xsc_cqe_u64 {
+ struct xsc_cqe cqe0;
+ struct xsc_cqe cqe1;
+};
+
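+/*
+ * Doorbells are written as one 32-bit little-endian word; the views below
+ * pack the ring index into the low bits (16 for CQ/SQ, 13 for RQ) with
+ * the queue number above it. Assuming the usual low-to-high bitfield
+ * packing, e.g. cq_num=5 with next_cid=42 yields 0x0005002a.
+ */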
+union xsc_cq_doorbell {
+ struct {
+ uint32_t next_cid:16;
+ uint32_t cq_num:15;
+ uint32_t cq_sta:1;
+ };
+ uint32_t cq_data;
+};
+
+union xsc_send_doorbell {
+ struct {
+ uint32_t next_pid:16;
+ uint32_t qp_num:15;
+ uint32_t rsv:1;
+ };
+ uint32_t send_data;
+};
+
+struct xsc_tx_cq_params {
+ uint16_t port_id;
+ uint16_t qp_id;
+ uint16_t elts_n;
+};
+
+struct xsc_tx_cq_info {
+ void *cq;
+ void *cqes;
+ uint32_t *cq_db;
+ uint32_t cqn;
+ uint16_t cqe_s;
+ uint16_t cqe_n;
+};
+
+struct xsc_tx_qp_params {
+ void *cq;
+ uint64_t tx_offloads;
+ uint16_t port_id;
+ uint16_t qp_id;
+ uint16_t elts_n;
+};
+
+struct xsc_tx_qp_info {
+ void *qp;
+ void *wqes;
+ uint32_t *qp_db;
+ uint32_t qpn;
+ uint16_t tso_en;
+ uint16_t wqe_n;
+};
+
+union xsc_recv_doorbell {
+ struct {
+ uint32_t next_pid:13;
+ uint32_t qp_num:15;
+ uint32_t rsv:4;
+ };
+ uint32_t recv_data;
+};
+
+struct xsc_rx_cq_params {
+ uint16_t port_id;
+ uint16_t qp_id;
+ uint16_t wqe_s;
+};
+
+struct xsc_rx_cq_info {
+ void *cq;
+ void *cqes;
+ uint32_t *cq_db;
+ uint32_t cqn;
+ uint16_t cqe_n;
+};
+
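+/*
+ * CQE ownership: the owner bit toggles on every wrap of the CQ ring, so
+ * software owns a CQE when its owner bit equals the wrap counter, bit
+ * cqe_n of the consumer index (with cqe_n=8, owner == ((ci >> 8) & 1)).
+ * A software-owned CQE with is_error set and no payload is reported as
+ * XSC_CQE_OWNER_ERR.
+ */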
+static __rte_always_inline int
+xsc_check_cqe_own(volatile struct xsc_cqe *cqe, const uint16_t cqe_n, const uint16_t ci)
+{
+ if (unlikely(((cqe->owner & XSC_CQE_OWNER_MASK) != ((ci >> cqe_n) & XSC_CQE_OWNER_MASK))))
+ return XSC_CQE_OWNER_HW;
+
+ rte_io_rmb();
+ if (cqe->msg_len <= 0 && cqe->is_error)
+ return XSC_CQE_OWNER_ERR;
+
+ return XSC_CQE_OWNER_SW;
+}
+
+#endif /* _XSC_RXTX_H_ */
diff --git a/drivers/net/xsc/xsc_tx.c b/drivers/net/xsc/xsc_tx.c
new file mode 100644
index 0000000..d1a0f32
--- /dev/null
+++ b/drivers/net/xsc/xsc_tx.c
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#include <rte_io.h>
+
+#include "xsc_log.h"
+#include "xsc_defs.h"
+#include "xsc_dev.h"
+#include "xsc_ethdev.h"
+#include "xsc_cmd.h"
+#include "xsc_tx.h"
+#include "xsc_np.h"
+
+void
+xsc_txq_elts_alloc(struct xsc_txq_data *txq_data)
+{
+ const uint32_t elts_s = 1 << txq_data->elts_n;
+ uint32_t i;
+
+ for (i = 0; i < elts_s; ++i)
+ txq_data->elts[i] = NULL;
+ txq_data->elts_head = 0;
+ txq_data->elts_tail = 0;
+ txq_data->elts_comp = 0;
+}
+
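+/*
+ * Create the send CQ first, then the send QP bound to it; all ring
+ * geometry (counts, masks, log sizes, DS-per-WQE) is cached in txq_data
+ * so the datapath never calls back into the backend.
+ */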
+int
+xsc_txq_obj_new(struct xsc_dev *xdev, struct xsc_txq_data *txq_data,
+ uint64_t offloads, uint16_t idx)
+{
+ int ret = 0;
+ struct xsc_tx_cq_params cq_params = {0};
+ struct xsc_tx_cq_info cq_info = {0};
+ struct xsc_tx_qp_params qp_params = {0};
+ struct xsc_tx_qp_info qp_info = {0};
+
+ cq_params.port_id = txq_data->port_id;
+ cq_params.qp_id = txq_data->idx;
+ cq_params.elts_n = txq_data->elts_n;
+ ret = xsc_dev_tx_cq_create(xdev, &cq_params, &cq_info);
+ if (ret) {
+ rte_errno = errno;
+ goto error;
+ }
+
+ txq_data->cq = cq_info.cq;
+ txq_data->cqe_n = cq_info.cqe_n;
+ txq_data->cqe_s = cq_info.cqe_s;
+ txq_data->cq_db = cq_info.cq_db;
+ txq_data->cqn = cq_info.cqn;
+ txq_data->cqes = cq_info.cqes;
+ txq_data->cqe_m = (uint16_t)(1 << cq_info.cqe_n) - 1;
+
+ PMD_DRV_LOG(INFO, "Create tx cq, cqe_s:%d, cqe_n:%d, cq_db=%p, cqn:%u",
+ txq_data->cqe_s, txq_data->cqe_n,
+ txq_data->cq_db, txq_data->cqn);
+
+ qp_params.cq = txq_data->cq;
+ qp_params.tx_offloads = offloads;
+ qp_params.port_id = txq_data->port_id;
+ qp_params.qp_id = idx;
+ qp_params.elts_n = txq_data->elts_n;
+ ret = xsc_dev_tx_qp_create(xdev, &qp_params, &qp_info);
+
+ if (ret != 0) {
+ rte_errno = errno;
+ goto error;
+ }
+
+ txq_data->qp = qp_info.qp;
+ txq_data->qpn = qp_info.qpn;
+ txq_data->wqes = qp_info.wqes;
+ txq_data->wqe_n = qp_info.wqe_n;
+ txq_data->wqe_s = 1 << txq_data->wqe_n;
+ txq_data->wqe_m = txq_data->wqe_s - 1;
+ txq_data->wqe_ds_n = rte_log2_u32(xdev->hwinfo.send_seg_num);
+ txq_data->qp_db = qp_info.qp_db;
+
+ txq_data->cq_ci = 0;
+ txq_data->cq_pi = 0;
+ txq_data->wqe_ci = 0;
+ txq_data->wqe_pi = 0;
+ txq_data->wqe_comp = 0;
+
+ PMD_DRV_LOG(INFO, "Create tx qp, wqe_s:%d, wqe_n:%d, qp_db=%p, qpn:%u",
+ txq_data->wqe_s, txq_data->wqe_n,
+ txq_data->qp_db, txq_data->qpn);
+ return 0;
+
+error:
+ return -rte_errno;
+}
+
+void
+xsc_txq_obj_release(struct xsc_dev *xdev, struct xsc_txq_data *txq_data)
+{
+ PMD_DRV_LOG(DEBUG, "Destroy tx queue %u, portid %u",
+ txq_data->idx, txq_data->port_id);
+ if (txq_data->qp != NULL)
+ xsc_dev_destroy_qp(xdev, txq_data->qp);
+ if (txq_data->cq != NULL)
+ xsc_dev_destroy_cq(xdev, txq_data->cq);
+}
+
+void
+xsc_txq_elts_free(struct xsc_txq_data *txq_data)
+{
+ const uint16_t elts_n = 1 << txq_data->elts_n;
+ const uint16_t elts_m = elts_n - 1;
+ uint16_t elts_head = txq_data->elts_head;
+ uint16_t elts_tail = txq_data->elts_tail;
+ struct rte_mbuf *(*elts)[elts_n] = &txq_data->elts;
+
+ txq_data->elts_head = 0;
+ txq_data->elts_tail = 0;
+ txq_data->elts_comp = 0;
+
+ while (elts_tail != elts_head) {
+ struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
+
+ rte_pktmbuf_free_seg(elt);
+ ++elts_tail;
+ }
+ PMD_DRV_LOG(DEBUG, "Port %u txq %u free elts", txq_data->port_id, txq_data->idx);
+}
+
+static __rte_always_inline void
+xsc_tx_elts_flush(struct xsc_txq_data *__rte_restrict txq, uint16_t tail)
+{
+ uint16_t elts_n = tail - txq->elts_tail;
+ uint32_t free_n;
+
+ do {
+ free_n = txq->elts_s - (txq->elts_tail & txq->elts_m);
+ free_n = RTE_MIN(free_n, elts_n);
+ rte_pktmbuf_free_bulk(&txq->elts[txq->elts_tail & txq->elts_m], free_n);
+ txq->elts_tail += free_n;
+ elts_n -= free_n;
+ } while (elts_n > 0);
+}
+
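+/*
+ * Reap at most XSC_TX_COMP_CQE_HANDLE_MAX completions per call. Only the
+ * last WQE of a burst requests a CQE (cseg.ce), so one completion covers
+ * a whole burst: the elts_head snapshot stored in fcqs[] at submission
+ * time tells how far the mbuf ring may be freed.
+ */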
+static void
+xsc_tx_cqes_handle(struct xsc_txq_data *__rte_restrict txq)
+{
+ uint32_t count = XSC_TX_COMP_CQE_HANDLE_MAX;
+ volatile struct xsc_cqe *last_cqe = NULL;
+ volatile struct xsc_cqe *cqe;
+ bool doorbell = false;
+ int ret;
+ uint16_t tail;
+
+ do {
+ cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
+ ret = xsc_check_cqe_own(cqe, txq->cqe_n, txq->cq_ci);
+ if (unlikely(ret != XSC_CQE_OWNER_SW)) {
+ if (likely(ret != XSC_CQE_OWNER_ERR))
+ /* No new CQEs in completion queue. */
+ break;
+ doorbell = true;
+ ++txq->cq_ci;
+ txq->cq_pi = txq->cq_ci;
+ last_cqe = NULL;
+ ++txq->stats.tx_errors;
+ continue;
+ }
+
+ doorbell = true;
+ ++txq->cq_ci;
+ last_cqe = cqe;
+ } while (--count > 0);
+
+ if (likely(doorbell)) {
+ union xsc_cq_doorbell cq_db = {
+ .cq_data = 0
+ };
+ cq_db.next_cid = txq->cq_ci;
+ cq_db.cq_num = txq->cqn;
+
+ /* Ring doorbell */
+ rte_write32(rte_cpu_to_le_32(cq_db.cq_data), txq->cq_db);
+
+ /* Release completed elts */
+ if (likely(last_cqe != NULL)) {
+ txq->wqe_pi = rte_le_to_cpu_16(last_cqe->wqe_id) >> txq->wqe_ds_n;
+ tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
+ if (likely(tail != txq->elts_tail))
+ xsc_tx_elts_flush(txq, tail);
+ }
+ }
+}
+
+static __rte_always_inline void
+xsc_tx_wqe_ctrl_seg_init(struct xsc_txq_data *__rte_restrict txq,
+ struct rte_mbuf *__rte_restrict mbuf,
+ struct xsc_wqe *__rte_restrict wqe)
+{
+ struct xsc_send_wqe_ctrl_seg *cs = &wqe->cseg;
+ int i = 0;
+ int ds_max = (1 << txq->wqe_ds_n) - 1;
+
+ cs->msg_opcode = XSC_OPCODE_RAW;
+ cs->wqe_id = rte_cpu_to_le_16(txq->wqe_ci << txq->wqe_ds_n);
+ cs->has_pph = 0;
+	/* Clear stale dseg lengths left from the previous use of this WQE slot */
+ if (cs->ds_data_num > 1 && cs->ds_data_num <= ds_max) {
+ for (i = 1; i < cs->ds_data_num; i++)
+ wqe->dseg[i].seg_len = 0;
+ }
+
+ cs->ds_data_num = mbuf->nb_segs;
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+ cs->csum_en = 0x2;
+ else
+ cs->csum_en = 0;
+
+ if (txq->tso_en == 1 && (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+ cs->so_type = 1;
+ cs->so_hdr_len = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+ cs->so_data_size = rte_cpu_to_le_16(mbuf->tso_segsz);
+ }
+
+ cs->msg_len = rte_cpu_to_le_32(rte_pktmbuf_pkt_len(mbuf));
+ if (unlikely(cs->msg_len == 0))
+ cs->msg_len = rte_cpu_to_le_32(rte_pktmbuf_data_len(mbuf));
+
+ /* Do not generate cqe for every pkts */
+ cs->ce = 0;
+}
+
+static __rte_always_inline void
+xsc_tx_wqe_data_seg_init(struct rte_mbuf *mbuf, struct xsc_wqe *wqe)
+{
+ uint16_t i, nb_segs = mbuf->nb_segs;
+ uint32_t data_len;
+ rte_iova_t iova;
+ struct xsc_wqe_data_seg *dseg;
+
+ for (i = 0; i < nb_segs; ++i) {
+ dseg = &wqe->dseg[i];
+ iova = rte_pktmbuf_iova(mbuf);
+ data_len = rte_pktmbuf_data_len(mbuf);
+
+ dseg->in_line = 0;
+ dseg->seg_len = rte_cpu_to_le_32(data_len);
+ dseg->lkey = 0;
+ dseg->va = rte_cpu_to_le_64(iova);
+ mbuf = mbuf->next;
+ }
+}
+
+static __rte_always_inline struct xsc_wqe *
+xsc_tx_wqes_fill(struct xsc_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
+ uint32_t pkts_n)
+{
+ uint32_t i;
+ struct xsc_wqe *wqe = NULL;
+ struct rte_mbuf *mbuf;
+
+ for (i = 0; i < pkts_n; i++) {
+ rte_prefetch0(pkts[i]);
+ mbuf = pkts[i];
+ wqe = (struct xsc_wqe *)((struct xsc_send_wqe_ctrl_seg *)txq->wqes +
+ (txq->wqe_ci & txq->wqe_m) * (1 << txq->wqe_ds_n));
+
+ /* Init wqe ctrl seg */
+ xsc_tx_wqe_ctrl_seg_init(txq, mbuf, wqe);
+ /* Init wqe data segs */
+ xsc_tx_wqe_data_seg_init(mbuf, wqe);
+ ++txq->wqe_ci;
+ txq->stats.tx_bytes += rte_pktmbuf_pkt_len(mbuf);
+ }
+
+ return wqe;
+}
+
+static __rte_always_inline void
+xsc_tx_doorbell_ring(volatile uint32_t *db, uint32_t index,
+ uint32_t qpn, uint16_t ds_n)
+{
+ union xsc_send_doorbell tx_db;
+
+ tx_db.next_pid = index << ds_n;
+ tx_db.qp_num = qpn;
+
+ rte_write32(rte_cpu_to_le_32(tx_db.send_data), db);
+}
+
+static __rte_always_inline void
+xsc_tx_elts_store(struct xsc_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
+ uint32_t pkts_n)
+{
+ uint32_t part;
+ struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
+
+ part = txq->elts_s - (txq->elts_head & txq->elts_m);
+ rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
+ (void *)pkts,
+ RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
+
+ if (unlikely(part < pkts_n))
+ rte_memcpy((void *)elts, (void *)(pkts + part),
+ (pkts_n - part) * sizeof(struct rte_mbuf *));
+}
+
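+/*
+ * Transmit burst: reap completions first to reclaim ring space, fill as
+ * many WQEs as fit, request a completion on the last WQE only, ring the
+ * doorbell once per iteration, and park the mbufs in elts[] until that
+ * completion arrives.
+ */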
+uint16_t
+xsc_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct xsc_txq_data *txq = dpdk_txq;
+ uint32_t tx_n, remain_n = pkts_n;
+ uint16_t idx, elts_free, wqe_free;
+ uint16_t elts_head;
+ struct xsc_wqe *last_wqe;
+
+ if (unlikely(!pkts_n))
+ return 0;
+
+ do {
+ xsc_tx_cqes_handle(txq);
+
+ elts_free = txq->elts_s - (uint16_t)(txq->elts_head - txq->elts_tail);
+ wqe_free = txq->wqe_s - ((uint16_t)((txq->wqe_ci << txq->wqe_ds_n) -
+ (txq->wqe_pi << txq->wqe_ds_n)) >> txq->wqe_ds_n);
+ if (unlikely(elts_free == 0 || wqe_free == 0))
+ break;
+
+ /* Fill in WQEs */
+ tx_n = RTE_MIN(remain_n, wqe_free);
+ idx = pkts_n - remain_n;
+ last_wqe = xsc_tx_wqes_fill(txq, &pkts[idx], tx_n);
+ remain_n -= tx_n;
+ last_wqe->cseg.ce = 1;
+
+ /* Update free-cqs, elts_comp */
+ elts_head = txq->elts_head;
+ elts_head += tx_n;
+ if ((uint16_t)(elts_head - txq->elts_comp) > 0) {
+ txq->elts_comp = elts_head;
+ txq->fcqs[txq->cq_pi++ & txq->cqe_m] = elts_head;
+ }
+
+ /* Ring tx doorbell */
+ xsc_tx_doorbell_ring(txq->qp_db, txq->wqe_ci, txq->qpn, txq->wqe_ds_n);
+
+ xsc_tx_elts_store(txq, &pkts[idx], tx_n);
+ txq->elts_head += tx_n;
+ } while (remain_n > 0);
+
+ txq->stats.tx_pkts += (pkts_n - remain_n);
+ return pkts_n - remain_n;
+}
diff --git a/drivers/net/xsc/xsc_tx.h b/drivers/net/xsc/xsc_tx.h
new file mode 100644
index 0000000..88419dd
--- /dev/null
+++ b/drivers/net/xsc/xsc_tx.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_TX_H_
+#define _XSC_TX_H_
+
+#define XSC_TX_COMP_CQE_HANDLE_MAX 2
+
+struct xsc_txq_stats {
+ uint64_t tx_pkts; /* Total number of tx packets */
+ uint64_t tx_bytes; /* Total number of tx bytes */
+ uint64_t tx_errors; /* Total number of tx error packets */
+};
+
+struct __rte_cache_aligned xsc_txq_data {
+	uint16_t idx; /* QP idx */
+ uint16_t port_id;
+ void *cq; /* CQ pointer */
+ void *qp; /* QP pointer */
+ uint32_t cqn; /* CQ serial number */
+ uint32_t qpn; /* QP serial number */
+ uint16_t elts_head; /* Current pos in (*elts)[] */
+ uint16_t elts_tail; /* Counter of first element awaiting completion */
+ uint16_t elts_comp; /* Elts index since last completion request */
+ uint16_t elts_s; /* Number of (*elts)[] */
+ uint16_t elts_m; /* Mask of (*elts)[] number */
+ uint16_t wqe_ci; /* Consumer index for TXQ */
+ uint16_t wqe_pi; /* Producer index for TXQ */
+ uint16_t wqe_s; /* Number of WQE */
+ uint16_t wqe_m; /* Mask of WQE number */
+ uint16_t wqe_comp; /* WQE index since last completion request */
+ uint16_t cq_ci; /* Consumer index for CQ */
+ uint16_t cq_pi; /* Production index for CQ */
+ uint16_t cqe_s; /* Number of CQE */
+ uint16_t cqe_m; /* Mask of CQE number */
+ uint16_t elts_n:4; /* Log 2 of (*elts)[] number */
+ uint16_t cqe_n:4; /* Log 2 of CQE number */
+ uint16_t wqe_n:4; /* Log 2 of WQE number */
+ uint16_t wqe_ds_n:4; /* Log 2 of each WQE DS number */
+ uint64_t offloads; /* TXQ offloads */
+ struct xsc_wqe *wqes;
+ volatile struct xsc_cqe *cqes;
+ volatile uint32_t *qp_db;
+ volatile uint32_t *cq_db;
+ struct xsc_ethdev_priv *priv;
+ struct xsc_txq_stats stats;
+ uint32_t socket;
+ uint8_t tso_en:1; /* TSO enable 0-off 1-on */
+ uint8_t rsv:7;
+ uint16_t *fcqs; /* Free completion queue. */
+ struct rte_mbuf *elts[]; /* Storage for queued packets, for free */
+};
+
+uint16_t xsc_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n);
+int xsc_txq_obj_new(struct xsc_dev *xdev, struct xsc_txq_data *txq_data,
+ uint64_t offloads, uint16_t idx);
+void xsc_txq_elts_alloc(struct xsc_txq_data *txq_data);
+void xsc_txq_obj_release(struct xsc_dev *xdev, struct xsc_txq_data *txq_data);
+void xsc_txq_elts_free(struct xsc_txq_data *txq_data);
+
+#endif /* _XSC_TX_H_ */
diff --git a/drivers/net/xsc/xsc_vfio.c b/drivers/net/xsc/xsc_vfio.c
new file mode 100644
index 0000000..d030019
--- /dev/null
+++ b/drivers/net/xsc/xsc_vfio.c
@@ -0,0 +1,1128 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <linux/vfio.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+
+#include <rte_pci.h>
+#include <ethdev_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_bitops.h>
+#include <rte_interrupts.h>
+
+#include "xsc_defs.h"
+#include "xsc_vfio_mbox.h"
+#include "xsc_ethdev.h"
+#include "xsc_rxtx.h"
+
+#define XSC_FEATURE_ONCHIP_FT_MASK RTE_BIT32(4)
+#define XSC_FEATURE_DMA_RW_TBL_MASK RTE_BIT32(8)
+#define XSC_FEATURE_PCT_EXP_MASK RTE_BIT32(19)
+#define XSC_HOST_PCIE_NO_DEFAULT 0
+#define XSC_SOC_PCIE_NO_DEFAULT 1
+
+#define XSC_SW2HW_MTU(mtu) ((mtu) + 14 + 4)
+#define XSC_SW2HW_RX_PKT_LEN(mtu) ((mtu) + 14 + 256)
+
+#define MAX_INTR_VEC_ID RTE_MAX_RXTX_INTR_VEC_ID
+#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
+ sizeof(int) * (MAX_INTR_VEC_ID))
+
+enum xsc_port_status {
+ XSC_PORT_DOWN = 0,
+ XSC_PORT_UP = 1,
+};
+
+enum xsc_vector {
+ XSC_VEC_CMD = 0,
+ XSC_VEC_CMD_EVENT = 1,
+ XSC_EQ_VEC_COMP_BASE,
+};
+
+enum xsc_cq_type {
+ XSC_CQ_TYPE_NORMAL = 0,
+ XSC_CQ_TYPE_VIRTIO = 1,
+};
+
+struct xsc_vfio_cq {
+ const struct rte_memzone *mz;
+ struct xsc_dev *xdev;
+ uint32_t cqn;
+};
+
+struct xsc_vfio_qp {
+ const struct rte_memzone *mz;
+ struct xsc_dev *xdev;
+ uint32_t qpn;
+};
+
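+/*
+ * Derive which PCIe controller the function sits behind from its global
+ * function id: VFs inherit the pcie_host value of their parent PF, host
+ * PFs use the host controller, anything else is treated as the SoC side.
+ */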
+static void
+xsc_vfio_pcie_no_init(struct xsc_hwinfo *hwinfo)
+{
+	uint32_t func_id = hwinfo->func_id;
+
+ if (func_id >= hwinfo->pf0_vf_funcid_base &&
+ func_id <= hwinfo->pf0_vf_funcid_top)
+ hwinfo->pcie_no = hwinfo->pcie_host;
+ else if (func_id >= hwinfo->pf1_vf_funcid_base &&
+ func_id <= hwinfo->pf1_vf_funcid_top)
+ hwinfo->pcie_no = hwinfo->pcie_host;
+ else if (func_id >= hwinfo->pcie0_pf_funcid_base &&
+ func_id <= hwinfo->pcie0_pf_funcid_top)
+ hwinfo->pcie_no = XSC_HOST_PCIE_NO_DEFAULT;
+ else
+ hwinfo->pcie_no = XSC_SOC_PCIE_NO_DEFAULT;
+}
+
+static int
+xsc_vfio_hwinfo_init(struct xsc_dev *xdev)
+{
+ int ret;
+ uint32_t feature;
+ int in_len, out_len, cmd_len;
+ struct xsc_cmd_query_hca_cap_mbox_in *in;
+ struct xsc_cmd_query_hca_cap_mbox_out *out;
+ struct xsc_cmd_hca_cap *hca_cap;
+ void *cmd_buf;
+
+ in_len = sizeof(struct xsc_cmd_query_hca_cap_mbox_in);
+ out_len = sizeof(struct xsc_cmd_query_hca_cap_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+
+ cmd_buf = malloc(cmd_len);
+ if (cmd_buf == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc dev hwinfo cmd memory");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+
+ in = cmd_buf;
+ memset(in, 0, cmd_len);
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_QUERY_HCA_CAP);
+ in->hdr.ver = rte_cpu_to_be_16(XSC_CMD_QUERY_HCA_CAP_V1);
+ in->cpu_num = rte_cpu_to_be_16(2);
+ out = cmd_buf;
+
+ ret = xsc_vfio_mbox_exec(xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to get dev hwinfo, err=%d, out.status=%u",
+ ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ ret = -rte_errno;
+ goto exit;
+ }
+
+ hca_cap = &out->hca_cap;
+ xdev->hwinfo.valid = 1;
+ xdev->hwinfo.func_id = rte_be_to_cpu_32(hca_cap->glb_func_id);
+ xdev->hwinfo.pcie_host = hca_cap->pcie_host;
+ xdev->hwinfo.mac_phy_port = hca_cap->mac_port;
+ xdev->hwinfo.funcid_to_logic_port_off = rte_be_to_cpu_16(hca_cap->funcid_to_logic_port);
+ xdev->hwinfo.raw_qp_id_base = rte_be_to_cpu_16(hca_cap->raweth_qp_id_base);
+ xdev->hwinfo.raw_rss_qp_id_base = rte_be_to_cpu_16(hca_cap->raweth_rss_qp_id_base);
+ xdev->hwinfo.pf0_vf_funcid_base = rte_be_to_cpu_16(hca_cap->pf0_vf_funcid_base);
+ xdev->hwinfo.pf0_vf_funcid_top = rte_be_to_cpu_16(hca_cap->pf0_vf_funcid_top);
+ xdev->hwinfo.pf1_vf_funcid_base = rte_be_to_cpu_16(hca_cap->pf1_vf_funcid_base);
+ xdev->hwinfo.pf1_vf_funcid_top = rte_be_to_cpu_16(hca_cap->pf1_vf_funcid_top);
+ xdev->hwinfo.pcie0_pf_funcid_base = rte_be_to_cpu_16(hca_cap->pcie0_pf_funcid_base);
+ xdev->hwinfo.pcie0_pf_funcid_top = rte_be_to_cpu_16(hca_cap->pcie0_pf_funcid_top);
+ xdev->hwinfo.pcie1_pf_funcid_base = rte_be_to_cpu_16(hca_cap->pcie1_pf_funcid_base);
+ xdev->hwinfo.pcie1_pf_funcid_top = rte_be_to_cpu_16(hca_cap->pcie1_pf_funcid_top);
+ xdev->hwinfo.lag_port_start = hca_cap->lag_logic_port_ofst;
+ xdev->hwinfo.raw_tpe_qp_num = rte_be_to_cpu_16(hca_cap->raw_tpe_qp_num);
+ xdev->hwinfo.send_seg_num = hca_cap->send_seg_num;
+ xdev->hwinfo.recv_seg_num = hca_cap->recv_seg_num;
+ feature = rte_be_to_cpu_32(hca_cap->feature_flag);
+ xdev->hwinfo.on_chip_tbl_vld = (feature & XSC_FEATURE_ONCHIP_FT_MASK) ? 1 : 0;
+ xdev->hwinfo.dma_rw_tbl_vld = (feature & XSC_FEATURE_DMA_RW_TBL_MASK) ? 1 : 0;
+ xdev->hwinfo.pct_compress_vld = (feature & XSC_FEATURE_PCT_EXP_MASK) ? 1 : 0;
+ xdev->hwinfo.chip_version = rte_be_to_cpu_32(hca_cap->chip_ver_l);
+ xdev->hwinfo.hca_core_clock = rte_be_to_cpu_32(hca_cap->hca_core_clock);
+ xdev->hwinfo.mac_bit = hca_cap->mac_bit;
+ xdev->hwinfo.msix_base = rte_be_to_cpu_16(hca_cap->msix_base);
+ xdev->hwinfo.msix_num = rte_be_to_cpu_16(hca_cap->msix_num);
+ xsc_vfio_pcie_no_init(&xdev->hwinfo);
+
+exit:
+	free(cmd_buf);
+ return ret;
+}
+
+static int
+xsc_vfio_dev_open(struct xsc_dev *xdev)
+{
+ struct rte_pci_addr *addr = &xdev->pci_dev->addr;
+ struct xsc_vfio_priv *priv;
+
+ snprintf(xdev->name, PCI_PRI_STR_SIZE, PCI_PRI_FMT,
+ addr->domain, addr->bus, addr->devid, addr->function);
+
+ priv = rte_zmalloc(NULL, sizeof(*priv), RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc xsc vfio priv");
+ return -ENOMEM;
+ }
+
+ xdev->dev_priv = (void *)priv;
+ return 0;
+}
+
+static int
+xsc_vfio_bar_init(struct xsc_dev *xdev)
+{
+ int ret;
+
+ ret = rte_pci_map_device(xdev->pci_dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to map pci device");
+ return -EINVAL;
+ }
+
+ xdev->bar_len = xdev->pci_dev->mem_resource[0].len;
+ xdev->bar_addr = (void *)xdev->pci_dev->mem_resource[0].addr;
+ if (xdev->bar_addr == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to attach dev(%s) bar", xdev->pci_dev->device.name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+xsc_vfio_dev_close(struct xsc_dev *xdev)
+{
+ struct xsc_vfio_priv *vfio_priv = (struct xsc_vfio_priv *)xdev->dev_priv;
+
+ xsc_vfio_mbox_destroy(vfio_priv->cmdq);
+ rte_free(vfio_priv);
+
+ return 0;
+}
+
+static int
+xsc_vfio_modify_link_status(struct xsc_dev *xdev, enum xsc_port_status status)
+{
+ struct xsc_cmd_set_port_admin_status_mbox_in in = { };
+ struct xsc_cmd_set_port_admin_status_mbox_out out = { };
+ int ret = 0;
+
+ in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_SET_PORT_ADMIN_STATUS);
+ in.admin_status = rte_cpu_to_be_16(status);
+
+ ret = xsc_vfio_mbox_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ if (ret != 0 || out.hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to set link status, ret=%d, status=%d",
+ ret, out.hdr.status);
+ return -ENOEXEC;
+ }
+
+ return ret;
+}
+
+static int
+xsc_vfio_set_link_up(struct xsc_dev *xdev)
+{
+ return xsc_vfio_modify_link_status(xdev, XSC_PORT_UP);
+}
+
+static int
+xsc_vfio_set_link_down(struct xsc_dev *xdev)
+{
+ return xsc_vfio_modify_link_status(xdev, XSC_PORT_DOWN);
+}
+
+static int
+xsc_vfio_get_link_info(struct xsc_dev *xdev, struct rte_eth_link *link)
+{
+ struct xsc_cmd_query_linkinfo_mbox_in in = { };
+ struct xsc_cmd_query_linkinfo_mbox_out out = { };
+ struct xsc_cmd_linkinfo linkinfo;
+ int ret;
+
+ in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_QUERY_LINK_INFO);
+ ret = xsc_vfio_mbox_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ if (ret != 0 || out.hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to get link info, ret=%d, status=%d",
+ ret, out.hdr.status);
+ return -ENOEXEC;
+ }
+
+ memcpy(&linkinfo, &out.ctx, sizeof(struct xsc_cmd_linkinfo));
+
+ link->link_speed = rte_be_to_cpu_32(linkinfo.linkspeed);
+ link->link_duplex = linkinfo.duplex;
+ link->link_autoneg = linkinfo.autoneg;
+
+ return 0;
+}
+
+static int
+xsc_vfio_link_update(struct xsc_dev *xdev, __rte_unused int wait_to_complete)
+{
+ int ret;
+ uint8_t linkup;
+ struct xsc_cmd_query_vport_state_in in = { };
+ struct xsc_cmd_query_vport_state_out out = { };
+ struct rte_eth_link *origin_link = &xdev->pf_dev_link;
+ struct rte_eth_link link;
+ uint16_t vport = 0;
+
+ in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_QUERY_VPORT_STATE);
+ in.vport_number = rte_cpu_to_be_16(vport);
+ ret = xsc_vfio_mbox_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ if (ret != 0 || out.hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to get port state, ret=%d, status=%d",
+ ret, out.hdr.status);
+ return -ENOEXEC;
+ }
+
+ linkup = out.state;
+ link.link_status = linkup ? XSC_PORT_UP : XSC_PORT_DOWN;
+
+ ret = xsc_vfio_get_link_info(xdev, &link);
+ if (ret)
+ return ret;
+
+ ret = !memcmp(origin_link, &link, sizeof(struct rte_eth_link));
+ xdev->pf_dev_link = link;
+ return ret;
+}
+
+static int
+xsc_vfio_destroy_qp(void *qp)
+{
+ int ret;
+ int in_len, out_len, cmd_len;
+ struct xsc_cmd_destroy_qp_mbox_in *in;
+ struct xsc_cmd_destroy_qp_mbox_out *out;
+ struct xsc_vfio_qp *data = (struct xsc_vfio_qp *)qp;
+ void *cmd_buf;
+
+ in_len = sizeof(struct xsc_cmd_destroy_qp_mbox_in);
+ out_len = sizeof(struct xsc_cmd_destroy_qp_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+
+ cmd_buf = malloc(cmd_len);
+ if (cmd_buf == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc qp destroy cmd memory");
+ return -rte_errno;
+ }
+
+ in = cmd_buf;
+ memset(in, 0, cmd_len);
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_DESTROY_QP);
+ in->qpn = rte_cpu_to_be_32(data->qpn);
+ out = cmd_buf;
+ ret = xsc_vfio_mbox_exec(data->xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to destroy qp, type=%d, err=%d, out.status=%u",
+ XSC_QUEUE_TYPE_RAW, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ ret = -rte_errno;
+ goto exit;
+ }
+
+ rte_memzone_free(data->mz);
+ rte_free(qp);
+
+exit:
+ free(cmd_buf);
+ return ret;
+}
+
+static int
+xsc_vfio_destroy_cq(void *cq)
+{
+ int ret;
+ int in_len, out_len, cmd_len;
+ struct xsc_cmd_destroy_cq_mbox_in *in;
+ struct xsc_cmd_destroy_cq_mbox_out *out;
+ struct xsc_vfio_cq *data = (struct xsc_vfio_cq *)cq;
+ void *cmd_buf;
+
+ in_len = sizeof(struct xsc_cmd_destroy_cq_mbox_in);
+ out_len = sizeof(struct xsc_cmd_destroy_cq_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+
+ cmd_buf = malloc(cmd_len);
+ if (cmd_buf == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc cq destroy cmd memory");
+ return -rte_errno;
+ }
+
+ in = cmd_buf;
+ memset(in, 0, cmd_len);
+
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_DESTROY_CQ);
+ in->cqn = rte_cpu_to_be_32(data->cqn);
+ out = cmd_buf;
+ ret = xsc_vfio_mbox_exec(data->xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to destroy cq, type=%d, err=%d, out.status=%u",
+ XSC_QUEUE_TYPE_RAW, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ ret = -rte_errno;
+ goto exit;
+ }
+
+ rte_memzone_free(data->mz);
+ rte_free(cq);
+
+exit:
+ free(cmd_buf);
+ return ret;
+}
+
+static int
+xsc_vfio_set_mtu(struct xsc_dev *xdev, uint16_t mtu)
+{
+ struct xsc_cmd_set_mtu_mbox_in in = { };
+ struct xsc_cmd_set_mtu_mbox_out out = { };
+ int ret;
+
+ in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_SET_MTU);
+ in.mtu = rte_cpu_to_be_16(XSC_SW2HW_MTU(mtu));
+ in.rx_buf_sz_min = rte_cpu_to_be_16(XSC_SW2HW_RX_PKT_LEN(mtu));
+ in.mac_port = (uint8_t)xdev->hwinfo.mac_phy_port;
+
+ ret = xsc_vfio_mbox_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ if (ret != 0 || out.hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to set mtu, port=%d, err=%d, out.status=%u",
+ xdev->port_id, ret, out.hdr.status);
+ rte_errno = ENOEXEC;
+ ret = -rte_errno;
+ }
+
+ return ret;
+}
+
+static int
+xsc_vfio_get_mac(struct xsc_dev *xdev, uint8_t *mac)
+{
+ struct xsc_cmd_query_eth_mac_mbox_in in = { };
+ struct xsc_cmd_query_eth_mac_mbox_out out = { };
+ int ret;
+
+ in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_QUERY_ETH_MAC);
+ ret = xsc_vfio_mbox_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ if (ret != 0 || out.hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to get mtu, port=%d, err=%d, out.status=%u",
+ xdev->port_id, ret, out.hdr.status);
+ rte_errno = ENOEXEC;
+ return -rte_errno;
+ }
+
+ memcpy(mac, out.mac, RTE_ETHER_ADDR_LEN);
+
+ return 0;
+}
+
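+/*
+ * Transition a contiguous range of QPs (qpn .. qpn + num - 1), issuing
+ * one modify-QP mailbox command per queue with no_need_wait set.
+ */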
+static int
+xsc_vfio_modify_qp_status(struct xsc_dev *xdev, uint32_t qpn, int num, int opcode)
+{
+	int i, ret = 0;
+ int in_len, out_len, cmd_len;
+ struct xsc_cmd_modify_qp_mbox_in *in;
+ struct xsc_cmd_modify_qp_mbox_out *out;
+ void *cmd_buf;
+
+ in_len = sizeof(struct xsc_cmd_modify_qp_mbox_in);
+ out_len = sizeof(struct xsc_cmd_modify_qp_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+
+ cmd_buf = malloc(cmd_len);
+ if (cmd_buf == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc cmdq qp modify status");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+
+ in = cmd_buf;
+ memset(in, 0, cmd_len);
+ out = cmd_buf;
+
+ for (i = 0; i < num; i++) {
+ in->hdr.opcode = rte_cpu_to_be_16(opcode);
+ in->hdr.ver = 0;
+ in->qpn = rte_cpu_to_be_32(qpn + i);
+ in->no_need_wait = 1;
+
+ ret = xsc_vfio_mbox_exec(xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Modify qp status failed, qpn=%u, err=%d, out.status=%u",
+ qpn + i, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ ret = -rte_errno;
+ goto exit;
+ }
+ }
+
+exit:
+ free(cmd_buf);
+ return ret;
+}
+
+static int
+xsc_vfio_modify_qp_qostree(struct xsc_dev *xdev, uint16_t qpn)
+{
+ int ret;
+ int in_len, out_len, cmd_len;
+ struct xsc_cmd_modify_raw_qp_mbox_in *in;
+ struct xsc_cmd_modify_raw_qp_mbox_out *out;
+ void *cmd_buf;
+
+ in_len = sizeof(struct xsc_cmd_modify_raw_qp_mbox_in);
+ out_len = sizeof(struct xsc_cmd_modify_raw_qp_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+
+ cmd_buf = malloc(cmd_len);
+ if (cmd_buf == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc cmdq qp modify qostree");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+
+ in = cmd_buf;
+ memset(in, 0, cmd_len);
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_MODIFY_RAW_QP);
+ in->req.prio = 0;
+ in->req.qp_out_port = 0xFF;
+ in->req.lag_id = rte_cpu_to_be_16(xdev->hwinfo.lag_id);
+ in->req.func_id = rte_cpu_to_be_16(xdev->hwinfo.func_id);
+ in->req.dma_direct = 0;
+ in->req.qpn = rte_cpu_to_be_16(qpn);
+ out = cmd_buf;
+
+ ret = xsc_vfio_mbox_exec(xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Filed to modify qp qostree, qpn=%d, err=%d, out.status=%u",
+ qpn, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ ret = -rte_errno;
+ goto exit;
+ }
+
+exit:
+ free(cmd_buf);
+ return ret;
+}
+
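+/*
+ * Allocate the CQE ring as page-aligned memzone memory, pass its page
+ * list to firmware via XSC_CMD_OP_CREATE_CQ, and preset every CQE's
+ * owner bit to 1 so the ring reads as hardware-owned until firmware
+ * writes the first completions.
+ */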
+static int
+xsc_vfio_rx_cq_create(struct xsc_dev *xdev, struct xsc_rx_cq_params *cq_params,
+ struct xsc_rx_cq_info *cq_info)
+{
+ int ret;
+ int pa_len;
+ uint16_t i;
+ uint16_t pa_num;
+ uint8_t log_cq_sz;
+ uint16_t cqe_n;
+ uint32_t cqe_total_sz;
+ int in_len, out_len, cmd_len;
+ char name[RTE_ETH_NAME_MAX_LEN] = { 0 };
+ uint16_t port_id = cq_params->port_id;
+ uint16_t idx = cq_params->qp_id;
+ struct xsc_vfio_cq *cq;
+ const struct rte_memzone *cq_pas = NULL;
+ struct xsc_cqe *cqes;
+ struct xsc_cmd_create_cq_mbox_in *in = NULL;
+ struct xsc_cmd_create_cq_mbox_out *out = NULL;
+ void *cmd_buf;
+
+ cqe_n = cq_params->wqe_s;
+ log_cq_sz = rte_log2_u32(cqe_n);
+ cqe_total_sz = cqe_n * sizeof(struct xsc_cqe);
+ pa_num = (cqe_total_sz + XSC_PAGE_SIZE - 1) / XSC_PAGE_SIZE;
+ pa_len = sizeof(uint64_t) * pa_num;
+ in_len = sizeof(struct xsc_cmd_create_cq_mbox_in) + pa_len;
+ out_len = sizeof(struct xsc_cmd_create_cq_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+
+ cq = rte_zmalloc(NULL, sizeof(struct xsc_vfio_cq), 0);
+ if (cq == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc rx cq memory");
+ return -rte_errno;
+ }
+
+ cmd_buf = malloc(cmd_len);
+ if (cmd_buf == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc rx cq exec cmd memory");
+ goto error;
+ }
+
+ in = cmd_buf;
+ memset(in, 0, cmd_len);
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_CREATE_CQ);
+ in->ctx.eqn = 0;
+ in->ctx.pa_num = rte_cpu_to_be_16(pa_num);
+ in->ctx.glb_func_id = rte_cpu_to_be_16((uint16_t)xdev->hwinfo.func_id);
+ in->ctx.log_cq_sz = log_cq_sz;
+ in->ctx.cq_type = XSC_CQ_TYPE_NORMAL;
+
+ snprintf(name, sizeof(name), "mz_cqe_mem_rx_%u_%u", port_id, idx);
+ cq_pas = rte_memzone_reserve_aligned(name,
+ (XSC_PAGE_SIZE * pa_num),
+ SOCKET_ID_ANY,
+ 0, XSC_PAGE_SIZE);
+ if (cq_pas == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc rx cq pas memory");
+ goto error;
+ }
+ cq->mz = cq_pas;
+
+ for (i = 0; i < pa_num; i++)
+ in->pas[i] = rte_cpu_to_be_64(cq_pas->iova + i * XSC_PAGE_SIZE);
+
+ out = cmd_buf;
+ ret = xsc_vfio_mbox_exec(xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to exec rx cq create cmd, port id=%d, err=%d, out.status=%u",
+ port_id, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ goto error;
+ }
+
+ cq_info->cq = (void *)cq;
+ cq_info->cqe_n = log_cq_sz;
+ cqes = (struct xsc_cqe *)cq_pas->addr;
+ for (i = 0; i < (1 << cq_info->cqe_n); i++)
+ ((volatile struct xsc_cqe *)(cqes + i))->owner = 1;
+ cq_info->cqes = cqes;
+ if (xsc_dev_is_vf(xdev))
+ cq_info->cq_db = (uint32_t *)((uint8_t *)xdev->bar_addr + XSC_VF_CQ_DB_ADDR);
+ else
+ cq_info->cq_db = (uint32_t *)((uint8_t *)xdev->bar_addr + XSC_PF_CQ_DB_ADDR);
+ cq_info->cqn = rte_be_to_cpu_32(out->cqn);
+ cq->cqn = cq_info->cqn;
+ cq->xdev = xdev;
+ PMD_DRV_LOG(INFO, "Port id=%d, Rx cqe_n:%d, cqn:%u",
+ port_id, cq_info->cqe_n, cq_info->cqn);
+
+ free(cmd_buf);
+ return 0;
+
+error:
+ free(cmd_buf);
+ rte_memzone_free(cq_pas);
+ rte_free(cq);
+ return -rte_errno;
+}
+
+static int
+xsc_vfio_tx_cq_create(struct xsc_dev *xdev, struct xsc_tx_cq_params *cq_params,
+ struct xsc_tx_cq_info *cq_info)
+{
+ struct xsc_vfio_cq *cq = NULL;
+ char name[RTE_ETH_NAME_MAX_LEN] = {0};
+ struct xsc_cmd_create_cq_mbox_in *in = NULL;
+ struct xsc_cmd_create_cq_mbox_out *out = NULL;
+ const struct rte_memzone *cq_pas = NULL;
+ struct xsc_cqe *cqes;
+ int in_len, out_len, cmd_len;
+ uint16_t pa_num;
+ uint16_t log_cq_sz;
+ int ret = 0;
+ int cqe_s = 1 << cq_params->elts_n;
+ uint64_t iova;
+ int i;
+ void *cmd_buf = NULL;
+
+ cq = rte_zmalloc(NULL, sizeof(struct xsc_vfio_cq), 0);
+ if (cq == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc tx cq memory");
+ return -rte_errno;
+ }
+
+ log_cq_sz = rte_log2_u32(cqe_s);
+ pa_num = (((1 << log_cq_sz) * sizeof(struct xsc_cqe)) / XSC_PAGE_SIZE);
+
+ snprintf(name, sizeof(name), "mz_cqe_mem_tx_%u_%u", cq_params->port_id, cq_params->qp_id);
+ cq_pas = rte_memzone_reserve_aligned(name,
+ (XSC_PAGE_SIZE * pa_num),
+ SOCKET_ID_ANY,
+ 0, XSC_PAGE_SIZE);
+ if (cq_pas == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc tx cq pas memory");
+ goto error;
+ }
+
+ cq->mz = cq_pas;
+ in_len = (sizeof(struct xsc_cmd_create_cq_mbox_in) + (pa_num * sizeof(uint64_t)));
+ out_len = sizeof(struct xsc_cmd_create_cq_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+ cmd_buf = malloc(cmd_len);
+ if (cmd_buf == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc tx cq exec cmd memory");
+ goto error;
+ }
+
+ in = cmd_buf;
+ memset(in, 0, cmd_len);
+
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_CREATE_CQ);
+ in->ctx.eqn = 0;
+ in->ctx.pa_num = rte_cpu_to_be_16(pa_num);
+ in->ctx.glb_func_id = rte_cpu_to_be_16((uint16_t)xdev->hwinfo.func_id);
+ in->ctx.log_cq_sz = rte_log2_u32(cqe_s);
+ in->ctx.cq_type = XSC_CQ_TYPE_NORMAL;
+ iova = cq->mz->iova;
+ for (i = 0; i < pa_num; i++)
+ in->pas[i] = rte_cpu_to_be_64(iova + i * XSC_PAGE_SIZE);
+
+ out = cmd_buf;
+ ret = xsc_vfio_mbox_exec(xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to create tx cq, port id=%u, err=%d, out.status=%u",
+ cq_params->port_id, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ goto error;
+ }
+
+ cq->cqn = rte_be_to_cpu_32(out->cqn);
+ cq->xdev = xdev;
+
+ cq_info->cq = cq;
+ cqes = (struct xsc_cqe *)((uint8_t *)cq->mz->addr);
+ if (xsc_dev_is_vf(xdev))
+ cq_info->cq_db = (uint32_t *)((uint8_t *)xdev->bar_addr + XSC_VF_CQ_DB_ADDR);
+ else
+ cq_info->cq_db = (uint32_t *)((uint8_t *)xdev->bar_addr + XSC_PF_CQ_DB_ADDR);
+ cq_info->cqn = cq->cqn;
+ cq_info->cqe_s = cqe_s;
+ cq_info->cqe_n = log_cq_sz;
+
+ for (i = 0; i < cq_info->cqe_s; i++)
+ ((volatile struct xsc_cqe *)(cqes + i))->owner = 1;
+ cq_info->cqes = cqes;
+
+ free(cmd_buf);
+ return 0;
+
+error:
+ free(cmd_buf);
+ rte_memzone_free(cq_pas);
+ rte_free(cq);
+ return -rte_errno;
+}
+
+static int
+xsc_vfio_tx_qp_create(struct xsc_dev *xdev, struct xsc_tx_qp_params *qp_params,
+ struct xsc_tx_qp_info *qp_info)
+{
+ struct xsc_cmd_create_qp_mbox_in *in = NULL;
+ struct xsc_cmd_create_qp_mbox_out *out = NULL;
+ const struct rte_memzone *qp_pas = NULL;
+ struct xsc_vfio_cq *cq = (struct xsc_vfio_cq *)qp_params->cq;
+ struct xsc_vfio_qp *qp = NULL;
+ int in_len, out_len, cmd_len;
+ int ret = 0;
+ uint32_t send_ds_num = xdev->hwinfo.send_seg_num;
+ int wqe_s = 1 << qp_params->elts_n;
+ uint16_t pa_num;
+ uint8_t log_ele = 0;
+ uint32_t log_rq_sz = 0;
+ uint32_t log_sq_sz = 0;
+ int i;
+ uint64_t iova;
+ char name[RTE_ETH_NAME_MAX_LEN] = {0};
+ void *cmd_buf = NULL;
+
+ qp = rte_zmalloc(NULL, sizeof(struct xsc_vfio_qp), 0);
+ if (qp == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc tx qp memory");
+ return -rte_errno;
+ }
+
+ log_sq_sz = rte_log2_u32(wqe_s * send_ds_num);
+ log_ele = rte_log2_u32(sizeof(struct xsc_wqe_data_seg));
+ pa_num = ((1 << (log_rq_sz + log_sq_sz + log_ele))) / XSC_PAGE_SIZE;
+
+ snprintf(name, sizeof(name), "mz_wqe_mem_tx_%u_%u", qp_params->port_id, qp_params->qp_id);
+ qp_pas = rte_memzone_reserve_aligned(name,
+ (XSC_PAGE_SIZE * pa_num),
+ SOCKET_ID_ANY,
+ 0, XSC_PAGE_SIZE);
+ if (qp_pas == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc tx qp pas memory");
+ goto error;
+ }
+ qp->mz = qp_pas;
+
+ in_len = (sizeof(struct xsc_cmd_create_qp_mbox_in) + (pa_num * sizeof(uint64_t)));
+ out_len = sizeof(struct xsc_cmd_create_qp_mbox_out);
+ cmd_len = RTE_MAX(in_len, out_len);
+ cmd_buf = malloc(cmd_len);
+ if (cmd_buf == NULL) {
+ rte_errno = ENOMEM;
+ PMD_DRV_LOG(ERR, "Failed to alloc tx qp exec cmd memory");
+ goto error;
+ }
+
+ in = cmd_buf;
+ memset(in, 0, cmd_len);
+
+ in->hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_CREATE_QP);
+ in->req.input_qpn = 0;
+ in->req.pa_num = rte_cpu_to_be_16(pa_num);
+ in->req.qp_type = XSC_QUEUE_TYPE_RAW_TX;
+ in->req.log_sq_sz = log_sq_sz;
+ in->req.log_rq_sz = log_rq_sz;
+ in->req.dma_direct = 0;
+ in->req.pdn = 0;
+ in->req.cqn_send = rte_cpu_to_be_16((uint16_t)cq->cqn);
+ in->req.cqn_recv = 0;
+ in->req.glb_funcid = rte_cpu_to_be_16((uint16_t)xdev->hwinfo.func_id);
+ iova = qp->mz->iova;
+ for (i = 0; i < pa_num; i++)
+ in->req.pas[i] = rte_cpu_to_be_64(iova + i * XSC_PAGE_SIZE);
+
+ out = cmd_buf;
+ ret = xsc_vfio_mbox_exec(xdev, in, in_len, out, out_len);
+ if (ret != 0 || out->hdr.status != 0) {
+ PMD_DRV_LOG(ERR, "Failed to create tx qp, port id=%u, err=%d, out.status=%u",
+ qp_params->port_id, ret, out->hdr.status);
+ rte_errno = ENOEXEC;
+ goto error;
+ }
+
+ qp->qpn = rte_be_to_cpu_32(out->qpn);
+ qp->xdev = xdev;
+
+ qp_info->qp = qp;
+ qp_info->qpn = qp->qpn;
+ qp_info->wqes = (struct xsc_wqe *)qp->mz->addr;
+ qp_info->wqe_n = rte_log2_u32(wqe_s);
+
+ if (xsc_dev_is_vf(xdev))
+ qp_info->qp_db = (uint32_t *)((uint8_t *)xdev->bar_addr + XSC_VF_TX_DB_ADDR);
+ else
+ qp_info->qp_db = (uint32_t *)((uint8_t *)xdev->bar_addr + XSC_PF_TX_DB_ADDR);
+
+ free(cmd_buf);
+ return 0;
+
+error:
+ free(cmd_buf);
+ rte_memzone_free(qp_pas);
+ rte_free(qp);
+ return -rte_errno;
+}
+
+static int
+xsc_vfio_irq_info_get(struct rte_intr_handle *intr_handle)
+{
+ struct vfio_irq_info irq = { .argsz = sizeof(irq) };
+ int rc, vfio_dev_fd;
+
+ irq.index = VFIO_PCI_MSIX_IRQ_INDEX;
+
+ vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
+ rc = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
+ if (rc < 0) {
+ PMD_DRV_LOG(ERR, "Failed to get IRQ info rc=%d errno=%d", rc, errno);
+ return rc;
+ }
+
+ PMD_DRV_LOG(INFO, "Flags=0x%x index=0x%x count=0x%x max_intr_vec_id=0x%x",
+ irq.flags, irq.index, irq.count, MAX_INTR_VEC_ID);
+
+ if (rte_intr_max_intr_set(intr_handle, irq.count))
+ return -1;
+
+ return 0;
+}
+
+static int
+xsc_vfio_irq_init(struct rte_intr_handle *intr_handle)
+{
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int len, rc, vfio_dev_fd;
+ int32_t *fd_ptr;
+ uint32_t i;
+
+ if (rte_intr_max_intr_get(intr_handle) > MAX_INTR_VEC_ID) {
+ PMD_DRV_LOG(ERR, "Max_intr=%d greater than MAX_INTR_VEC_ID=%d",
+ rte_intr_max_intr_get(intr_handle),
+ MAX_INTR_VEC_ID);
+ return -ERANGE;
+ }
+
+ len = sizeof(struct vfio_irq_set) +
+ sizeof(int32_t) * rte_intr_max_intr_get(intr_handle);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->start = 0;
+	irq_set->count = rte_intr_max_intr_get(intr_handle);
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+
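+	/*
+	 * Prime every vector with fd = -1: VFIO enables the whole MSI-X
+	 * block but leaves the vectors untriggered until a real eventfd
+	 * is attached per vector in xsc_vfio_irq_config().
+	 */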
+ fd_ptr = (int32_t *)&irq_set->data[0];
+ for (i = 0; i < irq_set->count; i++)
+ fd_ptr[i] = -1;
+
+ vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
+ rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (rc)
+ PMD_DRV_LOG(ERR, "Failed to set irqs vector rc=%d", rc);
+
+ return rc;
+}
+
+static int
+xsc_vfio_irq_config(struct rte_intr_handle *intr_handle, unsigned int vec)
+{
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int len, rc, vfio_dev_fd;
+ int32_t *fd_ptr;
+
+ if (vec > (uint32_t)rte_intr_max_intr_get(intr_handle)) {
+ PMD_DRV_LOG(INFO, "Vector=%d greater than max_intr=%d", vec,
+ rte_intr_max_intr_get(intr_handle));
+ return -EINVAL;
+ }
+
+ len = sizeof(struct vfio_irq_set) + sizeof(int32_t);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+
+ irq_set->start = vec;
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+
+ /* Use vec fd to set interrupt vectors */
+ fd_ptr = (int32_t *)&irq_set->data[0];
+ fd_ptr[0] = rte_intr_efds_index_get(intr_handle, vec);
+
+ vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
+ rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (rc)
+ PMD_DRV_LOG(INFO, "Failed to set_irqs vector=0x%x rc=%d", vec, rc);
+
+ return rc;
+}
+
+static int
+xsc_vfio_irq_register(struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb, void *data, unsigned int vec)
+{
+ struct rte_intr_handle *tmp_handle;
+ uint32_t nb_efd, tmp_nb_efd;
+ int rc, fd;
+
+	if (rte_intr_max_intr_get(intr_handle) == 0) {
+		rc = xsc_vfio_irq_info_get(intr_handle);
+		if (rc != 0)
+			return rc;
+		rc = xsc_vfio_irq_init(intr_handle);
+		if (rc != 0)
+			return rc;
+	}
+
+ if (vec > (uint32_t)rte_intr_max_intr_get(intr_handle)) {
+ PMD_DRV_LOG(INFO, "Vector=%d greater than max_intr=%d", vec,
+ rte_intr_max_intr_get(intr_handle));
+ return -EINVAL;
+ }
+
+ tmp_handle = intr_handle;
+ /* Create new eventfd for interrupt vector */
+ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (fd == -1)
+ return -ENODEV;
+
+ if (rte_intr_fd_set(tmp_handle, fd))
+ return errno;
+
+ /* Register vector interrupt callback */
+ rc = rte_intr_callback_register(tmp_handle, cb, data);
+ if (rc) {
+ PMD_DRV_LOG(INFO, "Failed to register vector:0x%x irq callback.", vec);
+ return rc;
+ }
+
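+	/* Track the new eventfd in the rte_intr handle so the efd count
+	 * stays in step with what has been programmed into VFIO.
+	 */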
+ rte_intr_efds_index_set(intr_handle, vec, fd);
+ nb_efd = (vec > (uint32_t)rte_intr_nb_efd_get(intr_handle)) ?
+ vec : (uint32_t)rte_intr_nb_efd_get(intr_handle);
+ rte_intr_nb_efd_set(intr_handle, nb_efd);
+
+ tmp_nb_efd = rte_intr_nb_efd_get(intr_handle) + 1;
+ if (tmp_nb_efd > (uint32_t)rte_intr_max_intr_get(intr_handle))
+ rte_intr_max_intr_set(intr_handle, tmp_nb_efd);
+
+ PMD_DRV_LOG(INFO, "Enable vector:0x%x for vfio (efds: %d, max:%d)", vec,
+ rte_intr_nb_efd_get(intr_handle),
+ rte_intr_max_intr_get(intr_handle));
+
+ /* Enable MSIX vectors to VFIO */
+ return xsc_vfio_irq_config(intr_handle, vec);
+}
+
+static int
+xsc_vfio_msix_enable(struct xsc_dev *xdev)
+{
+ struct xsc_cmd_msix_table_info_mbox_in in = { };
+ struct xsc_cmd_msix_table_info_mbox_out out = { };
+ int ret;
+
+ in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_ENABLE_MSIX);
+ ret = xsc_vfio_mbox_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ if (ret != 0 || out.hdr.status != 0) {
+		PMD_DRV_LOG(ERR, "Failed to enable msix, ret=%d, status=%d",
+			    ret, out.hdr.status);
+ return ret;
+ }
+
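+	/* Point the HIF cmdq block at our MSI-X vector base so cmdq
+	 * event interrupts land on the vector registered above (an
+	 * assumption from the XSC_HIF_CMDQM_VECTOR_ID_MEM_ADDR name).
+	 */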
+ rte_write32(xdev->hwinfo.msix_base,
+ (uint8_t *)xdev->bar_addr + XSC_HIF_CMDQM_VECTOR_ID_MEM_ADDR);
+
+ return 0;
+}
+
+static int
+xsc_vfio_event_get(struct xsc_dev *xdev)
+{
+ int ret;
+ struct xsc_cmd_event_query_type_mbox_in in = { };
+ struct xsc_cmd_event_query_type_mbox_out out = { };
+
+ in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_QUERY_EVENT_TYPE);
+ ret = xsc_vfio_mbox_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ if (ret != 0 || out.hdr.status != 0) {
+		PMD_DRV_LOG(ERR, "Failed to query event type, ret=%d, status=%d",
+			    ret, out.hdr.status);
+ return -1;
+ }
+
+ return out.ctx.resp_event_type;
+}
+
+static int
+xsc_vfio_intr_handler_install(struct xsc_dev *xdev, rte_intr_callback_fn cb, void *cb_arg)
+{
+ int ret;
+ struct rte_intr_handle *intr_handle = xdev->pci_dev->intr_handle;
+
+ ret = xsc_vfio_irq_register(intr_handle, cb, cb_arg, XSC_VEC_CMD_EVENT);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to register vfio irq, ret=%d", ret);
+ return ret;
+ }
+
+ ret = xsc_vfio_msix_enable(xdev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to enable vfio msix, ret=%d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+xsc_vfio_intr_handler_uninstall(struct xsc_dev *xdev)
+{
+ rte_intr_instance_free(xdev->intr_handle);
+
+ return 0;
+}
+
+static int
+xsc_vfio_function_reset(struct xsc_dev *xdev)
+{
+ struct xsc_cmd_function_reset_mbox_in in = { };
+ struct xsc_cmd_function_reset_mbox_out out = { };
+ uint16_t func_id = (uint16_t)xdev->hwinfo.func_id;
+ int ret;
+
+ in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_FUNCTION_RESET);
+ in.glb_func_id = rte_cpu_to_be_16(func_id);
+
+ ret = xsc_vfio_mbox_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ if (ret != 0 || out.hdr.status != 0) {
+		PMD_DRV_LOG(ERR, "Failed to reset function, funcid=%u, ret=%d, status=%d",
+			    func_id, ret, out.hdr.status);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+xsc_vfio_dev_init(struct xsc_dev *xdev)
+{
+ int ret;
+
+ ret = xsc_vfio_dev_open(xdev);
+ if (ret != 0)
+ goto open_fail;
+
+ ret = xsc_vfio_bar_init(xdev);
+ if (ret != 0)
+ goto init_fail;
+
+ if (xsc_vfio_mbox_init(xdev) != 0)
+ goto init_fail;
+
+ ret = xsc_vfio_hwinfo_init(xdev);
+ if (ret != 0)
+ goto init_fail;
+
+ ret = xsc_vfio_function_reset(xdev);
+ if (ret != 0)
+ goto init_fail;
+
+ return 0;
+
+init_fail:
+ xsc_vfio_dev_close(xdev);
+
+open_fail:
+ return -1;
+}
+
+static struct xsc_dev_ops *xsc_vfio_ops = &(struct xsc_dev_ops) {
+ .kdrv = (1 << RTE_PCI_KDRV_VFIO) | (1 << RTE_PCI_KDRV_IGB_UIO),
+ .dev_init = xsc_vfio_dev_init,
+ .dev_close = xsc_vfio_dev_close,
+ .set_link_up = xsc_vfio_set_link_up,
+ .set_link_down = xsc_vfio_set_link_down,
+ .link_update = xsc_vfio_link_update,
+ .set_mtu = xsc_vfio_set_mtu,
+ .get_mac = xsc_vfio_get_mac,
+ .destroy_qp = xsc_vfio_destroy_qp,
+ .destroy_cq = xsc_vfio_destroy_cq,
+ .modify_qp_status = xsc_vfio_modify_qp_status,
+ .modify_qp_qostree = xsc_vfio_modify_qp_qostree,
+ .rx_cq_create = xsc_vfio_rx_cq_create,
+ .tx_cq_create = xsc_vfio_tx_cq_create,
+ .tx_qp_create = xsc_vfio_tx_qp_create,
+ .mailbox_exec = xsc_vfio_mbox_exec,
+ .intr_event_get = xsc_vfio_event_get,
+ .intr_handler_install = xsc_vfio_intr_handler_install,
+ .intr_handler_uninstall = xsc_vfio_intr_handler_uninstall,
+};
+
+RTE_INIT(xsc_vfio_ops_reg)
+{
+ xsc_dev_ops_register(xsc_vfio_ops);
+}
diff --git a/drivers/net/xsc/xsc_vfio_mbox.c b/drivers/net/xsc/xsc_vfio_mbox.c
new file mode 100644
index 0000000..ec1311a
--- /dev/null
+++ b/drivers/net/xsc/xsc_vfio_mbox.c
@@ -0,0 +1,691 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+#include <rte_malloc.h>
+#include <rte_version.h>
+
+#include "xsc_vfio_mbox.h"
+#include "xsc_log.h"
+
+#define XSC_MBOX_BUF_NUM 2048
+#define XSC_MBOX_BUF_CACHE_SIZE 256
+#define XSC_CMDQ_DEPTH_LOG 5
+#define XSC_CMDQ_ELEMENT_SIZE_LOG 6
+#define XSC_CMDQ_REQ_TYPE 7
+#define XSC_CMDQ_WAIT_TIMEOUT 10
+#define XSC_CMDQ_WAIT_DELAY_MS 100
+#define XSC_CMD_OP_DUMMY 0x10d
+
+#define XSC_PF_CMDQ_ELEMENT_SZ 0x1020020
+#define XSC_PF_CMDQ_REQ_BASE_H_ADDR 0x1022000
+#define XSC_PF_CMDQ_REQ_BASE_L_ADDR 0x1024000
+#define XSC_PF_CMDQ_RSP_BASE_H_ADDR 0x102a000
+#define XSC_PF_CMDQ_RSP_BASE_L_ADDR 0x102c000
+#define XSC_PF_CMDQ_REQ_PID 0x1026000
+#define XSC_PF_CMDQ_REQ_CID 0x1028000
+#define XSC_PF_CMDQ_RSP_PID 0x102e000
+#define XSC_PF_CMDQ_RSP_CID 0x1030000
+#define XSC_PF_CMDQ_DEPTH 0x1020028
+
+#define XSC_VF_CMDQ_REQ_BASE_H_ADDR 0x0
+#define XSC_VF_CMDQ_REQ_BASE_L_ADDR 0x4
+#define XSC_VF_CMDQ_RSP_BASE_H_ADDR 0x10
+#define XSC_VF_CMDQ_RSP_BASE_L_ADDR 0x14
+#define XSC_VF_CMDQ_REQ_PID 0x8
+#define XSC_VF_CMDQ_REQ_CID 0xc
+#define XSC_VF_CMDQ_RSP_PID 0x18
+#define XSC_VF_CMDQ_RSP_CID 0x1c
+#define XSC_VF_CMDQ_ELEMENT_SZ 0x28
+#define XSC_VF_CMDQ_DEPTH 0x2c
+
+static const char * const xsc_cmd_error[] = {
+ "xsc cmd success",
+ "xsc cmd fail",
+ "xsc cmd timeout"
+};
+
+static struct xsc_cmdq_config xsc_pf_config = {
+ .req_pid_addr = XSC_PF_CMDQ_REQ_PID,
+ .req_cid_addr = XSC_PF_CMDQ_REQ_CID,
+ .rsp_pid_addr = XSC_PF_CMDQ_RSP_PID,
+ .rsp_cid_addr = XSC_PF_CMDQ_RSP_CID,
+ .req_h_addr = XSC_PF_CMDQ_REQ_BASE_H_ADDR,
+ .req_l_addr = XSC_PF_CMDQ_REQ_BASE_L_ADDR,
+ .rsp_h_addr = XSC_PF_CMDQ_RSP_BASE_H_ADDR,
+ .rsp_l_addr = XSC_PF_CMDQ_RSP_BASE_L_ADDR,
+ .elt_sz_addr = XSC_PF_CMDQ_ELEMENT_SZ,
+ .depth_addr = XSC_PF_CMDQ_DEPTH,
+};
+
+static struct xsc_cmdq_config xsc_vf_config = {
+ .req_pid_addr = XSC_VF_CMDQ_REQ_PID,
+ .req_cid_addr = XSC_VF_CMDQ_REQ_CID,
+ .rsp_pid_addr = XSC_VF_CMDQ_RSP_PID,
+ .rsp_cid_addr = XSC_VF_CMDQ_RSP_CID,
+ .req_h_addr = XSC_VF_CMDQ_REQ_BASE_H_ADDR,
+ .req_l_addr = XSC_VF_CMDQ_REQ_BASE_L_ADDR,
+ .rsp_h_addr = XSC_VF_CMDQ_RSP_BASE_H_ADDR,
+ .rsp_l_addr = XSC_VF_CMDQ_RSP_BASE_L_ADDR,
+ .elt_sz_addr = XSC_VF_CMDQ_ELEMENT_SZ,
+ .depth_addr = XSC_VF_CMDQ_DEPTH,
+};
+
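+/*
+ * PF and VF expose the same cmdq registers at different BAR offsets:
+ * the PF addresses them inside the full register space, while the VF
+ * sees a small dedicated window starting near offset 0. The tables
+ * above let the rest of the cmdq code stay path-agnostic.
+ */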
+static void
+xsc_cmdq_config_init(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
+{
+ if (!xsc_dev_is_vf(xdev))
+ cmdq->config = &xsc_pf_config;
+ else
+ cmdq->config = &xsc_vf_config;
+}
+
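+/*
+ * Re-sync the response consumer index with whatever producer index a
+ * previous driver instance left behind, so stale completions are not
+ * replayed on startup.
+ */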
+static void
+xsc_cmdq_rsp_cid_update(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
+{
+ uint32_t rsp_pid;
+
+ cmdq->rsp_cid = rte_read32((uint8_t *)xdev->bar_addr + cmdq->config->rsp_cid_addr);
+ rsp_pid = rte_read32((uint8_t *)xdev->bar_addr + cmdq->config->rsp_pid_addr);
+ if (rsp_pid != cmdq->rsp_cid) {
+ PMD_DRV_LOG(INFO, "Update cid(%u) to latest pid(%u)",
+ cmdq->rsp_cid, rsp_pid);
+ cmdq->rsp_cid = rsp_pid;
+ rte_write32(cmdq->rsp_cid, (uint8_t *)xdev->bar_addr + cmdq->config->rsp_cid_addr);
+ }
+}
+
+static void
+xsc_cmdq_depth_set(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
+{
+ cmdq->depth_n = XSC_CMDQ_DEPTH_LOG;
+ cmdq->depth_m = (1 << XSC_CMDQ_DEPTH_LOG) - 1;
+ rte_write32(1 << cmdq->depth_n, (uint8_t *)xdev->bar_addr + cmdq->config->depth_addr);
+}
+
+static int
+xsc_cmdq_elt_size_check(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
+{
+ uint32_t elts_n;
+
+ elts_n = rte_read32((uint8_t *)xdev->bar_addr + cmdq->config->elt_sz_addr);
+ if (elts_n != XSC_CMDQ_ELEMENT_SIZE_LOG) {
+		PMD_DRV_LOG(ERR, "Wrong cmdq element size log(%u), should be %u",
+			    elts_n, XSC_CMDQ_ELEMENT_SIZE_LOG);
+ rte_errno = ENODEV;
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+xsc_cmdq_req_base_addr_set(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
+{
+ uint32_t h_addr, l_addr;
+
+ h_addr = (uint32_t)(cmdq->req_mz->iova >> 32);
+ l_addr = (uint32_t)(cmdq->req_mz->iova);
+ rte_write32(h_addr, (uint8_t *)xdev->bar_addr + cmdq->config->req_h_addr);
+ rte_write32(l_addr, (uint8_t *)xdev->bar_addr + cmdq->config->req_l_addr);
+}
+
+static void
+xsc_cmdq_rsp_base_addr_set(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
+{
+ uint32_t h_addr, l_addr;
+
+ h_addr = (uint32_t)(cmdq->rsp_mz->iova >> 32);
+ l_addr = (uint32_t)(cmdq->rsp_mz->iova);
+ rte_write32(h_addr, (uint8_t *)xdev->bar_addr + cmdq->config->rsp_h_addr);
+ rte_write32(l_addr, (uint8_t *)xdev->bar_addr + cmdq->config->rsp_l_addr);
+}
+
+static void
+xsc_cmdq_mbox_free(struct xsc_dev *xdev, struct xsc_cmdq_mbox *mbox)
+{
+ struct xsc_cmdq_mbox *next, *head;
+ struct xsc_vfio_priv *priv = xdev->dev_priv;
+
+ head = mbox;
+ while (head != NULL) {
+ next = head->next;
+ if (head->buf != NULL)
+ rte_mempool_put(priv->cmdq->mbox_buf_pool, head->buf);
+ free(head);
+ head = next;
+ }
+}
+
+static struct xsc_cmdq_mbox *
+xsc_cmdq_mbox_alloc(struct xsc_dev *xdev)
+{
+ struct xsc_cmdq_mbox *mbox;
+ int ret;
+ struct xsc_vfio_priv *priv = (struct xsc_vfio_priv *)xdev->dev_priv;
+
+	mbox = malloc(sizeof(*mbox));
+	if (mbox == NULL) {
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	memset(mbox, 0, sizeof(*mbox));
+
+ ret = rte_mempool_get(priv->cmdq->mbox_buf_pool, (void **)&mbox->buf);
+ if (ret != 0)
+ goto error;
+ mbox->buf_dma = rte_mempool_virt2iova(mbox->buf);
+ memset(mbox->buf, 0, sizeof(struct xsc_cmdq_mbox_buf));
+ mbox->next = NULL;
+
+ return mbox;
+
+error:
+ xsc_cmdq_mbox_free(xdev, mbox);
+ return NULL;
+}
+
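+/*
+ * Build a chain of n mailbox blocks, linked twice: through mbox->next
+ * for the driver and through the big-endian buf->next DMA pointer that
+ * hardware walks; block_num counts the blocks still ahead in the chain.
+ */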
+static struct xsc_cmdq_mbox *
+xsc_cmdq_mbox_alloc_bulk(struct xsc_dev *xdev, int n)
+{
+ int i;
+ struct xsc_cmdq_mbox *head = NULL;
+ struct xsc_cmdq_mbox *mbox;
+ struct xsc_cmdq_mbox_buf *mbox_buf;
+
+ for (i = 0; i < n; i++) {
+ mbox = xsc_cmdq_mbox_alloc(xdev);
+ if (mbox == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc mailbox");
+ goto error;
+ }
+
+ mbox_buf = mbox->buf;
+ mbox->next = head;
+ mbox_buf->next = rte_cpu_to_be_64(mbox->next ? mbox->next->buf_dma : 0);
+ mbox_buf->block_num = rte_cpu_to_be_32(n - i - 1);
+ head = mbox;
+ }
+
+ return head;
+
+error:
+ xsc_cmdq_mbox_free(xdev, head);
+ return NULL;
+}
+
+static void
+xsc_cmdq_req_msg_free(struct xsc_dev *xdev, struct xsc_cmdq_req_msg *msg)
+{
+ struct xsc_cmdq_mbox *head;
+
+ if (msg == NULL)
+ return;
+
+ head = msg->next;
+ xsc_cmdq_mbox_free(xdev, head);
+ free(msg);
+}
+
+static struct xsc_cmdq_req_msg *
+xsc_cmdq_req_msg_alloc(struct xsc_dev *xdev, int len)
+{
+ struct xsc_cmdq_req_msg *msg;
+ struct xsc_cmdq_mbox *head = NULL;
+ int cmd_len, nb_mbox;
+
+ msg = malloc(sizeof(*msg));
+ if (msg == NULL) {
+		rte_errno = ENOMEM;
+ goto error;
+ }
+ memset(msg, 0, sizeof(*msg));
+
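+	/* Only the bytes that do not fit in the descriptor's inline
+	 * header area need chained mailbox blocks.
+	 */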
+ cmd_len = len - RTE_MIN(sizeof(msg->hdr.data), (uint32_t)len);
+ nb_mbox = (cmd_len + XSC_CMDQ_DATA_SIZE - 1) / XSC_CMDQ_DATA_SIZE;
+ head = xsc_cmdq_mbox_alloc_bulk(xdev, nb_mbox);
+ if (head == NULL && nb_mbox != 0)
+ goto error;
+
+ msg->next = head;
+ msg->len = len;
+
+ return msg;
+
+error:
+ xsc_cmdq_req_msg_free(xdev, msg);
+ return NULL;
+}
+
+static void
+xsc_cmdq_rsp_msg_free(struct xsc_dev *xdev, struct xsc_cmdq_rsp_msg *msg)
+{
+ struct xsc_cmdq_mbox *head;
+
+ if (msg == NULL)
+ return;
+
+ head = msg->next;
+ xsc_cmdq_mbox_free(xdev, head);
+ free(msg);
+}
+
+static struct xsc_cmdq_rsp_msg *
+xsc_cmdq_rsp_msg_alloc(struct xsc_dev *xdev, int len)
+{
+ struct xsc_cmdq_rsp_msg *msg;
+ struct xsc_cmdq_mbox *head = NULL;
+ int cmd_len, nb_mbox;
+
+ msg = malloc(sizeof(*msg));
+ if (msg == NULL) {
+		rte_errno = ENOMEM;
+ goto error;
+ }
+ memset(msg, 0, sizeof(*msg));
+
+ cmd_len = len - RTE_MIN(sizeof(msg->hdr.data), (uint32_t)len);
+ nb_mbox = (cmd_len + XSC_CMDQ_DATA_SIZE - 1) / XSC_CMDQ_DATA_SIZE;
+ head = xsc_cmdq_mbox_alloc_bulk(xdev, nb_mbox);
+ if (head == NULL && nb_mbox != 0)
+ goto error;
+
+ msg->next = head;
+ msg->len = len;
+
+ return msg;
+
+error:
+ xsc_cmdq_rsp_msg_free(xdev, msg);
+ return NULL;
+}
+
+static void
+xsc_cmdq_msg_destruct(struct xsc_dev *xdev,
+ struct xsc_cmdq_req_msg **req_msg,
+ struct xsc_cmdq_rsp_msg **rsp_msg)
+{
+ xsc_cmdq_req_msg_free(xdev, *req_msg);
+ xsc_cmdq_rsp_msg_free(xdev, *rsp_msg);
+ *req_msg = NULL;
+ *rsp_msg = NULL;
+}
+
+static int
+xsc_cmdq_msg_construct(struct xsc_dev *xdev,
+ struct xsc_cmdq_req_msg **req_msg, int in_len,
+ struct xsc_cmdq_rsp_msg **rsp_msg, int out_len)
+{
+ *req_msg = xsc_cmdq_req_msg_alloc(xdev, in_len);
+ if (*req_msg == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc xsc cmd request msg");
+ goto error;
+ }
+
+ *rsp_msg = xsc_cmdq_rsp_msg_alloc(xdev, out_len);
+ if (*rsp_msg == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc xsc cmd response msg");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ xsc_cmdq_msg_destruct(xdev, req_msg, rsp_msg);
+ return -1;
+}
+
+static int
+xsc_cmdq_req_msg_copy(struct xsc_cmdq_req_msg *req_msg, void *data_in, int in_len)
+{
+ struct xsc_cmdq_mbox_buf *mbox_buf;
+ struct xsc_cmdq_mbox *mbox;
+ int copy;
+ uint8_t *data = data_in;
+
+ if (req_msg == NULL || data == NULL)
+ return -1;
+
+ copy = RTE_MIN((uint32_t)in_len, sizeof(req_msg->hdr.data));
+ memcpy(req_msg->hdr.data, data, copy);
+
+ in_len -= copy;
+ data += copy;
+
+ mbox = req_msg->next;
+ while (in_len > 0) {
+ if (mbox == NULL)
+ return -1;
+
+ copy = RTE_MIN(in_len, XSC_CMDQ_DATA_SIZE);
+ mbox_buf = mbox->buf;
+ memcpy(mbox_buf->data, data, copy);
+ mbox_buf->owner_status = 0;
+ data += copy;
+ in_len -= copy;
+ mbox = mbox->next;
+ }
+
+ return 0;
+}
+
+static int
+xsc_cmdq_rsp_msg_copy(void *data_out, struct xsc_cmdq_rsp_msg *rsp_msg, int out_len)
+{
+ struct xsc_cmdq_mbox_buf *mbox_buf;
+ struct xsc_cmdq_mbox *mbox;
+ int copy;
+ uint8_t *data = data_out;
+
+ if (data == NULL || rsp_msg == NULL)
+ return -1;
+
+ copy = RTE_MIN((uint32_t)out_len, sizeof(rsp_msg->hdr.data));
+ memcpy(data, rsp_msg->hdr.data, copy);
+ out_len -= copy;
+ data += copy;
+
+ mbox = rsp_msg->next;
+ while (out_len > 0) {
+ if (mbox == NULL)
+ return -1;
+ copy = RTE_MIN(out_len, XSC_CMDQ_DATA_SIZE);
+ mbox_buf = mbox->buf;
+		if (!mbox_buf->owner_status)
+			PMD_DRV_LOG(ERR, "Mailbox block owner status not set by hw");
+ memcpy(data, mbox_buf->data, copy);
+ data += copy;
+ out_len -= copy;
+ mbox = mbox->next;
+ }
+
+ return 0;
+}
+
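+/*
+ * Poll the response ring until hardware publishes a new entry. The
+ * owner bit flips on every ring wrap, which distinguishes an entry
+ * DMA'd this lap from a stale one left over from the previous lap.
+ */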
+static enum xsc_cmd_status
+xsc_cmdq_wait_completion(struct xsc_dev *xdev, struct xsc_cmdq_rsp_msg *rsp_msg)
+{
+ struct xsc_vfio_priv *priv = (struct xsc_vfio_priv *)xdev->dev_priv;
+ struct xsc_cmd_queue *cmdq = priv->cmdq;
+ volatile struct xsc_cmdq_rsp_layout *rsp_lay;
+ struct xsc_cmd_outbox_hdr *out_hdr = (struct xsc_cmd_outbox_hdr *)rsp_msg->hdr.data;
+ int count = (XSC_CMDQ_WAIT_TIMEOUT * 1000) / XSC_CMDQ_WAIT_DELAY_MS;
+ uint32_t rsp_pid;
+ uint8_t cmd_status;
+ uint32_t i;
+
+ while (count-- > 0) {
+ rsp_pid = rte_read32((uint8_t *)xdev->bar_addr + cmdq->config->rsp_pid_addr);
+ if (rsp_pid == cmdq->rsp_cid) {
+ rte_delay_ms(XSC_CMDQ_WAIT_DELAY_MS);
+ continue;
+ }
+
+ rsp_lay = cmdq->rsp_lay + cmdq->rsp_cid;
+ if (cmdq->owner_learn == 0) {
+ /* First time learning owner_bit from hardware */
+ cmdq->owner_bit = rsp_lay->owner_bit;
+ cmdq->owner_learn = 1;
+ }
+
+ /* Waiting for dma to complete */
+ if (cmdq->owner_bit != rsp_lay->owner_bit)
+ continue;
+
+ for (i = 0; i < XSC_CMDQ_RSP_INLINE_SIZE; i++)
+ rsp_msg->hdr.data[i] = rsp_lay->out[i];
+
+ cmdq->rsp_cid = (cmdq->rsp_cid + 1) & cmdq->depth_m;
+ rte_write32(cmdq->rsp_cid, (uint8_t *)xdev->bar_addr + cmdq->config->rsp_cid_addr);
+
+ /* Change owner bit */
+ if (cmdq->rsp_cid == 0)
+ cmdq->owner_bit = !cmdq->owner_bit;
+
+ cmd_status = out_hdr->status;
+ if (cmd_status != 0)
+ return XSC_CMD_FAIL;
+ return XSC_CMD_SUCC;
+ }
+
+ return XSC_CMD_TIMEOUT;
+}
+
+static int
+xsc_cmdq_dummy_invoke(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq, uint32_t start, int num)
+{
+ struct xsc_cmdq_dummy_mbox_in in;
+ struct xsc_cmdq_dummy_mbox_out out;
+ struct xsc_cmdq_req_msg *req_msg = NULL;
+ struct xsc_cmdq_rsp_msg *rsp_msg = NULL;
+ struct xsc_cmdq_req_layout *req_lay;
+ int in_len = sizeof(in);
+ int out_len = sizeof(out);
+ int ret, i;
+ uint32_t start_pid = start;
+
+ memset(&in, 0, sizeof(in));
+ in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_DUMMY);
+
+ ret = xsc_cmdq_msg_construct(xdev, &req_msg, in_len, &rsp_msg, out_len);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to construct cmd msg for dummy exec");
+ return -1;
+ }
+
+ ret = xsc_cmdq_req_msg_copy(req_msg, &in, in_len);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to copy cmd buf to request msg for dummy exec");
+ goto error;
+ }
+
+ rte_spinlock_lock(&cmdq->lock);
+
+ for (i = 0; i < num; i++) {
+ req_lay = cmdq->req_lay + start_pid;
+ memset(req_lay, 0, sizeof(*req_lay));
+ memcpy(req_lay->in, req_msg->hdr.data, sizeof(req_lay->in));
+ req_lay->inlen = rte_cpu_to_be_32(req_msg->len);
+ req_lay->outlen = rte_cpu_to_be_32(rsp_msg->len);
+ req_lay->sig = 0xff;
+ req_lay->idx = 0;
+ req_lay->type = XSC_CMDQ_REQ_TYPE;
+ start_pid = (start_pid + 1) & cmdq->depth_m;
+ }
+
+ /* Ring doorbell after the descriptor is valid */
+ rte_write32(cmdq->req_pid, (uint8_t *)xdev->bar_addr + cmdq->config->req_pid_addr);
+
+ ret = xsc_cmdq_wait_completion(xdev, rsp_msg);
+ rte_spinlock_unlock(&cmdq->lock);
+
+error:
+ xsc_cmdq_msg_destruct(xdev, &req_msg, &rsp_msg);
+ return ret;
+}
+
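+/*
+ * If the request ring's pid and cid disagree (e.g. after an unclean
+ * shutdown), replay dummy commands across the in-flight slots so both
+ * counters converge before real commands are issued.
+ */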
+static int
+xsc_cmdq_req_status_restore(struct xsc_dev *xdev, struct xsc_cmd_queue *cmdq)
+{
+ uint32_t req_pid, req_cid;
+ uint32_t cnt;
+
+ req_pid = rte_read32((uint8_t *)xdev->bar_addr + cmdq->config->req_pid_addr);
+ req_cid = rte_read32((uint8_t *)xdev->bar_addr + cmdq->config->req_cid_addr);
+
+ if (req_pid >= (uint32_t)(1 << cmdq->depth_n) ||
+ req_cid >= (uint32_t)(1 << cmdq->depth_n)) {
+ PMD_DRV_LOG(ERR, "Request pid %u and cid %u must be less than %u",
+ req_pid, req_cid, (uint32_t)(1 << cmdq->depth_n));
+ return -1;
+ }
+
+ cmdq->req_pid = req_pid;
+ if (req_pid == req_cid)
+ return 0;
+
+ cnt = (req_pid > req_cid) ? (req_pid - req_cid) :
+ ((1 << cmdq->depth_n) + req_pid - req_cid);
+ if (xsc_cmdq_dummy_invoke(xdev, cmdq, req_cid, cnt) != 0) {
+ PMD_DRV_LOG(ERR, "Failed to dummy invoke xsc cmd");
+ return -1;
+ }
+
+ return 0;
+}
+
+void
+xsc_vfio_mbox_destroy(struct xsc_cmd_queue *cmdq)
+{
+ if (cmdq == NULL)
+ return;
+
+ rte_memzone_free(cmdq->req_mz);
+ rte_memzone_free(cmdq->rsp_mz);
+ rte_mempool_free(cmdq->mbox_buf_pool);
+ rte_free(cmdq);
+}
+
+int
+xsc_vfio_mbox_init(struct xsc_dev *xdev)
+{
+ struct xsc_cmd_queue *cmdq;
+ struct xsc_vfio_priv *priv = (struct xsc_vfio_priv *)xdev->dev_priv;
+ char name[RTE_MEMZONE_NAMESIZE] = { 0 };
+ uint32_t size;
+
+ cmdq = rte_zmalloc(NULL, sizeof(*cmdq), RTE_CACHE_LINE_SIZE);
+ if (cmdq == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for xsc_cmd_queue");
+ return -1;
+ }
+
+ snprintf(name, RTE_MEMZONE_NAMESIZE, "%s_cmdq", xdev->pci_dev->device.name);
+ size = (1 << XSC_CMDQ_DEPTH_LOG) * sizeof(struct xsc_cmdq_req_layout);
+ cmdq->req_mz = rte_memzone_reserve_aligned(name,
+ size, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG,
+ XSC_PAGE_SIZE);
+ if (cmdq->req_mz == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for cmd queue");
+ goto error;
+ }
+ cmdq->req_lay = cmdq->req_mz->addr;
+
+ snprintf(name, RTE_MEMZONE_NAMESIZE, "%s_cmd_cq", xdev->pci_dev->device.name);
+	size = (1 << XSC_CMDQ_DEPTH_LOG) * sizeof(struct xsc_cmdq_rsp_layout);
+ cmdq->rsp_mz = rte_memzone_reserve_aligned(name,
+ size, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG,
+ XSC_PAGE_SIZE);
+ if (cmdq->rsp_mz == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for cmd cq");
+ goto error;
+ }
+ cmdq->rsp_lay = cmdq->rsp_mz->addr;
+
+ snprintf(name, RTE_MEMZONE_NAMESIZE, "%s_mempool", xdev->pci_dev->device.name);
+ cmdq->mbox_buf_pool = rte_mempool_create(name, XSC_MBOX_BUF_NUM,
+ sizeof(struct xsc_cmdq_mbox_buf),
+ XSC_MBOX_BUF_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (cmdq->mbox_buf_pool == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to create mailbox buf pool");
+ goto error;
+ }
+
+ xsc_cmdq_config_init(xdev, cmdq);
+ xsc_cmdq_rsp_cid_update(xdev, cmdq);
+ xsc_cmdq_depth_set(xdev, cmdq);
+ if (xsc_cmdq_elt_size_check(xdev, cmdq) != 0)
+ goto error;
+
+ xsc_cmdq_req_base_addr_set(xdev, cmdq);
+ xsc_cmdq_rsp_base_addr_set(xdev, cmdq);
+	/*
+	 * The restore path may issue dummy commands, which needs the lock
+	 * and priv->cmdq in place, so publish both before restoring.
+	 */
+	rte_spinlock_init(&cmdq->lock);
+	priv->cmdq = cmdq;
+
+	/* Check request status and restore it */
+	if (xsc_cmdq_req_status_restore(xdev, cmdq) != 0)
+		goto error;
+
+	return 0;
+
+error:
+	priv->cmdq = NULL;
+	xsc_vfio_mbox_destroy(cmdq);
+	return -1;
+}
+
+static enum xsc_cmd_status
+xsc_cmdq_invoke(struct xsc_dev *xdev, struct xsc_cmdq_req_msg *req_msg,
+ struct xsc_cmdq_rsp_msg *rsp_msg)
+{
+ struct xsc_vfio_priv *priv = (struct xsc_vfio_priv *)xdev->dev_priv;
+ struct xsc_cmd_queue *cmdq = priv->cmdq;
+ struct xsc_cmdq_req_layout *req_lay;
+ enum xsc_cmd_status status = XSC_CMD_FAIL;
+
+ rte_spinlock_lock(&cmdq->lock);
+ req_lay = cmdq->req_lay + cmdq->req_pid;
+ memset(req_lay, 0, sizeof(*req_lay));
+ memcpy(req_lay->in, req_msg->hdr.data, sizeof(req_lay->in));
+ if (req_msg->next != NULL)
+ req_lay->in_ptr = rte_cpu_to_be_64(req_msg->next->buf_dma);
+ req_lay->inlen = rte_cpu_to_be_32(req_msg->len);
+
+ if (rsp_msg->next != NULL)
+ req_lay->out_ptr = rte_cpu_to_be_64(rsp_msg->next->buf_dma);
+ req_lay->outlen = rte_cpu_to_be_32(rsp_msg->len);
+
+ req_lay->sig = 0xff;
+ req_lay->idx = 0;
+ req_lay->type = XSC_CMDQ_REQ_TYPE;
+
+ /* Ring doorbell after the descriptor is valid */
+ cmdq->req_pid = (cmdq->req_pid + 1) & cmdq->depth_m;
+ rte_write32(cmdq->req_pid, (uint8_t *)xdev->bar_addr + cmdq->config->req_pid_addr);
+
+ status = xsc_cmdq_wait_completion(xdev, rsp_msg);
+ rte_spinlock_unlock(&cmdq->lock);
+
+ return status;
+}
+
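+/*
+ * Execute one synchronous mailbox command. A minimal caller sketch,
+ * assuming a command pair declared in xsc_cmd.h:
+ *
+ *	struct xsc_cmd_query_hca_cap_mbox_in in = { };
+ *	struct xsc_cmd_query_hca_cap_mbox_out out = { };
+ *
+ *	in.hdr.opcode = rte_cpu_to_be_16(XSC_CMD_OP_QUERY_HCA_CAP);
+ *	if (xsc_vfio_mbox_exec(xdev, &in, sizeof(in), &out, sizeof(out)) != 0 ||
+ *	    out.hdr.status != 0)
+ *		return -1;
+ */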
+int
+xsc_vfio_mbox_exec(struct xsc_dev *xdev, void *data_in,
+ int in_len, void *data_out, int out_len)
+{
+ struct xsc_cmdq_req_msg *req_msg = NULL;
+ struct xsc_cmdq_rsp_msg *rsp_msg = NULL;
+ int ret;
+ enum xsc_cmd_status status;
+
+ ret = xsc_cmdq_msg_construct(xdev, &req_msg, in_len, &rsp_msg, out_len);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to construct cmd msg");
+ return -1;
+ }
+
+ ret = xsc_cmdq_req_msg_copy(req_msg, data_in, in_len);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to copy cmd buf to request msg");
+ goto error;
+ }
+
+ status = xsc_cmdq_invoke(xdev, req_msg, rsp_msg);
+ if (status != XSC_CMD_SUCC) {
+ PMD_DRV_LOG(ERR, "Failed to invoke xsc cmd, %s",
+ xsc_cmd_error[status]);
+ ret = -1;
+ goto error;
+ }
+
+ ret = xsc_cmdq_rsp_msg_copy(data_out, rsp_msg, out_len);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to copy response msg to out data");
+ goto error;
+ }
+
+error:
+ xsc_cmdq_msg_destruct(xdev, &req_msg, &rsp_msg);
+ return ret;
+}
diff --git a/drivers/net/xsc/xsc_vfio_mbox.h b/drivers/net/xsc/xsc_vfio_mbox.h
new file mode 100644
index 0000000..49ca84f
--- /dev/null
+++ b/drivers/net/xsc/xsc_vfio_mbox.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2025 Yunsilicon Technology Co., Ltd.
+ */
+
+#ifndef _XSC_CMDQ_H_
+#define _XSC_CMDQ_H_
+
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_memzone.h>
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+
+#include "xsc_dev.h"
+#include "xsc_cmd.h"
+
+#define XSC_CMDQ_DATA_SIZE 512
+#define XSC_CMDQ_REQ_INLINE_SIZE 8
+#define XSC_CMDQ_RSP_INLINE_SIZE 14
+
+struct xsc_cmdq_config {
+ uint32_t req_pid_addr;
+ uint32_t req_cid_addr;
+ uint32_t rsp_pid_addr;
+ uint32_t rsp_cid_addr;
+ uint32_t req_h_addr;
+ uint32_t req_l_addr;
+ uint32_t rsp_h_addr;
+ uint32_t rsp_l_addr;
+ uint32_t elt_sz_addr;
+ uint32_t depth_addr;
+};
+
+struct xsc_cmd_queue {
+ struct xsc_cmdq_req_layout *req_lay;
+ struct xsc_cmdq_rsp_layout *rsp_lay;
+ const struct rte_memzone *req_mz;
+ const struct rte_memzone *rsp_mz;
+ uint32_t req_pid;
+ uint32_t rsp_cid;
+ uint8_t owner_bit; /* CMDQ owner bit */
+ uint8_t owner_learn; /* Learn ownerbit from hw */
+ uint8_t depth_n; /* Log 2 of CMDQ depth */
+ uint8_t depth_m; /* CMDQ depth mask */
+ struct rte_mempool *mbox_buf_pool; /* CMDQ data pool */
+ struct xsc_cmdq_config *config;
+ rte_spinlock_t lock;
+};
+
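+/* One DMA-able mailbox block: a 512-byte payload followed by the
+ * control fields hardware expects (576 bytes in total).
+ */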
+struct xsc_cmdq_mbox_buf {
+ uint8_t data[XSC_CMDQ_DATA_SIZE];
+ uint8_t rsv0[48];
+ rte_be64_t next; /* Next buf dma addr */
+ rte_be32_t block_num;
+ uint8_t owner_status;
+ uint8_t token;
+ uint8_t ctrl_sig;
+ uint8_t sig;
+};
+
+struct xsc_cmdq_mbox {
+ struct xsc_cmdq_mbox_buf *buf;
+ rte_iova_t buf_dma;
+ struct xsc_cmdq_mbox *next;
+};
+
+/* CMDQ request msg inline */
+struct xsc_cmdq_req_hdr {
+ rte_be32_t data[XSC_CMDQ_REQ_INLINE_SIZE];
+};
+
+struct xsc_cmdq_req_msg {
+ uint32_t len;
+ struct xsc_cmdq_req_hdr hdr;
+ struct xsc_cmdq_mbox *next;
+};
+
+/* CMDQ response msg inline */
+struct xsc_cmdq_rsp_hdr {
+ rte_be32_t data[XSC_CMDQ_RSP_INLINE_SIZE];
+};
+
+struct xsc_cmdq_rsp_msg {
+ uint32_t len;
+ struct xsc_cmdq_rsp_hdr hdr;
+ struct xsc_cmdq_mbox *next;
+};
+
+/* HW will use this for some records(e.g. vf_id) */
+struct xsc_cmdq_rsv {
+ uint16_t vf_id;
+ uint8_t rsv[2];
+};
+
+/* CMDQ request entry layout */
+struct xsc_cmdq_req_layout {
+ struct xsc_cmdq_rsv rsv0;
+ rte_be32_t inlen;
+ rte_be64_t in_ptr;
+ rte_be32_t in[XSC_CMDQ_REQ_INLINE_SIZE];
+ rte_be64_t out_ptr;
+ rte_be32_t outlen;
+ uint8_t token;
+ uint8_t sig;
+ uint8_t idx;
+ uint8_t type:7;
+ uint8_t owner_bit:1;
+};
+
+/* CMDQ response entry layout */
+struct xsc_cmdq_rsp_layout {
+ struct xsc_cmdq_rsv rsv0;
+ rte_be32_t out[XSC_CMDQ_RSP_INLINE_SIZE];
+ uint8_t token;
+ uint8_t sig;
+ uint8_t idx;
+ uint8_t type:7;
+ uint8_t owner_bit:1;
+};
+
+struct xsc_cmdq_dummy_mbox_in {
+ struct xsc_cmd_inbox_hdr hdr;
+ uint8_t rsv[8];
+};
+
+struct xsc_cmdq_dummy_mbox_out {
+ struct xsc_cmd_outbox_hdr hdr;
+ uint8_t rsv[8];
+};
+
+struct xsc_vfio_priv {
+ struct xsc_cmd_queue *cmdq;
+};
+
+int xsc_vfio_mbox_init(struct xsc_dev *xdev);
+void xsc_vfio_mbox_destroy(struct xsc_cmd_queue *cmdq);
+int xsc_vfio_mbox_exec(struct xsc_dev *xdev,
+ void *data_in, int in_len,
+ void *data_out, int out_len);
+
+#endif /* _XSC_CMDQ_H_ */
--
2.25.1