/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2019 Hisilicon Limited.
+ * Copyright(c) 2018-2021 HiSilicon Limited.
*/
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_cycles.h>
+#include <rte_geneve.h>
#include <rte_vxlan.h>
#include <ethdev_driver.h>
#include <rte_io.h>
#include <rte_net.h>
#include <rte_malloc.h>
-#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
+#if defined(RTE_ARCH_ARM64)
#include <rte_cpuflags.h>
+#include <rte_vect.h>
#endif
#include "hns3_ethdev.h"
hns3pf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id)
{
#define HNS3_TQP_RESET_TRY_MS 200
+ uint16_t wait_time = 0;
uint8_t reset_status;
- uint64_t end;
int ret;
- ret = hns3_tqp_enable(hw, queue_id, false);
- if (ret)
- return ret;
-
/*
* In current version VF is not supported when PF is driven by DPDK
* driver, all task queue pairs are mapped to PF function, so PF's queue
hns3_err(hw, "Send reset tqp cmd fail, ret = %d", ret);
return ret;
}
- end = get_timeofday_ms() + HNS3_TQP_RESET_TRY_MS;
+
do {
/* Wait for tqp hw reset */
rte_delay_ms(HNS3_POLL_RESPONE_MS);
+ wait_time += HNS3_POLL_RESPONE_MS;
ret = hns3_get_tqp_reset_status(hw, queue_id, &reset_status);
if (ret)
goto tqp_reset_fail;
if (reset_status)
break;
- } while (get_timeofday_ms() < end);
+ } while (wait_time < HNS3_TQP_RESET_TRY_MS);
if (!reset_status) {
ret = -ETIMEDOUT;
uint8_t msg_data[2];
int ret;
- /* Disable VF's queue before send queue reset msg to PF */
- ret = hns3_tqp_enable(hw, queue_id, false);
- if (ret)
- return ret;
-
memcpy(msg_data, &queue_id, sizeof(uint16_t));
ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
}
static int
-hns3_reset_tqp(struct hns3_adapter *hns, uint16_t queue_id)
+hns3_reset_rcb_cmd(struct hns3_hw *hw, uint8_t *reset_status)
{
- struct hns3_hw *hw = &hns->hw;
+ struct hns3_reset_cmd *req;
+ struct hns3_cmd_desc desc;
+ int ret;
- if (hns->is_vf)
- return hns3vf_reset_tqp(hw, queue_id);
- else
- return hns3pf_reset_tqp(hw, queue_id);
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CFG_RST_TRIGGER, false);
+ req = (struct hns3_reset_cmd *)desc.data;
+ hns3_set_bit(req->mac_func_reset, HNS3_CFG_RESET_RCB_B, 1);
+
+	/*
+	 * The start qid should be the global qid of the first tqp of the
+	 * function to be reset on this port. Since the PF does not support
+	 * taking over VFs, only function 0 needs to be reset, and its start
+	 * qid is always 0.
+	 */
+ req->fun_reset_rcb_vqid_start = rte_cpu_to_le_16(0);
+ req->fun_reset_rcb_vqid_num = rte_cpu_to_le_16(hw->cfg_max_queues);
+
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret) {
+ hns3_err(hw, "fail to send rcb reset cmd, ret = %d.", ret);
+ return ret;
+ }
+
+ *reset_status = req->fun_reset_rcb_return_status;
+ return 0;
+}
+
+static int
+hns3pf_reset_all_tqps(struct hns3_hw *hw)
+{
+#define HNS3_RESET_RCB_NOT_SUPPORT 0U
+#define HNS3_RESET_ALL_TQP_SUCCESS 1U
+ uint8_t reset_status;
+ int ret;
+ int i;
+
+ ret = hns3_reset_rcb_cmd(hw, &reset_status);
+ if (ret)
+ return ret;
+
+	/*
+	 * If the firmware version is low, it may not support the rcb reset
+	 * which resets all the tqps at a time. In this case, reset the tqps
+	 * one by one.
+	 */
+ if (reset_status == HNS3_RESET_RCB_NOT_SUPPORT) {
+ for (i = 0; i < hw->cfg_max_queues; i++) {
+ ret = hns3pf_reset_tqp(hw, i);
+ if (ret) {
+ hns3_err(hw,
+ "fail to reset tqp, queue_id = %d, ret = %d.",
+ i, ret);
+ return ret;
+ }
+ }
+ } else if (reset_status != HNS3_RESET_ALL_TQP_SUCCESS) {
+ hns3_err(hw, "fail to reset all tqps, reset_status = %u.",
+ reset_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+hns3vf_reset_all_tqps(struct hns3_hw *hw)
+{
+#define HNS3VF_RESET_ALL_TQP_DONE 1U
+ uint8_t reset_status;
+ uint8_t msg_data[2];
+ int ret;
+ int i;
+
+ memset(msg_data, 0, sizeof(uint16_t));
+ ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data,
+ sizeof(msg_data), true, &reset_status,
+ sizeof(reset_status));
+ if (ret) {
+ hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret);
+ return ret;
+ }
+
+ if (reset_status == HNS3VF_RESET_ALL_TQP_DONE)
+ return 0;
+
+	/*
+	 * If the firmware version or kernel PF driver version is low, it may
+	 * not support the rcb reset which resets all the tqps at a time. In
+	 * this case, reset the remaining tqps one by one (queue 0 has already
+	 * been reset by the mailbox request above).
+	 */
+ for (i = 1; i < hw->cfg_max_queues; i++) {
+ ret = hns3vf_reset_tqp(hw, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
int
struct hns3_hw *hw = &hns->hw;
int ret, i;
+	/* Disable all queues before resetting them */
for (i = 0; i < hw->cfg_max_queues; i++) {
- ret = hns3_reset_tqp(hns, i);
+ ret = hns3_tqp_enable(hw, i, false);
if (ret) {
- hns3_err(hw, "Failed to reset No.%d queue: %d", i, ret);
+ hns3_err(hw,
+ "fail to disable tqps before tqps reset, ret = %d.",
+ ret);
return ret;
}
}
- return 0;
+
+ if (hns->is_vf)
+ return hns3vf_reset_all_tqps(hw);
+ else
+ return hns3pf_reset_all_tqps(hw);
}
static int
{
static const uint32_t ptypes[] = {
RTE_PTYPE_L2_ETHER,
- RTE_PTYPE_L2_ETHER_VLAN,
- RTE_PTYPE_L2_ETHER_QINQ,
RTE_PTYPE_L2_ETHER_LLDP,
RTE_PTYPE_L2_ETHER_ARP,
RTE_PTYPE_L3_IPV4,
RTE_PTYPE_L4_UDP,
RTE_PTYPE_TUNNEL_GRE,
RTE_PTYPE_INNER_L2_ETHER,
- RTE_PTYPE_INNER_L2_ETHER_VLAN,
- RTE_PTYPE_INNER_L2_ETHER_QINQ,
RTE_PTYPE_INNER_L3_IPV4,
RTE_PTYPE_INNER_L3_IPV6,
RTE_PTYPE_INNER_L3_IPV4_EXT,
RTE_PTYPE_TUNNEL_NVGRE,
RTE_PTYPE_UNKNOWN
};
+ static const uint32_t adv_layout_ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_TIMESYNC,
+ RTE_PTYPE_L2_ETHER_LLDP,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_IGMP,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_TUNNEL_GRE,
+ RTE_PTYPE_TUNNEL_GRENAT,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_SCTP,
+ RTE_PTYPE_INNER_L4_ICMP,
+ RTE_PTYPE_UNKNOWN
+ };
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (dev->rx_pkt_burst == hns3_recv_pkts ||
+ if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
dev->rx_pkt_burst == hns3_recv_scattered_pkts ||
dev->rx_pkt_burst == hns3_recv_pkts_vec ||
- dev->rx_pkt_burst == hns3_recv_pkts_vec_sve)
- return ptypes;
+ dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
+ if (hns3_dev_rxd_adv_layout_supported(hw))
+ return adv_layout_ptypes;
+ else
+ return ptypes;
+ }
return NULL;
}
static void
hns3_init_non_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
{
- tbl->l2l3table[0][0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
- tbl->l2l3table[0][1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
- tbl->l2l3table[0][2] = RTE_PTYPE_L2_ETHER_ARP;
- tbl->l2l3table[0][3] = RTE_PTYPE_L2_ETHER;
- tbl->l2l3table[0][4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
- tbl->l2l3table[0][5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
- tbl->l2l3table[0][6] = RTE_PTYPE_L2_ETHER_LLDP;
- tbl->l2l3table[0][15] = RTE_PTYPE_L2_ETHER;
-
- tbl->l2l3table[1][0] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4;
- tbl->l2l3table[1][1] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6;
- tbl->l2l3table[1][2] = RTE_PTYPE_L2_ETHER_ARP;
- tbl->l2l3table[1][3] = RTE_PTYPE_L2_ETHER_VLAN;
- tbl->l2l3table[1][4] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4_EXT;
- tbl->l2l3table[1][5] = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV6_EXT;
- tbl->l2l3table[1][6] = RTE_PTYPE_L2_ETHER_LLDP;
- tbl->l2l3table[1][15] = RTE_PTYPE_L2_ETHER_VLAN;
-
- tbl->l2l3table[2][0] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV4;
- tbl->l2l3table[2][1] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6;
- tbl->l2l3table[2][2] = RTE_PTYPE_L2_ETHER_ARP;
- tbl->l2l3table[2][3] = RTE_PTYPE_L2_ETHER_QINQ;
- tbl->l2l3table[2][4] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV4_EXT;
- tbl->l2l3table[2][5] = RTE_PTYPE_L2_ETHER_QINQ | RTE_PTYPE_L3_IPV6_EXT;
- tbl->l2l3table[2][6] = RTE_PTYPE_L2_ETHER_LLDP;
- tbl->l2l3table[2][15] = RTE_PTYPE_L2_ETHER_QINQ;
+ tbl->l3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
+ tbl->l3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
+ tbl->l3table[2] = RTE_PTYPE_L2_ETHER_ARP;
+ tbl->l3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
+ tbl->l3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
+ tbl->l3table[6] = RTE_PTYPE_L2_ETHER_LLDP;
tbl->l4table[0] = RTE_PTYPE_L4_UDP;
tbl->l4table[1] = RTE_PTYPE_L4_TCP;
static void
hns3_init_tunnel_ptype_tbl(struct hns3_ptype_table *tbl)
{
- tbl->inner_l2table[0] = RTE_PTYPE_INNER_L2_ETHER;
- tbl->inner_l2table[1] = RTE_PTYPE_INNER_L2_ETHER_VLAN;
- tbl->inner_l2table[2] = RTE_PTYPE_INNER_L2_ETHER_QINQ;
-
- tbl->inner_l3table[0] = RTE_PTYPE_INNER_L3_IPV4;
- tbl->inner_l3table[1] = RTE_PTYPE_INNER_L3_IPV6;
+ tbl->inner_l3table[0] = RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4;
+ tbl->inner_l3table[1] = RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6;
/* There is not a ptype for inner ARP/RARP */
tbl->inner_l3table[2] = RTE_PTYPE_UNKNOWN;
tbl->inner_l3table[3] = RTE_PTYPE_UNKNOWN;
- tbl->inner_l3table[4] = RTE_PTYPE_INNER_L3_IPV4_EXT;
- tbl->inner_l3table[5] = RTE_PTYPE_INNER_L3_IPV6_EXT;
+ tbl->inner_l3table[4] = RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT;
+ tbl->inner_l3table[5] = RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT;
tbl->inner_l4table[0] = RTE_PTYPE_INNER_L4_UDP;
tbl->inner_l4table[1] = RTE_PTYPE_INNER_L4_TCP;
tbl->inner_l4table[4] = RTE_PTYPE_UNKNOWN;
tbl->inner_l4table[5] = RTE_PTYPE_INNER_L4_ICMP;
- tbl->ol2table[0] = RTE_PTYPE_L2_ETHER;
- tbl->ol2table[1] = RTE_PTYPE_L2_ETHER_VLAN;
- tbl->ol2table[2] = RTE_PTYPE_L2_ETHER_QINQ;
-
- tbl->ol3table[0] = RTE_PTYPE_L3_IPV4;
- tbl->ol3table[1] = RTE_PTYPE_L3_IPV6;
+ tbl->ol3table[0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
+ tbl->ol3table[1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
tbl->ol3table[2] = RTE_PTYPE_UNKNOWN;
tbl->ol3table[3] = RTE_PTYPE_UNKNOWN;
- tbl->ol3table[4] = RTE_PTYPE_L3_IPV4_EXT;
- tbl->ol3table[5] = RTE_PTYPE_L3_IPV6_EXT;
+ tbl->ol3table[4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT;
+ tbl->ol3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT;
tbl->ol4table[0] = RTE_PTYPE_UNKNOWN;
- tbl->ol4table[1] = RTE_PTYPE_TUNNEL_VXLAN;
+ tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN;
tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE;
}
RTE_PTYPE_L4_UDP;
ptype[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
- /* The next ptype is GRE over IPv4 */
- ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ ptype[21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE;
ptype[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_SCTP;
ptype[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
ptype[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
- /* The next ptype is GRE over IPv6 */
- ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ ptype[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRE;
ptype[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_SCTP;
ptype[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
return rte_mbuf_raw_alloc(rxq->mb_pool);
}
+static inline void
+hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf,
+ volatile struct hns3_desc *rxd)
+{
+ struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns);
+ uint64_t timestamp = rte_le_to_cpu_64(rxd->timestamp);
+
+ mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
+ if (hns3_timestamp_rx_dynflag > 0) {
+ *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = timestamp;
+ mbuf->ol_flags |= hns3_timestamp_rx_dynflag;
+ }
+
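+	/* Cache the latest Rx timestamp, presumably for timesync reads. */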
+ pf->rx_timestamp = timestamp;
+}
+
uint16_t
-hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+hns3_recv_pkts_simple(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
volatile struct hns3_desc *rx_ring; /* RX ring (desc) */
volatile struct hns3_desc *rxdp; /* pointer of the current desc */
struct rte_mbuf *nmb; /* pointer of the new mbuf */
struct rte_mbuf *rxm;
uint32_t bd_base_info;
- uint32_t cksum_err;
uint32_t l234_info;
uint32_t ol_info;
uint64_t dma_addr;
}
rxm = rxe->mbuf;
+ rxm->ol_flags = 0;
rxe->mbuf = nmb;
+ if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
+ hns3_rx_ptp_timestamp_handle(rxq, rxm, rxdp);
+
dma_addr = rte_mbuf_data_iova_default(nmb);
rxdp->addr = rte_cpu_to_le_64(dma_addr);
rxdp->rx.bd_base_info = 0;
rxm->data_len = rxm->pkt_len;
rxm->port = rxq->port_id;
rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash);
- rxm->ol_flags = PKT_RX_RSS_HASH;
+ rxm->ol_flags |= PKT_RX_RSS_HASH;
if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) {
rxm->hash.fdir.hi =
rte_le_to_cpu_16(rxd.rx.fd_id);
/* Load remained descriptor data and extract necessary fields */
l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
- ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info,
- l234_info, &cksum_err);
+ ret = hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info);
if (unlikely(ret))
goto pkt_err;
rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
- if (likely(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
- hns3_rx_set_cksum_flag(rxm, rxm->packet_type,
- cksum_err);
+ if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
+ rxm->ol_flags |= PKT_RX_IEEE1588_PTP;
+
hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd);
/* Increment bytes counter */
struct rte_mbuf *rxm;
struct rte_eth_dev *dev;
uint32_t bd_base_info;
- uint32_t cksum_err;
uint32_t l234_info;
uint32_t gro_size;
uint32_t ol_info;
continue;
}
+ if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B)))
+ hns3_rx_ptp_timestamp_handle(rxq, first_seg, rxdp);
+
/*
* The last buffer of the received packet. packet len from
* buffer description may contains CRC len, packet len should
l234_info = rte_le_to_cpu_32(rxd.rx.l234_info);
ol_info = rte_le_to_cpu_32(rxd.rx.ol_info);
ret = hns3_handle_bdinfo(rxq, first_seg, bd_base_info,
- l234_info, &cksum_err);
+ l234_info);
if (unlikely(ret))
goto pkt_err;
first_seg->packet_type = hns3_rx_calc_ptype(rxq,
l234_info, ol_info);
- if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
- hns3_rx_set_cksum_flag(first_seg,
- first_seg->packet_type,
- cksum_err);
+ if (first_seg->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC)
+			first_seg->ol_flags |= PKT_RX_IEEE1588_PTP;
+
hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd);
/* Increment bytes counter */
eth_rx_burst_t pkt_burst;
const char *info;
} burst_infos[] = {
- { hns3_recv_pkts, "Scalar" },
+ { hns3_recv_pkts_simple, "Scalar Simple" },
{ hns3_recv_scattered_pkts, "Scalar Scattered" },
- { hns3_recv_pkts_vec, "Vector Neon" },
- { hns3_recv_pkts_vec_sve, "Vector Sve" },
+ { hns3_recv_pkts_vec, "Vector Neon" },
+ { hns3_recv_pkts_vec_sve, "Vector Sve" },
};
eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
}
static bool
-hns3_check_sve_support(void)
+hns3_get_default_vec_support(void)
+{
+#if defined(RTE_ARCH_ARM64)
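+	/* The NEON path needs the EAL max SIMD bitwidth to be at least 128. */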
+ if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
+ return false;
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
+ return true;
+#endif
+ return false;
+}
+
+static bool
+hns3_get_sve_support(void)
{
#if defined(RTE_ARCH_ARM64) && defined(__ARM_FEATURE_SVE)
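+	/* The SVE path needs the EAL max SIMD bitwidth to be at least 256. */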
+ if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_256)
+ return false;
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
return true;
#endif
struct hns3_adapter *hns = dev->data->dev_private;
uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
bool vec_allowed, sve_allowed, simple_allowed;
+ bool vec_support;
- vec_allowed = hns->rx_vec_allowed &&
- hns3_rx_check_vec_support(dev) == 0;
- sve_allowed = vec_allowed && hns3_check_sve_support();
- simple_allowed = hns->rx_simple_allowed && !dev->data->scattered_rx &&
+ vec_support = hns3_rx_check_vec_support(dev) == 0;
+ vec_allowed = vec_support && hns3_get_default_vec_support();
+ sve_allowed = vec_support && hns3_get_sve_support();
+ simple_allowed = !dev->data->scattered_rx &&
(offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0;
if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed)
if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SVE && sve_allowed)
return hns3_recv_pkts_vec_sve;
if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
- return hns3_recv_pkts;
+ return hns3_recv_pkts_simple;
if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_COMMON)
return hns3_recv_scattered_pkts;
if (vec_allowed)
return hns3_recv_pkts_vec;
if (simple_allowed)
- return hns3_recv_pkts;
+ return hns3_recv_pkts_simple;
return hns3_recv_scattered_pkts;
}
HNS3_RING_TX_TAIL_REG);
txq->min_tx_pkt_len = hw->min_tx_pkt_len;
txq->tso_mode = hw->tso_mode;
+ txq->udp_cksum_mode = hw->udp_cksum_mode;
memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats));
memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats));
{
desc->addr = rte_mbuf_data_iova(rxm);
desc->tx.send_size = rte_cpu_to_le_16(rte_pktmbuf_data_len(rxm));
- desc->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
+ desc->tx.tp_fe_sc_vld_ra_ri |= rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B));
}
static void
rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B));
desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci);
}
+
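+	/* Request a HW Tx timestamp for PTP packets. */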
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ desc->tx.tp_fe_sc_vld_ra_ri |=
+ rte_cpu_to_le_16(BIT(HNS3_TXD_TSYN_B));
}
static inline int
}
#endif
+static uint16_t
+hns3_udp_cksum_help(struct rte_mbuf *m)
+{
+ uint64_t ol_flags = m->ol_flags;
+ uint16_t cksum = 0;
+ uint32_t l4_len;
+
+ if (ol_flags & PKT_TX_IPV4) {
+ struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv4_hdr *, m->l2_len);
+ l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - m->l3_len;
+ } else {
+ struct rte_ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv6_hdr *, m->l2_len);
+ l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len);
+ }
+
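+	/*
+	 * dgram_cksum is assumed to already hold the pseudo-header checksum,
+	 * so a raw sum over the UDP header and payload yields the full cksum.
+	 */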
+ rte_raw_cksum_mbuf(m, m->l2_len + m->l3_len, l4_len, &cksum);
+
+ cksum = ~cksum;
+ /*
+	 * RFC 768: if the computed UDP checksum is zero, it is transmitted
+	 * as all ones.
+ */
+ if (cksum == 0)
+ cksum = 0xffff;
+
+ return (uint16_t)cksum;
+}
+
+static bool
+hns3_validate_tunnel_cksum(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
+{
+ uint64_t ol_flags = m->ol_flags;
+ struct rte_udp_hdr *udp_hdr;
+ uint16_t dst_port;
+
+ if (tx_queue->udp_cksum_mode == HNS3_SPECIAL_PORT_HW_CKSUM_MODE ||
+ ol_flags & PKT_TX_TUNNEL_MASK ||
+ (ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM)
+ return true;
+	/*
+	 * A UDP packet whose dst_port matches VXLAN/VXLAN-GPE/GENEVE is
+	 * recognized as a tunnel packet by the HW. In that case, if UDP
+	 * checksum offload is requested but no tunnel flag is set, the HW
+	 * uses a wrong header length and produces a bad checksum, so the
+	 * driver must complete the checksum in software to avoid the error.
+	 */
+ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
+ m->l2_len + m->l3_len);
+ dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
+ switch (dst_port) {
+ case RTE_VXLAN_DEFAULT_PORT:
+ case RTE_VXLAN_GPE_DEFAULT_PORT:
+ case RTE_GENEVE_DEFAULT_PORT:
+ udp_hdr->dgram_cksum = hns3_udp_cksum_help(m);
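+		/* Clear the L4 cksum request so the HW does not redo it wrongly. */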
+ m->ol_flags = ol_flags & ~PKT_TX_L4_MASK;
+ return false;
+ default:
+ return true;
+ }
+}
+
static int
hns3_prep_pkt_proc(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m)
{
return ret;
}
+ if (!hns3_validate_tunnel_cksum(tx_queue, m))
+ return 0;
+
hns3_outer_header_cksum_prepare(m);
return 0;
return 0;
}
+static bool
+hns3_tx_check_simple_support(struct rte_eth_dev *dev)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
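+	/* The simple Tx path does not handle PTP Tx timestamping. */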
+ if (hns3_dev_ptp_supported(hw))
+ return false;
+
+ return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE));
+}
+
+static bool
+hns3_get_tx_prep_needed(struct rte_eth_dev *dev)
+{
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_SET_USED(dev);
+	/* always perform tx_prepare when debugging */
+ return true;
+#else
+#define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_SCTP_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ DEV_TX_OFFLOAD_GRE_TNL_TSO | \
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO)
+
+	uint64_t tx_offload = dev->data->dev_conf.txmode.offloads;
+
+	if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK)
+ return true;
+
+ return false;
+#endif
+}
+
static eth_tx_burst_t
hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep)
{
- uint64_t offloads = dev->data->dev_conf.txmode.offloads;
struct hns3_adapter *hns = dev->data->dev_private;
bool vec_allowed, sve_allowed, simple_allowed;
+ bool vec_support, tx_prepare_needed;
- vec_allowed = hns->tx_vec_allowed &&
- hns3_tx_check_vec_support(dev) == 0;
- sve_allowed = vec_allowed && hns3_check_sve_support();
- simple_allowed = hns->tx_simple_allowed &&
- offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+ vec_support = hns3_tx_check_vec_support(dev) == 0;
+ vec_allowed = vec_support && hns3_get_default_vec_support();
+ sve_allowed = vec_support && hns3_get_sve_support();
+ simple_allowed = hns3_tx_check_simple_support(dev);
+ tx_prepare_needed = hns3_get_tx_prep_needed(dev);
*prep = NULL;
if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_SIMPLE && simple_allowed)
return hns3_xmit_pkts_simple;
if (hns->tx_func_hint == HNS3_IO_FUNC_HINT_COMMON) {
- *prep = hns3_prep_pkts;
+ if (tx_prepare_needed)
+ *prep = hns3_prep_pkts;
return hns3_xmit_pkts;
}
if (simple_allowed)
return hns3_xmit_pkts_simple;
- *prep = hns3_prep_pkts;
+ if (tx_prepare_needed)
+ *prep = hns3_prep_pkts;
return hns3_xmit_pkts;
}
return 0;
}
+static void
+hns3_trace_rxtx_function(struct rte_eth_dev *dev)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_burst_mode rx_mode;
+ struct rte_eth_burst_mode tx_mode;
+
+ memset(&rx_mode, 0, sizeof(rx_mode));
+ memset(&tx_mode, 0, sizeof(tx_mode));
+ (void)hns3_rx_burst_mode_get(dev, 0, &rx_mode);
+ (void)hns3_tx_burst_mode_get(dev, 0, &tx_mode);
+
+ hns3_dbg(hw, "using rx_pkt_burst: %s, tx_pkt_burst: %s.",
+ rx_mode.info, tx_mode.info);
+}
+
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
eth_dev->tx_pkt_prepare = prep;
eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
+ hns3_trace_rxtx_function(eth_dev);
} else {
eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst;
eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst;
if (!hns3_dev_indep_txrx_supported(hw))
return -ENOTSUP;
+ rte_spinlock_lock(&hw->lock);
ret = hns3_reset_queue(hw, rx_queue_id, HNS3_RING_TYPE_RX);
if (ret) {
hns3_err(hw, "fail to reset Rx queue %u, ret = %d.",
rx_queue_id, ret);
+ rte_spinlock_unlock(&hw->lock);
return ret;
}
if (ret) {
hns3_err(hw, "fail to init Rx queue %u, ret = %d.",
rx_queue_id, ret);
+ rte_spinlock_unlock(&hw->lock);
return ret;
}
hns3_enable_rxq(rxq, true);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rte_spinlock_unlock(&hw->lock);
return ret;
}
if (!hns3_dev_indep_txrx_supported(hw))
return -ENOTSUP;
+ rte_spinlock_lock(&hw->lock);
hns3_enable_rxq(rxq, false);
hns3_rx_queue_release_mbufs(rxq);
hns3_reset_sw_rxq(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rte_spinlock_unlock(&hw->lock);
return 0;
}
if (!hns3_dev_indep_txrx_supported(hw))
return -ENOTSUP;
+ rte_spinlock_lock(&hw->lock);
ret = hns3_reset_queue(hw, tx_queue_id, HNS3_RING_TYPE_TX);
if (ret) {
hns3_err(hw, "fail to reset Tx queue %u, ret = %d.",
tx_queue_id, ret);
+ rte_spinlock_unlock(&hw->lock);
return ret;
}
hns3_init_txq(txq);
hns3_enable_txq(txq, true);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rte_spinlock_unlock(&hw->lock);
return ret;
}
if (!hns3_dev_indep_txrx_supported(hw))
return -ENOTSUP;
+ rte_spinlock_lock(&hw->lock);
hns3_enable_txq(txq, false);
hns3_tx_queue_release_mbufs(txq);
/*
*/
hns3_init_txq(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rte_spinlock_unlock(&hw->lock);
return 0;
}
rxdp = &rxq->rx_ring[desc_id];
bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
dev = &rte_eth_devices[rxq->port_id];
- if (dev->rx_pkt_burst == hns3_recv_pkts ||
+ if (dev->rx_pkt_burst == hns3_recv_pkts_simple ||
dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
return RTE_ETH_RX_DESC_UNAVAIL;
} else if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
- dev->rx_pkt_burst == hns3_recv_pkts_vec_sve){
+ dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
if (offset >= rxq->nb_rx_desc - rxq->rx_rearm_nb)
return RTE_ETH_RX_DESC_UNAVAIL;
} else {