X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fhns3%2Fhns3_rxtx.c;h=a28de06dfdf0d436dca183a7cd31bd4206219ee4;hb=a41f593f1bce27cd94eae0e85a8085c592b14b30;hp=602548a4f25bebeb4cdec16e2c3b9e355952fe0e;hpb=1bb4a528c41f4af4847bd3d58cc2b2b9f1ec9a27;p=dpdk.git diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index 602548a4f2..a28de06dfd 100644 --- a/drivers/net/hns3/hns3_rxtx.c +++ b/drivers/net/hns3/hns3_rxtx.c @@ -16,7 +16,7 @@ #include #endif -#include "hns3_ethdev.h" +#include "hns3_common.h" #include "hns3_rxtx.h" #include "hns3_regs.h" #include "hns3_logs.h" @@ -86,8 +86,7 @@ hns3_rx_queue_release(void *queue) hns3_rx_queue_release_mbufs(rxq); if (rxq->mz) rte_memzone_free(rxq->mz); - if (rxq->sw_ring) - rte_free(rxq->sw_ring); + rte_free(rxq->sw_ring); rte_free(rxq); } } @@ -100,10 +99,8 @@ hns3_tx_queue_release(void *queue) hns3_tx_queue_release_mbufs(txq); if (txq->mz) rte_memzone_free(txq->mz); - if (txq->sw_ring) - rte_free(txq->sw_ring); - if (txq->free) - rte_free(txq->free); + rte_free(txq->sw_ring); + rte_free(txq->free); rte_free(txq); } } @@ -322,7 +319,7 @@ hns3_init_rx_queue_hw(struct hns3_rx_queue *rxq) hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_L_REG, (uint32_t)dma_addr); hns3_write_dev(rxq, HNS3_RING_RX_BASEADDR_H_REG, - (uint32_t)((dma_addr >> 31) >> 1)); + (uint32_t)(dma_addr >> 32)); hns3_write_dev(rxq, HNS3_RING_RX_BD_LEN_REG, hns3_buf_size2type(rx_buf_len)); @@ -337,7 +334,7 @@ hns3_init_tx_queue_hw(struct hns3_tx_queue *txq) hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_L_REG, (uint32_t)dma_addr); hns3_write_dev(txq, HNS3_RING_TX_BASEADDR_H_REG, - (uint32_t)((dma_addr >> 31) >> 1)); + (uint32_t)(dma_addr >> 32)); hns3_write_dev(txq, HNS3_RING_TX_BD_NUM_REG, HNS3_CFG_DESC_NUM(txq->nb_tx_desc)); @@ -1050,7 +1047,7 @@ int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct rte_intr_handle *intr_handle = pci_dev->intr_handle; struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (dev->data->dev_conf.intr_conf.rxq == 0) @@ -1382,9 +1379,6 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev, rxq->rx_ring = (struct hns3_desc *)rx_mz->addr; rxq->rx_ring_phys_addr = rx_mz->iova; - hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx, - rxq->rx_ring_phys_addr); - return rxq; } @@ -1469,9 +1463,6 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev, txq->tx_ring = (struct hns3_desc *)tx_mz->addr; txq->tx_ring_phys_addr = tx_mz->iova; - hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx, - txq->tx_ring_phys_addr); - /* Clear tx bd */ desc = txq->tx_ring; for (i = 0; i < txq->nb_tx_desc; i++) { @@ -1899,15 +1890,15 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, /* * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE, * the pvid_sw_discard_en in the queue struct should not be changed, - * because PVID-related operations do not need to be processed by PMD - * driver. For hns3 VF device, whether it needs to process PVID depends + * because PVID-related operations do not need to be processed by PMD. + * For hns3 VF device, whether it needs to process PVID depends * on the configuration of PF kernel mode netdevice driver. And the * related PF configuration is delivered through the mailbox and finally - * reflectd in port_base_vlan_cfg. + * reflected in port_base_vlan_cfg. 
*/ if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state == - HNS3_PORT_BASE_VLAN_ENABLE; + HNS3_PORT_BASE_VLAN_ENABLE; else rxq->pvid_sw_discard_en = false; rxq->ptype_en = hns3_dev_get_support(hw, RXD_ADV_LAYOUT) ? true : false; @@ -1924,7 +1915,7 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, memset(&rxq->dfx_stats, 0, sizeof(struct hns3_rx_dfx_stats)); /* CRC len set here is used for amending packet length */ - if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) + if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) rxq->crc_len = RTE_ETHER_CRC_LEN; else rxq->crc_len = 0; @@ -1969,7 +1960,7 @@ hns3_rx_scattered_calc(struct rte_eth_dev *dev) rxq->rx_buf_len); } - if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER || + if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER || dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len) dev->data->scattered_rx = true; } @@ -2341,11 +2332,11 @@ hns3_rxd_to_vlan_tci(struct hns3_rx_queue *rxq, struct rte_mbuf *mb, mb->vlan_tci = 0; return; case HNS3_INNER_STRP_VLAN_VLD: - mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.vlan_tag); return; case HNS3_OUTER_STRP_VLAN_VLD: - mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; mb->vlan_tci = rte_le_to_cpu_16(rxd->rx.ot_vlan_tag); return; default: @@ -2388,14 +2379,14 @@ hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq) return rte_mbuf_raw_alloc(rxq->mb_pool); } -static inline void +static void hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf, - volatile struct hns3_desc *rxd) + uint64_t timestamp) { struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns); - uint64_t timestamp = rte_le_to_cpu_64(rxd->timestamp); - mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST; + mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | + RTE_MBUF_F_RX_IEEE1588_TMST; if (hns3_timestamp_rx_dynflag > 0) { *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset, rte_mbuf_timestamp_t *) = timestamp; @@ -2469,7 +2460,8 @@ hns3_recv_pkts_simple(void *rx_queue, rxe->mbuf = nmb; if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) - hns3_rx_ptp_timestamp_handle(rxq, rxm, rxdp); + hns3_rx_ptp_timestamp_handle(rxq, rxm, + rte_le_to_cpu_64(rxdp->timestamp)); dma_addr = rte_mbuf_data_iova_default(nmb); rxdp->addr = rte_cpu_to_le_64(dma_addr); @@ -2481,11 +2473,11 @@ hns3_recv_pkts_simple(void *rx_queue, rxm->data_len = rxm->pkt_len; rxm->port = rxq->port_id; rxm->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash); - rxm->ol_flags |= PKT_RX_RSS_HASH; + rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) { rxm->hash.fdir.hi = rte_le_to_cpu_16(rxd.rx.fd_id); - rxm->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + rxm->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; } rxm->nb_segs = 1; rxm->next = NULL; @@ -2500,7 +2492,7 @@ hns3_recv_pkts_simple(void *rx_queue, rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info); if (rxm->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) - rxm->ol_flags |= PKT_RX_IEEE1588_PTP; + rxm->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; hns3_rxd_to_vlan_tci(rxq, rxm, l234_info, &rxd); @@ -2540,6 +2532,7 @@ hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf *rxm; struct rte_eth_dev *dev; uint32_t bd_base_info; + uint64_t 
timestamp; uint32_t l234_info; uint32_t gro_size; uint32_t ol_info; @@ -2649,6 +2642,9 @@ hns3_recv_scattered_pkts(void *rx_queue, rxm = rxe->mbuf; rxe->mbuf = nmb; + if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) + timestamp = rte_le_to_cpu_64(rxdp->timestamp); + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); rxdp->rx.bd_base_info = 0; rxdp->addr = dma_addr; @@ -2671,7 +2667,7 @@ hns3_recv_scattered_pkts(void *rx_queue, } if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) - hns3_rx_ptp_timestamp_handle(rxq, first_seg, rxdp); + hns3_rx_ptp_timestamp_handle(rxq, first_seg, timestamp); /* * The last buffer of the received packet. packet len from @@ -2699,17 +2695,17 @@ hns3_recv_scattered_pkts(void *rx_queue, first_seg->port = rxq->port_id; first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash); - first_seg->ol_flags = PKT_RX_RSS_HASH; + first_seg->ol_flags = RTE_MBUF_F_RX_RSS_HASH; if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) { first_seg->hash.fdir.hi = rte_le_to_cpu_16(rxd.rx.fd_id); - first_seg->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + first_seg->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; } gro_size = hns3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M, HNS3_RXD_GRO_SIZE_S); if (gro_size != 0) { - first_seg->ol_flags |= PKT_RX_LRO; + first_seg->ol_flags |= RTE_MBUF_F_RX_LRO; first_seg->tso_segsz = gro_size; } @@ -2724,7 +2720,7 @@ hns3_recv_scattered_pkts(void *rx_queue, l234_info, ol_info); if (first_seg->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) - rxm->ol_flags |= PKT_RX_IEEE1588_PTP; + rxm->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; hns3_rxd_to_vlan_tci(rxq, first_seg, l234_info, &rxd); @@ -2845,7 +2841,7 @@ hns3_get_rx_function(struct rte_eth_dev *dev) vec_allowed = vec_support && hns3_get_default_vec_support(); sve_allowed = vec_support && hns3_get_sve_support(); simple_allowed = !dev->data->scattered_rx && - (offloads & DEV_RX_OFFLOAD_TCP_LRO) == 0; + (offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) == 0; if (hns->rx_func_hint == HNS3_IO_FUNC_HINT_VEC && vec_allowed) return hns3_recv_pkts_vec; @@ -3039,11 +3035,11 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, /* * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE, * the pvid_sw_shift_en in the queue struct should not be changed, - * because PVID-related operations do not need to be processed by PMD - * driver. For hns3 VF device, whether it needs to process PVID depends + * because PVID-related operations do not need to be processed by PMD. + * For hns3 VF device, whether it needs to process PVID depends * on the configuration of PF kernel mode netdev driver. And the * related PF configuration is delivered through the mailbox and finally - * reflectd in port_base_vlan_cfg. + * reflected in port_base_vlan_cfg. 
*/ if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state == @@ -3059,6 +3055,8 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, txq->min_tx_pkt_len = hw->min_tx_pkt_len; txq->tso_mode = hw->tso_mode; txq->udp_cksum_mode = hw->udp_cksum_mode; + txq->mbuf_fast_free_en = !!(dev->data->dev_conf.txmode.offloads & + RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE); memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats)); memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats)); @@ -3075,40 +3073,51 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, return 0; } -static void +static int hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq) { uint16_t tx_next_clean = txq->next_to_clean; - uint16_t tx_next_use = txq->next_to_use; - uint16_t tx_bd_ready = txq->tx_bd_ready; - uint16_t tx_bd_max = txq->nb_tx_desc; - struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean]; + uint16_t tx_next_use = txq->next_to_use; + struct hns3_entry *tx_entry = &txq->sw_ring[tx_next_clean]; struct hns3_desc *desc = &txq->tx_ring[tx_next_clean]; - struct rte_mbuf *mbuf; + int i; - while ((!(desc->tx.tp_fe_sc_vld_ra_ri & - rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) && - tx_next_use != tx_next_clean) { - mbuf = tx_bak_pkt->mbuf; - if (mbuf) { - rte_pktmbuf_free_seg(mbuf); - tx_bak_pkt->mbuf = NULL; - } + if (tx_next_use >= tx_next_clean && + tx_next_use < tx_next_clean + txq->tx_rs_thresh) + return -1; - desc++; - tx_bak_pkt++; - tx_next_clean++; - tx_bd_ready++; - - if (tx_next_clean >= tx_bd_max) { - tx_next_clean = 0; - desc = txq->tx_ring; - tx_bak_pkt = txq->sw_ring; - } + /* + * All mbufs can be released only when the VLD bits of all + * descriptors in a batch are cleared. + */ + for (i = 0; i < txq->tx_rs_thresh; i++) { + if (desc[i].tx.tp_fe_sc_vld_ra_ri & + rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B))) + return -1; } - txq->next_to_clean = tx_next_clean; - txq->tx_bd_ready = tx_bd_ready; + for (i = 0; i < txq->tx_rs_thresh; i++) { + rte_pktmbuf_free_seg(tx_entry[i].mbuf); + tx_entry[i].mbuf = NULL; + } + + /* Update numbers of available descriptor due to buffer freed */ + txq->tx_bd_ready += txq->tx_rs_thresh; + txq->next_to_clean += txq->tx_rs_thresh; + if (txq->next_to_clean >= txq->nb_tx_desc) + txq->next_to_clean = 0; + + return 0; +} + +static inline int +hns3_tx_free_required_buffer(struct hns3_tx_queue *txq, uint16_t required_bds) +{ + while (required_bds > txq->tx_bd_ready) { + if (hns3_tx_free_useless_buffer(txq) != 0) + return -1; + } + return 0; } int @@ -3139,7 +3148,7 @@ hns3_restore_gro_conf(struct hns3_hw *hw) int ret; offloads = hw->data->dev_conf.rxmode.offloads; - gro_en = offloads & DEV_RX_OFFLOAD_TCP_LRO ? true : false; + gro_en = offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false; ret = hns3_config_gro(hw, gro_en); if (ret) hns3_err(hw, "restore hardware GRO to %s failed, ret = %d", @@ -3151,7 +3160,7 @@ hns3_restore_gro_conf(struct hns3_hw *hw) static inline bool hns3_pkt_is_tso(struct rte_mbuf *m) { - return (m->tso_segsz != 0 && m->ol_flags & PKT_TX_TCP_SEG); + return (m->tso_segsz != 0 && m->ol_flags & RTE_MBUF_F_TX_TCP_SEG); } static void @@ -3184,7 +3193,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, uint32_t paylen; hdr_len = rxm->l2_len + rxm->l3_len + rxm->l4_len; - hdr_len += (ol_flags & PKT_TX_TUNNEL_MASK) ? + hdr_len += (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ? 
rxm->outer_l2_len + rxm->outer_l3_len : 0; paylen = rxm->pkt_len - hdr_len; desc->tx.paylen_fd_dop_ol4cs |= rte_cpu_to_le_32(paylen); @@ -3195,18 +3204,18 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, * in Tx direction based on hns3 network engine. So when the number of * VLANs in the packets represented by rxm plus the number of VLAN * offload by hardware such as PVID etc, exceeds two, the packets will - * be discarded or the original VLAN of the packets will be overwitted + * be discarded or the original VLAN of the packets will be overwritten * by hardware. When the PF PVID is enabled by calling the API function * named rte_eth_dev_set_vlan_pvid or the VF PVID is enabled by the hns3 * PF kernel ether driver, the outer VLAN tag will always be the PVID. * To avoid the VLAN of Tx descriptor is overwritten by PVID, it should * be added to the position close to the IP header when PVID is enabled. */ - if (!txq->pvid_sw_shift_en && ol_flags & (PKT_TX_VLAN_PKT | - PKT_TX_QINQ_PKT)) { + if (!txq->pvid_sw_shift_en && + ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) { desc->tx.ol_type_vlan_len_msec |= rte_cpu_to_le_32(BIT(HNS3_TXD_OVLAN_B)); - if (ol_flags & PKT_TX_QINQ_PKT) + if (ol_flags & RTE_MBUF_F_TX_QINQ) desc->tx.outer_vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci_outer); else @@ -3214,14 +3223,14 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, rte_cpu_to_le_16(rxm->vlan_tci); } - if (ol_flags & PKT_TX_QINQ_PKT || - ((ol_flags & PKT_TX_VLAN_PKT) && txq->pvid_sw_shift_en)) { + if (ol_flags & RTE_MBUF_F_TX_QINQ || + ((ol_flags & RTE_MBUF_F_TX_VLAN) && txq->pvid_sw_shift_en)) { desc->tx.type_cs_vlan_tso_len |= rte_cpu_to_le_32(BIT(HNS3_TXD_VLAN_B)); desc->tx.vlan_tag = rte_cpu_to_le_16(rxm->vlan_tci); } - if (ol_flags & PKT_TX_IEEE1588_TMST) + if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) desc->tx.tp_fe_sc_vld_ra_ri |= rte_cpu_to_le_16(BIT(HNS3_TXD_TSYN_B)); } @@ -3343,14 +3352,14 @@ hns3_parse_outer_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec) uint64_t ol_flags = m->ol_flags; /* (outer) IP header type */ - if (ol_flags & PKT_TX_OUTER_IPV4) { - if (ol_flags & PKT_TX_OUTER_IP_CKSUM) + if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) { + if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_CSUM); else tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, HNS3_OL3T_IPV4_NO_CSUM); - } else if (ol_flags & PKT_TX_OUTER_IPV6) { + } else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) { tmp |= hns3_gen_field_val(HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); } @@ -3370,17 +3379,17 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, uint64_t ol_flags = m->ol_flags; uint16_t inner_l2_len; - switch (ol_flags & PKT_TX_TUNNEL_MASK) { - case PKT_TX_TUNNEL_VXLAN_GPE: - case PKT_TX_TUNNEL_GENEVE: - case PKT_TX_TUNNEL_VXLAN: + switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { + case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE: + case RTE_MBUF_F_TX_TUNNEL_GENEVE: + case RTE_MBUF_F_TX_TUNNEL_VXLAN: /* MAC in UDP tunnelling packet, include VxLAN and GENEVE */ tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S, HNS3_TUN_MAC_IN_UDP); /* * The inner l2 length of mbuf is the sum of outer l4 length, * tunneling header length and inner l2 length for a tunnel - * packect. But in hns3 tx descriptor, the tunneling header + * packet. But in hns3 tx descriptor, the tunneling header * length is contained in the field of outer L4 length. 
* Therefore, driver need to calculate the outer L4 length and * inner L2 length. @@ -3392,11 +3401,11 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, inner_l2_len = m->l2_len - RTE_ETHER_VXLAN_HLEN; break; - case PKT_TX_TUNNEL_GRE: + case RTE_MBUF_F_TX_TUNNEL_GRE: tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE); /* - * For NVGRE tunnel packect, the outer L4 is empty. So only + * For NVGRE tunnel packet, the outer L4 is empty. So only * fill the NVGRE header length to the outer L4 field. */ tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, @@ -3439,9 +3448,9 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m, * mbuf, but for hns3 descriptor, it is contained in the outer L4. So, * there is a need that switching between them. To avoid multiple * calculations, the length of the L2 header include the outer and - * inner, will be filled during the parsing of tunnel packects. + * inner, will be filled during the parsing of tunnel packets. */ - if (!(ol_flags & PKT_TX_TUNNEL_MASK)) { + if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) { /* * For non tunnel type the tunnel type id is 0, so no need to * assign a value to it. Only the inner(normal) L2 header length @@ -3457,7 +3466,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m, * calculate the header length. */ if (unlikely(!(ol_flags & - (PKT_TX_OUTER_IP_CKSUM | PKT_TX_OUTER_UDP_CKSUM)) && + (RTE_MBUF_F_TX_OUTER_IP_CKSUM | RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) && m->outer_l2_len == 0)) { struct rte_net_hdr_lens hdr_len; (void)rte_net_get_ptype(m, &hdr_len, @@ -3474,7 +3483,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m, desc->tx.ol_type_vlan_len_msec = rte_cpu_to_le_32(tmp_outer); desc->tx.type_cs_vlan_tso_len = rte_cpu_to_le_32(tmp_inner); - tmp_ol4cs = ol_flags & PKT_TX_OUTER_UDP_CKSUM ? + tmp_ol4cs = ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM ? 
BIT(HNS3_TXD_OL4CS_B) : 0; desc->tx.paylen_fd_dop_ol4cs = rte_cpu_to_le_32(tmp_ol4cs); @@ -3489,9 +3498,9 @@ hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len) uint32_t tmp; tmp = *type_cs_vlan_tso_len; - if (ol_flags & PKT_TX_IPV4) + if (ol_flags & RTE_MBUF_F_TX_IPV4) l3_type = HNS3_L3T_IPV4; - else if (ol_flags & PKT_TX_IPV6) + else if (ol_flags & RTE_MBUF_F_TX_IPV6) l3_type = HNS3_L3T_IPV6; else l3_type = HNS3_L3T_NONE; @@ -3503,7 +3512,7 @@ hns3_parse_l3_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len) tmp |= hns3_gen_field_val(HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, l3_type); /* Enable L3 checksum offloads */ - if (ol_flags & PKT_TX_IP_CKSUM) + if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) tmp |= BIT(HNS3_TXD_L3CS_B); *type_cs_vlan_tso_len = tmp; } @@ -3514,20 +3523,20 @@ hns3_parse_l4_cksum_params(struct rte_mbuf *m, uint32_t *type_cs_vlan_tso_len) uint64_t ol_flags = m->ol_flags; uint32_t tmp; /* Enable L4 checksum offloads */ - switch (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG)) { - case PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG: - case PKT_TX_TCP_CKSUM: - case PKT_TX_TCP_SEG: + switch (ol_flags & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG)) { + case RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_TCP_SEG: + case RTE_MBUF_F_TX_TCP_CKSUM: + case RTE_MBUF_F_TX_TCP_SEG: tmp = *type_cs_vlan_tso_len; tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP); break; - case PKT_TX_UDP_CKSUM: + case RTE_MBUF_F_TX_UDP_CKSUM: tmp = *type_cs_vlan_tso_len; tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_UDP); break; - case PKT_TX_SCTP_CKSUM: + case RTE_MBUF_F_TX_SCTP_CKSUM: tmp = *type_cs_vlan_tso_len; tmp |= hns3_gen_field_val(HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_SCTP); @@ -3584,7 +3593,7 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num, /* ensure the first 8 frags is greater than mss + header */ hdr_len = tx_pkts->l2_len + tx_pkts->l3_len + tx_pkts->l4_len; - hdr_len += (tx_pkts->ol_flags & PKT_TX_TUNNEL_MASK) ? + hdr_len += (tx_pkts->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ? 
tx_pkts->outer_l2_len + tx_pkts->outer_l3_len : 0; if (tot_len + m_last->data_len < tx_pkts->tso_segsz + hdr_len) return true; @@ -3614,15 +3623,15 @@ hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, struct rte_ipv4_hdr *ipv4_hdr; ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->outer_l2_len); - if (ol_flags & PKT_TX_OUTER_IP_CKSUM) + if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ipv4_hdr->hdr_checksum = 0; - if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) { + if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { struct rte_udp_hdr *udp_hdr; /* - * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo + * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo * header for TSO packets */ - if (ol_flags & PKT_TX_TCP_SEG) + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) return true; udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, m->outer_l2_len + m->outer_l3_len); @@ -3641,13 +3650,13 @@ hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, struct rte_ipv6_hdr *ipv6_hdr; ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, m->outer_l2_len); - if (ol_flags & PKT_TX_OUTER_UDP_CKSUM) { + if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { struct rte_udp_hdr *udp_hdr; /* - * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo + * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo * header for TSO packets */ - if (ol_flags & PKT_TX_TCP_SEG) + if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) return true; udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, m->outer_l2_len + m->outer_l3_len); @@ -3666,10 +3675,10 @@ hns3_outer_header_cksum_prepare(struct rte_mbuf *m) uint32_t paylen, hdr_len, l4_proto; struct rte_udp_hdr *udp_hdr; - if (!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6))) + if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6))) return; - if (ol_flags & PKT_TX_OUTER_IPV4) { + if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) { if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto)) return; } else { @@ -3678,7 +3687,7 @@ hns3_outer_header_cksum_prepare(struct rte_mbuf *m) } /* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */ - if (l4_proto == IPPROTO_UDP && (ol_flags & PKT_TX_TCP_SEG)) { + if (l4_proto == IPPROTO_UDP && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) { hdr_len = m->l2_len + m->l3_len + m->l4_len; hdr_len += m->outer_l2_len + m->outer_l3_len; paylen = m->pkt_len - hdr_len; @@ -3704,7 +3713,7 @@ hns3_check_tso_pkt_valid(struct rte_mbuf *m) return -EINVAL; hdr_len = m->l2_len + m->l3_len + m->l4_len; - hdr_len += (m->ol_flags & PKT_TX_TUNNEL_MASK) ? + hdr_len += (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ? m->outer_l2_len + m->outer_l3_len : 0; if (hdr_len > HNS3_MAX_TSO_HDR_SIZE) return -EINVAL; @@ -3754,12 +3763,12 @@ hns3_vld_vlan_chk(struct hns3_tx_queue *txq, struct rte_mbuf *m) * implementation function named hns3_prep_pkts to inform users that * these packets will be discarded. 
*/ - if (m->ol_flags & PKT_TX_QINQ_PKT) + if (m->ol_flags & RTE_MBUF_F_TX_QINQ) return -EINVAL; eh = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) { - if (m->ol_flags & PKT_TX_VLAN_PKT) + if (m->ol_flags & RTE_MBUF_F_TX_VLAN) return -EINVAL; /* Ensure the incoming packet is not a QinQ packet */ @@ -3779,7 +3788,7 @@ hns3_udp_cksum_help(struct rte_mbuf *m) uint16_t cksum = 0; uint32_t l4_len; - if (ol_flags & PKT_TX_IPV4) { + if (ol_flags & RTE_MBUF_F_TX_IPV4) { struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, m->l2_len); l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - m->l3_len; @@ -3810,8 +3819,8 @@ hns3_validate_tunnel_cksum(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m) uint16_t dst_port; if (tx_queue->udp_cksum_mode == HNS3_SPECIAL_PORT_HW_CKSUM_MODE || - ol_flags & PKT_TX_TUNNEL_MASK || - (ol_flags & PKT_TX_L4_MASK) != PKT_TX_UDP_CKSUM) + ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK || + (ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_UDP_CKSUM) return true; /* * A UDP packet with the same dst_port as VXLAN\VXLAN_GPE\GENEVE will @@ -3828,7 +3837,7 @@ hns3_validate_tunnel_cksum(struct hns3_tx_queue *tx_queue, struct rte_mbuf *m) case RTE_VXLAN_GPE_DEFAULT_PORT: case RTE_GENEVE_DEFAULT_PORT: udp_hdr->dgram_cksum = hns3_udp_cksum_help(m); - m->ol_flags = ol_flags & ~PKT_TX_L4_MASK; + m->ol_flags = ol_flags & ~RTE_MBUF_F_TX_L4_MASK; return false; default: return true; @@ -3991,6 +4000,14 @@ hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq) tx_entry = &txq->sw_ring[txq->next_to_clean]; + if (txq->mbuf_fast_free_en) { + rte_mempool_put_bulk(tx_entry->mbuf->pool, + (void **)tx_entry, txq->tx_rs_thresh); + for (i = 0; i < txq->tx_rs_thresh; i++) + tx_entry[i].mbuf = NULL; + goto update_field; + } + for (i = 0; i < txq->tx_rs_thresh; i++) rte_prefetch0((tx_entry + i)->mbuf); for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) { @@ -3998,6 +4015,7 @@ hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq) tx_entry->mbuf = NULL; } +update_field: txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc; txq->tx_bd_ready += txq->tx_rs_thresh; } @@ -4022,7 +4040,7 @@ static inline void hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) { #define PER_LOOP_NUM 4 - const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B); + uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B); uint64_t dma_addr; uint32_t i; @@ -4033,6 +4051,8 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) txdp->tx.paylen_fd_dop_ol4cs = 0; txdp->tx.type_cs_vlan_tso_len = 0; txdp->tx.ol_type_vlan_len_msec = 0; + if (unlikely((*pkts)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)) + bd_flag |= BIT(HNS3_TXD_TSYN_B); txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag); } } @@ -4040,7 +4060,7 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) static inline void hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) { - const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B); + uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B); uint64_t dma_addr; dma_addr = rte_mbuf_data_iova(*pkts); @@ -4049,6 +4069,8 @@ hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) txdp->tx.paylen_fd_dop_ol4cs = 0; txdp->tx.type_cs_vlan_tso_len = 0; txdp->tx.ol_type_vlan_len_msec = 0; + if (unlikely((*pkts)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)) + bd_flag |= BIT(HNS3_TXD_TSYN_B); txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag); } @@ -4136,8 
+4158,8 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) uint16_t nb_tx; uint16_t i; - /* free useless buffer */ - hns3_tx_free_useless_buffer(txq); + if (txq->tx_bd_ready < txq->tx_free_thresh) + (void)hns3_tx_free_useless_buffer(txq); tx_next_use = txq->next_to_use; tx_bd_max = txq->nb_tx_desc; @@ -4152,11 +4174,14 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) nb_buf = tx_pkt->nb_segs; if (nb_buf > txq->tx_bd_ready) { - txq->dfx_stats.queue_full_cnt++; - if (nb_tx == 0) - return 0; - - goto end_of_tx; + /* Try to release the required MBUF, but avoid releasing + * all MBUFs, otherwise, the MBUFs will be released for + * a long time and may cause jitter. + */ + if (hns3_tx_free_required_buffer(txq, nb_buf) != 0) { + txq->dfx_stats.queue_full_cnt++; + goto end_of_tx; + } } /* @@ -4287,11 +4312,7 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev) { uint64_t offloads = dev->data->dev_conf.txmode.offloads; - struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (hns3_dev_get_support(hw, PTP)) - return false; - - return (offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)); + return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)); } static bool @@ -4303,16 +4324,16 @@ hns3_get_tx_prep_needed(struct rte_eth_dev *dev) return true; #else #define HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK (\ - DEV_TX_OFFLOAD_IPV4_CKSUM | \ - DEV_TX_OFFLOAD_TCP_CKSUM | \ - DEV_TX_OFFLOAD_UDP_CKSUM | \ - DEV_TX_OFFLOAD_SCTP_CKSUM | \ - DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ - DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \ - DEV_TX_OFFLOAD_TCP_TSO | \ - DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \ - DEV_TX_OFFLOAD_GRE_TNL_TSO | \ - DEV_TX_OFFLOAD_GENEVE_TNL_TSO) + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \ + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \ + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \ + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \ + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ + RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \ + RTE_ETH_TX_OFFLOAD_TCP_TSO | \ + RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \ + RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \ + RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO) uint64_t tx_offload = dev->data->dev_conf.txmode.offloads; if (tx_offload & HNS3_DEV_TX_CSKUM_TSO_OFFLOAD_MASK) @@ -4359,14 +4380,6 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) return hns3_xmit_pkts; } -uint16_t -hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused, - struct rte_mbuf **pkts __rte_unused, - uint16_t pkts_n __rte_unused) -{ - return 0; -} - static void hns3_trace_rxtx_function(struct rte_eth_dev *dev) { @@ -4383,7 +4396,21 @@ hns3_trace_rxtx_function(struct rte_eth_dev *dev) rx_mode.info, tx_mode.info); } -void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) +static void +hns3_eth_dev_fp_ops_config(const struct rte_eth_dev *dev) +{ + struct rte_eth_fp_ops *fpo = rte_eth_fp_ops; + uint16_t port_id = dev->data->port_id; + + fpo[port_id].rx_pkt_burst = dev->rx_pkt_burst; + fpo[port_id].tx_pkt_burst = dev->tx_pkt_burst; + fpo[port_id].tx_pkt_prepare = dev->tx_pkt_prepare; + fpo[port_id].rx_descriptor_status = dev->rx_descriptor_status; + fpo[port_id].tx_descriptor_status = dev->tx_descriptor_status; +} + +void +hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); struct hns3_adapter *hns = eth_dev->data->dev_private; @@ -4394,16 +4421,18 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev); eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status; 
eth_dev->tx_pkt_burst = hw->set_link_down ? - hns3_dummy_rxtx_burst : + rte_eth_pkt_burst_dummy : hns3_get_tx_function(eth_dev, &prep); eth_dev->tx_pkt_prepare = prep; eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status; hns3_trace_rxtx_function(eth_dev); } else { - eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst; - eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst; + eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; + eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; eth_dev->tx_pkt_prepare = NULL; } + + hns3_eth_dev_fp_ops_config(eth_dev); } void @@ -4566,46 +4595,22 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) static int hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt) { - uint16_t next_to_clean = txq->next_to_clean; - uint16_t next_to_use = txq->next_to_use; - uint16_t tx_bd_ready = txq->tx_bd_ready; - struct hns3_entry *tx_pkt = &txq->sw_ring[next_to_clean]; - struct hns3_desc *desc = &txq->tx_ring[next_to_clean]; + uint16_t round_free_cnt; uint32_t idx; if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) free_cnt = txq->nb_tx_desc; - for (idx = 0; idx < free_cnt; idx++) { - if (next_to_clean == next_to_use) - break; + if (txq->tx_rs_thresh == 0) + return 0; - if (desc->tx.tp_fe_sc_vld_ra_ri & - rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B))) + round_free_cnt = roundup(free_cnt, txq->tx_rs_thresh); + for (idx = 0; idx < round_free_cnt; idx += txq->tx_rs_thresh) { + if (hns3_tx_free_useless_buffer(txq) != 0) break; - - if (tx_pkt->mbuf != NULL) { - rte_pktmbuf_free_seg(tx_pkt->mbuf); - tx_pkt->mbuf = NULL; - } - - next_to_clean++; - tx_bd_ready++; - tx_pkt++; - desc++; - if (next_to_clean == txq->nb_tx_desc) { - tx_pkt = txq->sw_ring; - desc = txq->tx_ring; - next_to_clean = 0; - } } - if (idx > 0) { - txq->next_to_clean = next_to_clean; - txq->tx_bd_ready = tx_bd_ready; - } - - return (int)idx; + return RTE_MIN(idx, free_cnt); } int @@ -4616,7 +4621,7 @@ hns3_tx_done_cleanup(void *txq, uint32_t free_cnt) if (dev->tx_pkt_burst == hns3_xmit_pkts) return hns3_tx_done_cleanup_full(q, free_cnt); - else if (dev->tx_pkt_burst == hns3_dummy_rxtx_burst) + else if (dev->tx_pkt_burst == rte_eth_pkt_burst_dummy) return 0; else return -ENOTSUP; @@ -4726,8 +4731,13 @@ hns3_enable_rxd_adv_layout(struct hns3_hw *hw) void hns3_stop_tx_datapath(struct rte_eth_dev *dev) { - dev->tx_pkt_burst = hns3_dummy_rxtx_burst; + dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; dev->tx_pkt_prepare = NULL; + hns3_eth_dev_fp_ops_config(dev); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + return; + rte_wmb(); /* Disable tx datapath on secondary process. */ hns3_mp_req_stop_tx(dev); @@ -4742,5 +4752,10 @@ hns3_start_tx_datapath(struct rte_eth_dev *dev) dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep); dev->tx_pkt_prepare = prep; + hns3_eth_dev_fp_ops_config(dev); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + return; + hns3_mp_req_start_tx(dev); }
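
The reworked Tx cleanup in this patch (hns3_tx_free_useless_buffer() now returning int, plus the new hns3_tx_free_required_buffer() helper) frees software-ring mbufs one tx_rs_thresh batch at a time, and only once hardware has cleared the VLD bit of every descriptor in that batch. A minimal sketch of that idea follows; the sketch_txq structure, its field names and the vld_bits stand-in are simplified assumptions for illustration, not the driver's real definitions, and it assumes nb_tx_desc is a multiple of tx_rs_thresh so a batch never wraps around the ring.

/* Batched Tx cleanup sketch (illustrative only, simplified from the patch). */
#include <stdint.h>
#include <rte_mbuf.h>

struct sketch_txq {
	struct rte_mbuf **sw_ring;        /* one mbuf pointer per Tx descriptor */
	volatile uint8_t *vld_bits;       /* stand-in for each BD's VLD bit */
	uint16_t nb_tx_desc;              /* assumed multiple of tx_rs_thresh */
	uint16_t tx_rs_thresh;
	uint16_t next_to_clean;
	uint16_t tx_bd_ready;
};

/* Return 0 if one batch was reclaimed, -1 if hardware is not done with it. */
static int
sketch_tx_free_batch(struct sketch_txq *txq)
{
	uint16_t start = txq->next_to_clean;
	uint16_t i;

	/*
	 * All mbufs of a batch are released only when the VLD bit of every
	 * descriptor in that batch has been cleared by hardware.
	 */
	for (i = 0; i < txq->tx_rs_thresh; i++)
		if (txq->vld_bits[start + i])
			return -1;

	for (i = 0; i < txq->tx_rs_thresh; i++) {
		rte_pktmbuf_free_seg(txq->sw_ring[start + i]);
		txq->sw_ring[start + i] = NULL;
	}

	/* Update the count of available descriptors due to the freed buffers. */
	txq->tx_bd_ready += txq->tx_rs_thresh;
	txq->next_to_clean += txq->tx_rs_thresh;
	if (txq->next_to_clean >= txq->nb_tx_desc)
		txq->next_to_clean = 0;
	return 0;
}

In hns3_xmit_pkts() the patch then calls this cleanup only when tx_bd_ready drops below tx_free_thresh, and frees just enough batches to cover the current packet's descriptor need, rather than draining the whole ring on every burst.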
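
The new mbuf_fast_free_en path added to hns3_tx_free_buffer_simple() relies on the RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE contract: the application guarantees that all mbufs transmitted on the queue are non-segmented, have a reference count of one and come from a single mempool, so a completed batch can be returned with one bulk operation instead of per-segment frees. A hedged sketch of that fast path follows; sketch_fast_free() is a hypothetical helper written for illustration, not a function from the driver.

/* MBUF_FAST_FREE fast-path sketch (illustrative only). */
#include <stdbool.h>
#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

static void
sketch_fast_free(struct rte_mbuf **batch, uint16_t n, bool fast_free_en)
{
	uint16_t i;

	if (n == 0)
		return;

	if (fast_free_en) {
		/* One pool, refcnt == 1, single segment: bulk return is safe. */
		rte_mempool_put_bulk(batch[0]->pool, (void **)batch, n);
		return;
	}

	/* Generic path: free each segment individually. */
	for (i = 0; i < n; i++)
		rte_pktmbuf_free_seg(batch[i]);
}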