X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fhns3%2Fhns3_rxtx.c;h=a28de06dfdf0d436dca183a7cd31bd4206219ee4;hb=a41f593f1bce27cd94eae0e85a8085c592b14b30;hp=d26e26233551baf5e9933822ba29ede51e6cae7a;hpb=a4c7152d0581bd2a4c3c3bb82f42bc7ec52a99a1;p=dpdk.git diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c index d26e262335..a28de06dfd 100644 --- a/drivers/net/hns3/hns3_rxtx.c +++ b/drivers/net/hns3/hns3_rxtx.c @@ -86,8 +86,7 @@ hns3_rx_queue_release(void *queue) hns3_rx_queue_release_mbufs(rxq); if (rxq->mz) rte_memzone_free(rxq->mz); - if (rxq->sw_ring) - rte_free(rxq->sw_ring); + rte_free(rxq->sw_ring); rte_free(rxq); } } @@ -100,10 +99,8 @@ hns3_tx_queue_release(void *queue) hns3_tx_queue_release_mbufs(txq); if (txq->mz) rte_memzone_free(txq->mz); - if (txq->sw_ring) - rte_free(txq->sw_ring); - if (txq->free) - rte_free(txq->free); + rte_free(txq->sw_ring); + rte_free(txq->free); rte_free(txq); } } @@ -1382,9 +1379,6 @@ hns3_alloc_rxq_and_dma_zone(struct rte_eth_dev *dev, rxq->rx_ring = (struct hns3_desc *)rx_mz->addr; rxq->rx_ring_phys_addr = rx_mz->iova; - hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx, - rxq->rx_ring_phys_addr); - return rxq; } @@ -1469,9 +1463,6 @@ hns3_alloc_txq_and_dma_zone(struct rte_eth_dev *dev, txq->tx_ring = (struct hns3_desc *)tx_mz->addr; txq->tx_ring_phys_addr = tx_mz->iova; - hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx, - txq->tx_ring_phys_addr); - /* Clear tx bd */ desc = txq->tx_ring; for (i = 0; i < txq->nb_tx_desc; i++) { @@ -1899,11 +1890,11 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, /* * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE, * the pvid_sw_discard_en in the queue struct should not be changed, - * because PVID-related operations do not need to be processed by PMD - * driver. For hns3 VF device, whether it needs to process PVID depends + * because PVID-related operations do not need to be processed by PMD. + * For hns3 VF device, whether it needs to process PVID depends * on the configuration of PF kernel mode netdevice driver. And the * related PF configuration is delivered through the mailbox and finally - * reflectd in port_base_vlan_cfg. + * reflected in port_base_vlan_cfg. 
*/ if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state == @@ -2388,14 +2379,14 @@ hns3_rx_alloc_buffer(struct hns3_rx_queue *rxq) return rte_mbuf_raw_alloc(rxq->mb_pool); } -static inline void +static void hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf, - volatile struct hns3_desc *rxd) + uint64_t timestamp) { struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns); - uint64_t timestamp = rte_le_to_cpu_64(rxd->timestamp); - mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | RTE_MBUF_F_RX_IEEE1588_TMST; + mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | + RTE_MBUF_F_RX_IEEE1588_TMST; if (hns3_timestamp_rx_dynflag > 0) { *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset, rte_mbuf_timestamp_t *) = timestamp; @@ -2469,7 +2460,8 @@ hns3_recv_pkts_simple(void *rx_queue, rxe->mbuf = nmb; if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) - hns3_rx_ptp_timestamp_handle(rxq, rxm, rxdp); + hns3_rx_ptp_timestamp_handle(rxq, rxm, + rte_le_to_cpu_64(rxdp->timestamp)); dma_addr = rte_mbuf_data_iova_default(nmb); rxdp->addr = rte_cpu_to_le_64(dma_addr); @@ -2540,6 +2532,7 @@ hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf *rxm; struct rte_eth_dev *dev; uint32_t bd_base_info; + uint64_t timestamp; uint32_t l234_info; uint32_t gro_size; uint32_t ol_info; @@ -2649,6 +2642,9 @@ hns3_recv_scattered_pkts(void *rx_queue, rxm = rxe->mbuf; rxe->mbuf = nmb; + if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) + timestamp = rte_le_to_cpu_64(rxdp->timestamp); + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); rxdp->rx.bd_base_info = 0; rxdp->addr = dma_addr; @@ -2671,7 +2667,7 @@ hns3_recv_scattered_pkts(void *rx_queue, } if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) - hns3_rx_ptp_timestamp_handle(rxq, first_seg, rxdp); + hns3_rx_ptp_timestamp_handle(rxq, first_seg, timestamp); /* * The last buffer of the received packet. packet len from @@ -3039,11 +3035,11 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, /* * For hns3 PF device, if the VLAN mode is HW_SHIFT_AND_DISCARD_MODE, * the pvid_sw_shift_en in the queue struct should not be changed, - * because PVID-related operations do not need to be processed by PMD - * driver. For hns3 VF device, whether it needs to process PVID depends + * because PVID-related operations do not need to be processed by PMD. + * For hns3 VF device, whether it needs to process PVID depends * on the configuration of PF kernel mode netdev driver. And the * related PF configuration is delivered through the mailbox and finally - * reflectd in port_base_vlan_cfg. + * reflected in port_base_vlan_cfg. 
*/ if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE) txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state == @@ -3059,6 +3055,8 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, txq->min_tx_pkt_len = hw->min_tx_pkt_len; txq->tso_mode = hw->tso_mode; txq->udp_cksum_mode = hw->udp_cksum_mode; + txq->mbuf_fast_free_en = !!(dev->data->dev_conf.txmode.offloads & + RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE); memset(&txq->basic_stats, 0, sizeof(struct hns3_tx_basic_stats)); memset(&txq->dfx_stats, 0, sizeof(struct hns3_tx_dfx_stats)); @@ -3075,40 +3073,51 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, return 0; } -static void +static int hns3_tx_free_useless_buffer(struct hns3_tx_queue *txq) { uint16_t tx_next_clean = txq->next_to_clean; - uint16_t tx_next_use = txq->next_to_use; - uint16_t tx_bd_ready = txq->tx_bd_ready; - uint16_t tx_bd_max = txq->nb_tx_desc; - struct hns3_entry *tx_bak_pkt = &txq->sw_ring[tx_next_clean]; + uint16_t tx_next_use = txq->next_to_use; + struct hns3_entry *tx_entry = &txq->sw_ring[tx_next_clean]; struct hns3_desc *desc = &txq->tx_ring[tx_next_clean]; - struct rte_mbuf *mbuf; + int i; - while ((!(desc->tx.tp_fe_sc_vld_ra_ri & - rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))) && - tx_next_use != tx_next_clean) { - mbuf = tx_bak_pkt->mbuf; - if (mbuf) { - rte_pktmbuf_free_seg(mbuf); - tx_bak_pkt->mbuf = NULL; - } + if (tx_next_use >= tx_next_clean && + tx_next_use < tx_next_clean + txq->tx_rs_thresh) + return -1; - desc++; - tx_bak_pkt++; - tx_next_clean++; - tx_bd_ready++; - - if (tx_next_clean >= tx_bd_max) { - tx_next_clean = 0; - desc = txq->tx_ring; - tx_bak_pkt = txq->sw_ring; - } + /* + * All mbufs can be released only when the VLD bits of all + * descriptors in a batch are cleared. + */ + for (i = 0; i < txq->tx_rs_thresh; i++) { + if (desc[i].tx.tp_fe_sc_vld_ra_ri & + rte_le_to_cpu_16(BIT(HNS3_TXD_VLD_B))) + return -1; } - txq->next_to_clean = tx_next_clean; - txq->tx_bd_ready = tx_bd_ready; + for (i = 0; i < txq->tx_rs_thresh; i++) { + rte_pktmbuf_free_seg(tx_entry[i].mbuf); + tx_entry[i].mbuf = NULL; + } + + /* Update numbers of available descriptor due to buffer freed */ + txq->tx_bd_ready += txq->tx_rs_thresh; + txq->next_to_clean += txq->tx_rs_thresh; + if (txq->next_to_clean >= txq->nb_tx_desc) + txq->next_to_clean = 0; + + return 0; +} + +static inline int +hns3_tx_free_required_buffer(struct hns3_tx_queue *txq, uint16_t required_bds) +{ + while (required_bds > txq->tx_bd_ready) { + if (hns3_tx_free_useless_buffer(txq) != 0) + return -1; + } + return 0; } int @@ -3195,7 +3204,7 @@ hns3_fill_first_desc(struct hns3_tx_queue *txq, struct hns3_desc *desc, * in Tx direction based on hns3 network engine. So when the number of * VLANs in the packets represented by rxm plus the number of VLAN * offload by hardware such as PVID etc, exceeds two, the packets will - * be discarded or the original VLAN of the packets will be overwitted + * be discarded or the original VLAN of the packets will be overwritten * by hardware. When the PF PVID is enabled by calling the API function * named rte_eth_dev_set_vlan_pvid or the VF PVID is enabled by the hns3 * PF kernel ether driver, the outer VLAN tag will always be the PVID. @@ -3380,7 +3389,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, /* * The inner l2 length of mbuf is the sum of outer l4 length, * tunneling header length and inner l2 length for a tunnel - * packect. 
But in hns3 tx descriptor, the tunneling header + * packet. But in hns3 tx descriptor, the tunneling header * length is contained in the field of outer L4 length. * Therefore, driver need to calculate the outer L4 length and * inner L2 length. @@ -3396,7 +3405,7 @@ hns3_parse_inner_params(struct rte_mbuf *m, uint32_t *ol_type_vlan_len_msec, tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M, HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE); /* - * For NVGRE tunnel packect, the outer L4 is empty. So only + * For NVGRE tunnel packet, the outer L4 is empty. So only * fill the NVGRE header length to the outer L4 field. */ tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M, @@ -3439,7 +3448,7 @@ hns3_parse_tunneling_params(struct hns3_tx_queue *txq, struct rte_mbuf *m, * mbuf, but for hns3 descriptor, it is contained in the outer L4. So, * there is a need that switching between them. To avoid multiple * calculations, the length of the L2 header include the outer and - * inner, will be filled during the parsing of tunnel packects. + * inner, will be filled during the parsing of tunnel packets. */ if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) { /* @@ -3619,7 +3628,7 @@ hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { struct rte_udp_hdr *udp_hdr; /* - * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo + * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo * header for TSO packets */ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) @@ -3644,7 +3653,7 @@ hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { struct rte_udp_hdr *udp_hdr; /* - * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo + * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo * header for TSO packets */ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) @@ -3991,6 +4000,14 @@ hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq) tx_entry = &txq->sw_ring[txq->next_to_clean]; + if (txq->mbuf_fast_free_en) { + rte_mempool_put_bulk(tx_entry->mbuf->pool, + (void **)tx_entry, txq->tx_rs_thresh); + for (i = 0; i < txq->tx_rs_thresh; i++) + tx_entry[i].mbuf = NULL; + goto update_field; + } + for (i = 0; i < txq->tx_rs_thresh; i++) rte_prefetch0((tx_entry + i)->mbuf); for (i = 0; i < txq->tx_rs_thresh; i++, tx_entry++) { @@ -3998,6 +4015,7 @@ hns3_tx_free_buffer_simple(struct hns3_tx_queue *txq) tx_entry->mbuf = NULL; } +update_field: txq->next_to_clean = (tx_next_clean + 1) % txq->nb_tx_desc; txq->tx_bd_ready += txq->tx_rs_thresh; } @@ -4022,7 +4040,7 @@ static inline void hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) { #define PER_LOOP_NUM 4 - const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B); + uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B); uint64_t dma_addr; uint32_t i; @@ -4033,6 +4051,8 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) txdp->tx.paylen_fd_dop_ol4cs = 0; txdp->tx.type_cs_vlan_tso_len = 0; txdp->tx.ol_type_vlan_len_msec = 0; + if (unlikely((*pkts)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)) + bd_flag |= BIT(HNS3_TXD_TSYN_B); txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag); } } @@ -4040,7 +4060,7 @@ hns3_tx_setup_4bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) static inline void hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) { - const uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B); + uint16_t bd_flag = BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B); uint64_t dma_addr; dma_addr = 
rte_mbuf_data_iova(*pkts); @@ -4049,6 +4069,8 @@ hns3_tx_setup_1bd(struct hns3_desc *txdp, struct rte_mbuf **pkts) txdp->tx.paylen_fd_dop_ol4cs = 0; txdp->tx.type_cs_vlan_tso_len = 0; txdp->tx.ol_type_vlan_len_msec = 0; + if (unlikely((*pkts)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)) + bd_flag |= BIT(HNS3_TXD_TSYN_B); txdp->tx.tp_fe_sc_vld_ra_ri = rte_cpu_to_le_16(bd_flag); } @@ -4136,8 +4158,8 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) uint16_t nb_tx; uint16_t i; - /* free useless buffer */ - hns3_tx_free_useless_buffer(txq); + if (txq->tx_bd_ready < txq->tx_free_thresh) + (void)hns3_tx_free_useless_buffer(txq); tx_next_use = txq->next_to_use; tx_bd_max = txq->nb_tx_desc; @@ -4152,11 +4174,14 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) nb_buf = tx_pkt->nb_segs; if (nb_buf > txq->tx_bd_ready) { - txq->dfx_stats.queue_full_cnt++; - if (nb_tx == 0) - return 0; - - goto end_of_tx; + /* Try to release the required MBUF, but avoid releasing + * all MBUFs, otherwise, the MBUFs will be released for + * a long time and may cause jitter. + */ + if (hns3_tx_free_required_buffer(txq, nb_buf) != 0) { + txq->dfx_stats.queue_full_cnt++; + goto end_of_tx; + } } /* @@ -4287,10 +4312,6 @@ hns3_tx_check_simple_support(struct rte_eth_dev *dev) { uint64_t offloads = dev->data->dev_conf.txmode.offloads; - struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (hns3_dev_get_support(hw, PTP)) - return false; - return (offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)); } @@ -4359,14 +4380,6 @@ hns3_get_tx_function(struct rte_eth_dev *dev, eth_tx_prep_t *prep) return hns3_xmit_pkts; } -uint16_t -hns3_dummy_rxtx_burst(void *dpdk_txq __rte_unused, - struct rte_mbuf **pkts __rte_unused, - uint16_t pkts_n __rte_unused) -{ - return 0; -} - static void hns3_trace_rxtx_function(struct rte_eth_dev *dev) { @@ -4383,7 +4396,21 @@ hns3_trace_rxtx_function(struct rte_eth_dev *dev) rx_mode.info, tx_mode.info); } -void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) +static void +hns3_eth_dev_fp_ops_config(const struct rte_eth_dev *dev) +{ + struct rte_eth_fp_ops *fpo = rte_eth_fp_ops; + uint16_t port_id = dev->data->port_id; + + fpo[port_id].rx_pkt_burst = dev->rx_pkt_burst; + fpo[port_id].tx_pkt_burst = dev->tx_pkt_burst; + fpo[port_id].tx_pkt_prepare = dev->tx_pkt_prepare; + fpo[port_id].rx_descriptor_status = dev->rx_descriptor_status; + fpo[port_id].tx_descriptor_status = dev->tx_descriptor_status; +} + +void +hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); struct hns3_adapter *hns = eth_dev->data->dev_private; @@ -4394,16 +4421,18 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev) eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev); eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status; eth_dev->tx_pkt_burst = hw->set_link_down ? 
- hns3_dummy_rxtx_burst : + rte_eth_pkt_burst_dummy : hns3_get_tx_function(eth_dev, &prep); eth_dev->tx_pkt_prepare = prep; eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status; hns3_trace_rxtx_function(eth_dev); } else { - eth_dev->rx_pkt_burst = hns3_dummy_rxtx_burst; - eth_dev->tx_pkt_burst = hns3_dummy_rxtx_burst; + eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; + eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; eth_dev->tx_pkt_prepare = NULL; } + + hns3_eth_dev_fp_ops_config(eth_dev); } void @@ -4566,46 +4595,22 @@ hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) static int hns3_tx_done_cleanup_full(struct hns3_tx_queue *txq, uint32_t free_cnt) { - uint16_t next_to_clean = txq->next_to_clean; - uint16_t next_to_use = txq->next_to_use; - uint16_t tx_bd_ready = txq->tx_bd_ready; - struct hns3_entry *tx_pkt = &txq->sw_ring[next_to_clean]; - struct hns3_desc *desc = &txq->tx_ring[next_to_clean]; + uint16_t round_free_cnt; uint32_t idx; if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) free_cnt = txq->nb_tx_desc; - for (idx = 0; idx < free_cnt; idx++) { - if (next_to_clean == next_to_use) - break; + if (txq->tx_rs_thresh == 0) + return 0; - if (desc->tx.tp_fe_sc_vld_ra_ri & - rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B))) + round_free_cnt = roundup(free_cnt, txq->tx_rs_thresh); + for (idx = 0; idx < round_free_cnt; idx += txq->tx_rs_thresh) { + if (hns3_tx_free_useless_buffer(txq) != 0) break; - - if (tx_pkt->mbuf != NULL) { - rte_pktmbuf_free_seg(tx_pkt->mbuf); - tx_pkt->mbuf = NULL; - } - - next_to_clean++; - tx_bd_ready++; - tx_pkt++; - desc++; - if (next_to_clean == txq->nb_tx_desc) { - tx_pkt = txq->sw_ring; - desc = txq->tx_ring; - next_to_clean = 0; - } } - if (idx > 0) { - txq->next_to_clean = next_to_clean; - txq->tx_bd_ready = tx_bd_ready; - } - - return (int)idx; + return RTE_MIN(idx, free_cnt); } int @@ -4616,7 +4621,7 @@ hns3_tx_done_cleanup(void *txq, uint32_t free_cnt) if (dev->tx_pkt_burst == hns3_xmit_pkts) return hns3_tx_done_cleanup_full(q, free_cnt); - else if (dev->tx_pkt_burst == hns3_dummy_rxtx_burst) + else if (dev->tx_pkt_burst == rte_eth_pkt_burst_dummy) return 0; else return -ENOTSUP; @@ -4726,8 +4731,13 @@ hns3_enable_rxd_adv_layout(struct hns3_hw *hw) void hns3_stop_tx_datapath(struct rte_eth_dev *dev) { - dev->tx_pkt_burst = hns3_dummy_rxtx_burst; + dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; dev->tx_pkt_prepare = NULL; + hns3_eth_dev_fp_ops_config(dev); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + return; + rte_wmb(); /* Disable tx datapath on secondary process. */ hns3_mp_req_stop_tx(dev); @@ -4742,5 +4752,10 @@ hns3_start_tx_datapath(struct rte_eth_dev *dev) dev->tx_pkt_burst = hns3_get_tx_function(dev, &prep); dev->tx_pkt_prepare = prep; + hns3_eth_dev_fp_ops_config(dev); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + return; + hns3_mp_req_start_tx(dev); }
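
The new mbuf_fast_free_en flag in the Tx queue setup above is only taken when the application requests RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE at configure time, which is only valid when every mbuf handed to rte_eth_tx_burst() comes from a single mempool and has a reference count of one. A minimal application-side sketch of requesting that offload; the helper name and the port/queue parameters are illustrative and not part of this patch:

#include <string.h>
#include <rte_ethdev.h>

/* Illustrative helper (not from the patch): request fast mbuf free when the
 * PMD advertises it, otherwise configure the port without it. */
static int
configure_port_fast_free(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Only legal if all Tx mbufs share one mempool and have refcnt == 1. */
	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}

With the offload enabled, the simple Tx cleanup path in this patch can return a whole tx_rs_thresh batch of mbufs with one rte_mempool_put_bulk() call instead of freeing each segment individually.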
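
On the Rx side, the PTP handling above stores the hardware timestamp in the mbuf dynamic timestamp field and marks the packet with RTE_MBUF_F_RX_IEEE1588_TMST plus the Rx timestamp dynamic flag. A sketch of how an application could read that value back through the standard dynamic-field registration API; the helper and variable names are illustrative, not taken from this patch:

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

/* Illustrative names (not from the patch). */
static int rx_ts_offset = -1;
static uint64_t rx_ts_flag;

/* Look up (or register) the generic Rx timestamp dynamic field and flag.
 * Call once before polling; returns 0 on success. */
static int
rx_timestamp_setup(void)
{
	return rte_mbuf_dyn_rx_timestamp_register(&rx_ts_offset, &rx_ts_flag);
}

/* Read the timestamp written by the PMD; returns 0 if the flag is not set. */
static rte_mbuf_timestamp_t
rx_timestamp_read(struct rte_mbuf *m)
{
	if ((m->ol_flags & rx_ts_flag) == 0)
		return 0;
	return *RTE_MBUF_DYNFIELD(m, rx_ts_offset, rte_mbuf_timestamp_t *);
}

Registration by name is shared between driver and application, so the offset and flag returned here should match the ones the PMD obtained when it registered the field.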