diff --git a/drivers/net/hns3/hns3_rxtx_vec_sve.c b/drivers/net/hns3/hns3_rxtx_vec_sve.c
index 2eaf692801..d5c49333b2 100644
--- a/drivers/net/hns3/hns3_rxtx_vec_sve.c
+++ b/drivers/net/hns3/hns3_rxtx_vec_sve.c
@@ -39,7 +39,6 @@ hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq,
 		uint32_t bd_vld_num)
 {
 	uint32_t retcode = 0;
-	uint32_t cksum_err;
 	int ret, i;
 
 	for (i = 0; i < (int)bd_vld_num; i++) {
@@ -47,7 +46,7 @@ hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq,
 		rx_pkts[i]->ol_flags = PKT_RX_RSS_HASH;
 
 		ret = hns3_handle_bdinfo(rxq, rx_pkts[i], key->bd_base_info[i],
-					 key->l234_info[i], &cksum_err);
+					 key->l234_info[i]);
 		if (unlikely(ret)) {
 			retcode |= 1u << i;
 			continue;
@@ -55,9 +54,6 @@ hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq,
 
 		rx_pkts[i]->packet_type = hns3_rx_calc_ptype(rxq,
 					key->l234_info[i], key->ol_info[i]);
-		if (likely(key->bd_base_info[i] & BIT(HNS3_RXD_L3L4P_B)))
-			hns3_rx_set_cksum_flag(rx_pkts[i],
-					rx_pkts[i]->packet_type, cksum_err);
 
 		/* Increment bytes counter */
 		rxq->basic_stats.bytes += rx_pkts[i]->pkt_len;
@@ -122,6 +118,12 @@ hns3_recv_burst_vec_sve(struct hns3_rx_queue *__restrict rxq,
 	svuint32_t rss_tbl1 = svld1_u32(PG32_256BIT, rss_adjust);
 	svuint32_t rss_tbl2 = svld1_u32(PG32_256BIT, &rss_adjust[8]);
 
+	/* compile-time check of the xlen_adjust mask */
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+			 offsetof(struct rte_mbuf, pkt_len) + 4);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+			 offsetof(struct rte_mbuf, data_len) + 2);
+
 	for (pos = 0; pos < nb_pkts; pos += HNS3_SVE_DEFAULT_DESCS_PER_LOOP,
 				     rxdp += HNS3_SVE_DEFAULT_DESCS_PER_LOOP) {
 		svuint64_t vld_clz, mbp1st, mbp2st, mbuf_init;
@@ -290,12 +292,11 @@ hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
 {
 	struct hns3_rx_queue *rxq = rx_queue;
 	struct hns3_desc *rxdp = &rxq->rx_ring[rxq->next_to_use];
-	uint64_t bd_err_mask;	/* bit mask indicate whick pkts is error */
+	uint64_t pkt_err_mask;	/* bit mask indicating which packets have errors */
 	uint16_t nb_rx;
 
 	rte_prefetch_non_temporal(rxdp);
 
-	nb_pkts = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, HNS3_SVE_DEFAULT_DESCS_PER_LOOP);
 
 	if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
@@ -307,10 +308,31 @@ hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
 
 	hns3_rx_prefetch_mbuf_sve(&rxq->sw_ring[rxq->next_to_use]);
 
-	bd_err_mask = 0;
-	nb_rx = hns3_recv_burst_vec_sve(rxq, rx_pkts, nb_pkts, &bd_err_mask);
-	if (unlikely(bd_err_mask))
-		nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, bd_err_mask);
+	if (likely(nb_pkts <= HNS3_DEFAULT_RX_BURST)) {
+		pkt_err_mask = 0;
+		nb_rx = hns3_recv_burst_vec_sve(rxq, rx_pkts, nb_pkts,
+						&pkt_err_mask);
+		nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, pkt_err_mask);
+		return nb_rx;
+	}
+
+	nb_rx = 0;
+	while (nb_pkts > 0) {
+		uint16_t ret, n;
+
+		n = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
+		pkt_err_mask = 0;
+		ret = hns3_recv_burst_vec_sve(rxq, &rx_pkts[nb_rx], n,
+					      &pkt_err_mask);
+		nb_pkts -= ret;
+		nb_rx += hns3_rx_reassemble_pkts(&rx_pkts[nb_rx], ret,
+						 pkt_err_mask);
+		if (ret < n)
+			break;
+
+		if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
+			hns3_rxq_rearm_mbuf_sve(rxq);
+	}
 
 	return nb_rx;
 }
@@ -453,7 +475,7 @@ hns3_xmit_fixed_burst_vec_sve(void *__restrict tx_queue,
 	hns3_tx_fill_hw_ring_sve(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
 	txq->next_to_use += nb_pkts - nb_tx;
 	txq->tx_bd_ready -= nb_pkts;
-	hns3_write_reg_opt(txq->io_tail_reg, nb_pkts);
+	hns3_write_txq_tail_reg(txq, nb_pkts);
 
 	return nb_pkts;
 }
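
Note: the behavioral change in hns3_recv_pkts_vec_sve() above is that a request larger than HNS3_DEFAULT_RX_BURST is no longer silently clamped; the driver now serves it as a sequence of fixed-size bursts, rearming descriptors between rounds and stopping early when a burst comes back short. Below is a minimal standalone sketch of that control flow, with a stubbed burst function standing in for the driver internals (recv_burst(), MAX_BURST and ring_avail are hypothetical stand-ins, not part of the hns3 API):

#include <stdint.h>
#include <stdio.h>

#define MAX_BURST 32	/* stand-in for HNS3_DEFAULT_RX_BURST */

/* Hypothetical burst receiver: delivers at most 'n' packets and
 * returns how many it could actually deliver from the ring. */
static uint16_t
recv_burst(uint16_t *ring_avail, uint16_t n)
{
	uint16_t got = (*ring_avail < n) ? *ring_avail : n;

	*ring_avail -= got;
	return got;
}

/* Serve a large request as fixed-size bursts, mirroring the patched
 * control flow: loop until the request is filled or a burst comes
 * back short (no more descriptors ready). */
static uint16_t
recv_pkts(uint16_t *ring_avail, uint16_t nb_pkts)
{
	uint16_t nb_rx = 0;

	if (nb_pkts <= MAX_BURST)
		return recv_burst(ring_avail, nb_pkts);

	while (nb_pkts > 0) {
		uint16_t n = (nb_pkts < MAX_BURST) ? nb_pkts : MAX_BURST;
		uint16_t ret = recv_burst(ring_avail, n);

		nb_rx += ret;
		nb_pkts -= ret;
		if (ret < n)	/* short burst: ring is empty for now */
			break;
		/* the real driver rearms RX descriptors here */
	}

	return nb_rx;
}

int
main(void)
{
	uint16_t ring_avail = 100;

	/* ask for more than one burst: expect all 100, in 32-packet chunks */
	printf("received %u packets\n", (unsigned)recv_pkts(&ring_avail, 128));
	return 0;
}

The early break on a short burst is what distinguishes "ring temporarily empty" from "request satisfied", so the loop never spins on an empty ring.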
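The RTE_BUILD_BUG_ON() lines added in hns3_recv_burst_vec_sve() pin down the rte_mbuf field layout that the vector path's combined length/VLAN store relies on: if the struct is ever reordered or padded, the build fails instead of the datapath silently corrupting mbufs. The same guard can be sketched with C11 _Static_assert; the struct below is a toy stand-in for rte_mbuf, and only the offset relationships mirror the real check:

#include <stddef.h>
#include <stdint.h>

/* Toy mbuf-like layout: the vector RX path writes pkt_len, data_len
 * and vlan_tci as one contiguous store, so their relative offsets are
 * part of the datapath's contract. */
struct toy_mbuf {
	uint32_t pkt_len;
	uint16_t data_len;
	uint16_t vlan_tci;
};

/* Compile-time layout checks, same idea as the patch's RTE_BUILD_BUG_ON()s */
_Static_assert(offsetof(struct toy_mbuf, data_len) ==
	       offsetof(struct toy_mbuf, pkt_len) + 4,
	       "data_len must immediately follow pkt_len");
_Static_assert(offsetof(struct toy_mbuf, vlan_tci) ==
	       offsetof(struct toy_mbuf, data_len) + 2,
	       "vlan_tci must immediately follow data_len");

int main(void) { return 0; }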