X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_rxtx_vec_sve.c;h=d5c49333b25eb1b3d55f32fa69f0702963fc49b8;hb=ed2ffccdc1c862a24361f67dcf470428be142418;hp=9a81cb0c933463cb38beffd65327b94e956b3611;hpb=952ebacce4f2bc97e4399ce0ca4a36149dde43fe;p=dpdk.git

diff --git a/drivers/net/hns3/hns3_rxtx_vec_sve.c b/drivers/net/hns3/hns3_rxtx_vec_sve.c
index 9a81cb0c93..d5c49333b2 100644
--- a/drivers/net/hns3/hns3_rxtx_vec_sve.c
+++ b/drivers/net/hns3/hns3_rxtx_vec_sve.c
@@ -1,10 +1,10 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020 Hisilicon Limited.
+ * Copyright(c) 2020-2021 HiSilicon Limited.
  */
 
 #include 
 #include 
-#include 
+#include 
 
 #include "hns3_ethdev.h"
 #include "hns3_rxtx.h"
@@ -39,7 +39,6 @@ hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq,
 		uint32_t bd_vld_num)
 {
 	uint32_t retcode = 0;
-	uint32_t cksum_err;
 	int ret, i;
 
 	for (i = 0; i < (int)bd_vld_num; i++) {
@@ -47,7 +46,7 @@ hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq,
 		rx_pkts[i]->ol_flags = PKT_RX_RSS_HASH;
 
 		ret = hns3_handle_bdinfo(rxq, rx_pkts[i], key->bd_base_info[i],
-					 key->l234_info[i], &cksum_err);
+					 key->l234_info[i]);
 		if (unlikely(ret)) {
 			retcode |= 1u << i;
 			continue;
@@ -55,9 +54,9 @@ hns3_desc_parse_field_sve(struct hns3_rx_queue *rxq,
 
 		rx_pkts[i]->packet_type = hns3_rx_calc_ptype(rxq,
 					key->l234_info[i], key->ol_info[i]);
-		if (likely(key->bd_base_info[i] & BIT(HNS3_RXD_L3L4P_B)))
-			hns3_rx_set_cksum_flag(rx_pkts[i],
-					rx_pkts[i]->packet_type, cksum_err);
+
+		/* Increment bytes counter */
+		rxq->basic_stats.bytes += rx_pkts[i]->pkt_len;
 	}
 
 	return retcode;
@@ -119,6 +118,12 @@ hns3_recv_burst_vec_sve(struct hns3_rx_queue *__restrict rxq,
 	svuint32_t rss_tbl1 = svld1_u32(PG32_256BIT, rss_adjust);
 	svuint32_t rss_tbl2 = svld1_u32(PG32_256BIT, &rss_adjust[8]);
 
+	/* compile-time verifies the xlen_adjust mask */
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+			 offsetof(struct rte_mbuf, pkt_len) + 4);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+			 offsetof(struct rte_mbuf, data_len) + 2);
+
 	for (pos = 0; pos < nb_pkts; pos += HNS3_SVE_DEFAULT_DESCS_PER_LOOP,
 				     rxdp += HNS3_SVE_DEFAULT_DESCS_PER_LOOP) {
 		svuint64_t vld_clz, mbp1st, mbp2st, mbuf_init;
@@ -287,12 +292,11 @@ hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
 {
 	struct hns3_rx_queue *rxq = rx_queue;
 	struct hns3_desc *rxdp = &rxq->rx_ring[rxq->next_to_use];
-	uint64_t bd_err_mask; /* bit mask indicate whick pkts is error */
+	uint64_t pkt_err_mask; /* bit mask indicate whick pkts is error */
 	uint16_t nb_rx;
 
 	rte_prefetch_non_temporal(rxdp);
 
-	nb_pkts = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, HNS3_SVE_DEFAULT_DESCS_PER_LOOP);
 
 	if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
@@ -304,10 +308,196 @@ hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
 
 	hns3_rx_prefetch_mbuf_sve(&rxq->sw_ring[rxq->next_to_use]);
 
-	bd_err_mask = 0;
-	nb_rx = hns3_recv_burst_vec_sve(rxq, rx_pkts, nb_pkts, &bd_err_mask);
-	if (unlikely(bd_err_mask))
-		nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, bd_err_mask);
+	if (likely(nb_pkts <= HNS3_DEFAULT_RX_BURST)) {
+		pkt_err_mask = 0;
+		nb_rx = hns3_recv_burst_vec_sve(rxq, rx_pkts, nb_pkts,
+						&pkt_err_mask);
+		nb_rx = hns3_rx_reassemble_pkts(rx_pkts, nb_rx, pkt_err_mask);
+		return nb_rx;
+	}
+
+	nb_rx = 0;
+	while (nb_pkts > 0) {
+		uint16_t ret, n;
+
+		n = RTE_MIN(nb_pkts, HNS3_DEFAULT_RX_BURST);
+		pkt_err_mask = 0;
+		ret = hns3_recv_burst_vec_sve(rxq, &rx_pkts[nb_rx], n,
+					      &pkt_err_mask);
+		nb_pkts -= ret;
+		nb_rx += hns3_rx_reassemble_pkts(&rx_pkts[nb_rx], ret,
+						 pkt_err_mask);
+		if (ret < n)
+			break;
+
+		if (rxq->rx_rearm_nb > HNS3_DEFAULT_RXQ_REARM_THRESH)
+			hns3_rxq_rearm_mbuf_sve(rxq);
+	}
 
 	return nb_rx;
 }
+
+static inline void
+hns3_tx_free_buffers_sve(struct hns3_tx_queue *txq)
+{
+#define HNS3_SVE_CHECK_DESCS_PER_LOOP	8
+#define TX_VLD_U8_ZIP_INDEX		svindex_u8(0, 4)
+	svbool_t pg32 = svwhilelt_b32(0, HNS3_SVE_CHECK_DESCS_PER_LOOP);
+	svuint32_t vld, vld2;
+	svuint8_t vld_u8;
+	uint64_t vld_all;
+	struct hns3_desc *tx_desc;
+	int i;
+
+	/*
+	 * All mbufs can be released only when the VLD bits of all
+	 * descriptors in a batch are cleared.
+	 */
+	/* do logical OR operation for all desc's valid field */
+	vld = svdup_n_u32(0);
+	tx_desc = &txq->tx_ring[txq->next_to_clean];
+	for (i = 0; i < txq->tx_rs_thresh; i += HNS3_SVE_CHECK_DESCS_PER_LOOP,
+				tx_desc += HNS3_SVE_CHECK_DESCS_PER_LOOP) {
+		vld2 = svld1_gather_u32offset_u32(pg32, (uint32_t *)tx_desc,
+				svindex_u32(BD_FIELD_VALID_OFFSET, BD_SIZE));
+		vld = svorr_u32_z(pg32, vld, vld2);
+	}
+	/* shift left and then right to get all valid bit */
+	vld = svlsl_n_u32_z(pg32, vld,
+			    HNS3_UINT32_BIT - 1 - HNS3_TXD_VLD_B);
+	vld = svreinterpret_u32_s32(svasr_n_s32_z(pg32,
+		svreinterpret_s32_u32(vld), HNS3_UINT32_BIT - 1));
+	/* use tbl to compress 32bit-lane to 8bit-lane */
+	vld_u8 = svtbl_u8(svreinterpret_u8_u32(vld), TX_VLD_U8_ZIP_INDEX);
+	/* dump compressed 64bit to variable */
+	svst1_u64(PG64_64BIT, &vld_all, svreinterpret_u64_u8(vld_u8));
+	if (vld_all > 0)
+		return;
+
+	hns3_tx_bulk_free_buffers(txq);
+}
+
+static inline void
+hns3_tx_fill_hw_ring_sve(struct hns3_tx_queue *txq,
+			 struct rte_mbuf **pkts,
+			 uint16_t nb_pkts)
+{
+#define DATA_OFF_LEN_VAL_MASK	0xFFFF
+	struct hns3_desc *txdp = &txq->tx_ring[txq->next_to_use];
+	struct hns3_entry *tx_entry = &txq->sw_ring[txq->next_to_use];
+	const uint64_t valid_bit = (BIT(HNS3_TXD_VLD_B) | BIT(HNS3_TXD_FE_B)) <<
+				   HNS3_UINT32_BIT;
+	svuint64_t base_addr, buf_iova, data_off, data_len, addr;
+	svuint64_t offsets = svindex_u64(0, BD_SIZE);
+	uint32_t i = 0;
+	svbool_t pg = svwhilelt_b64_u32(i, nb_pkts);
+
+	do {
+		base_addr = svld1_u64(pg, (uint64_t *)pkts);
+		/* calc mbuf's field buf_iova address */
+		buf_iova = svadd_n_u64_z(pg, base_addr,
+					 offsetof(struct rte_mbuf, buf_iova));
+		/* calc mbuf's field data_off address */
+		data_off = svadd_n_u64_z(pg, base_addr,
+					 offsetof(struct rte_mbuf, data_off));
+		/* calc mbuf's field data_len address */
+		data_len = svadd_n_u64_z(pg, base_addr,
+					 offsetof(struct rte_mbuf, data_len));
+		/* store mbuf to tx_entry */
+		svst1_u64(pg, (uint64_t *)tx_entry, base_addr);
+		/* read pkts->buf_iova */
+		buf_iova = svld1_gather_u64base_u64(pg, buf_iova);
+		/* read pkts->data_off's 64bit val */
+		data_off = svld1_gather_u64base_u64(pg, data_off);
+		/* read pkts->data_len's 64bit val */
+		data_len = svld1_gather_u64base_u64(pg, data_len);
+		/* zero data_off high 48bit by svand ops */
+		data_off = svand_n_u64_z(pg, data_off, DATA_OFF_LEN_VAL_MASK);
+		/* zero data_len high 48bit by svand ops */
+		data_len = svand_n_u64_z(pg, data_len, DATA_OFF_LEN_VAL_MASK);
+		/* calc mbuf data region iova addr */
+		addr = svadd_u64_z(pg, buf_iova, data_off);
+		/* shift due data_len's offset is 2byte of BD's second 8byte */
+		data_len = svlsl_n_u64_z(pg, data_len, HNS3_UINT16_BIT);
+		/* save offset 0~7byte of every BD */
+		svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->addr,
+					    offsets, addr);
+		/* save offset 8~15byte of every BD */
+		svst1_scatter_u64offset_u64(pg, (uint64_t *)&txdp->tx.vlan_tag,
+					    offsets, data_len);
+		/* save offset 16~23byte of every BD */
+		svst1_scatter_u64offset_u64(pg,
+				(uint64_t *)&txdp->tx.outer_vlan_tag,
+				offsets, svdup_n_u64(0));
+		/* save offset 24~31byte of every BD */
+		svst1_scatter_u64offset_u64(pg,
+				(uint64_t *)&txdp->tx.paylen_fd_dop_ol4cs,
+				offsets, svdup_n_u64(valid_bit));
+
+		/* Increment bytes counter */
+		uint32_t idx;
+		for (idx = 0; idx < svcntd(); idx++)
+			txq->basic_stats.bytes += pkts[idx]->pkt_len;
+
+		/* update index for next loop */
+		i += svcntd();
+		pkts += svcntd();
+		txdp += svcntd();
+		tx_entry += svcntd();
+		pg = svwhilelt_b64_u32(i, nb_pkts);
+	} while (svptest_any(svptrue_b64(), pg));
+}
+
+static uint16_t
+hns3_xmit_fixed_burst_vec_sve(void *__restrict tx_queue,
+			      struct rte_mbuf **__restrict tx_pkts,
+			      uint16_t nb_pkts)
+{
+	struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
+	uint16_t nb_tx = 0;
+
+	if (txq->tx_bd_ready < txq->tx_free_thresh)
+		hns3_tx_free_buffers_sve(txq);
+
+	nb_pkts = RTE_MIN(txq->tx_bd_ready, nb_pkts);
+	if (unlikely(nb_pkts == 0)) {
+		txq->dfx_stats.queue_full_cnt++;
+		return 0;
+	}
+
+	if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) {
+		nb_tx = txq->nb_tx_desc - txq->next_to_use;
+		hns3_tx_fill_hw_ring_sve(txq, tx_pkts, nb_tx);
+		txq->next_to_use = 0;
+	}
+
+	hns3_tx_fill_hw_ring_sve(txq, tx_pkts + nb_tx, nb_pkts - nb_tx);
+	txq->next_to_use += nb_pkts - nb_tx;
+
+	txq->tx_bd_ready -= nb_pkts;
+	hns3_write_txq_tail_reg(txq, nb_pkts);
+
+	return nb_pkts;
+}
+
+uint16_t
+hns3_xmit_pkts_vec_sve(void *tx_queue,
+		       struct rte_mbuf **tx_pkts,
+		       uint16_t nb_pkts)
+{
+	struct hns3_tx_queue *txq = (struct hns3_tx_queue *)tx_queue;
+	uint16_t ret, new_burst;
+	uint16_t nb_tx = 0;
+
+	while (nb_pkts) {
+		new_burst = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+		ret = hns3_xmit_fixed_burst_vec_sve(tx_queue, &tx_pkts[nb_tx],
+						    new_burst);
+		nb_tx += ret;
+		nb_pkts -= ret;
+		if (ret < new_burst)
+			break;
+	}
+
+	return nb_tx;
+}