	rxq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
				idx * HNS3_TQP_REG_SIZE);
	rxq->rx_buf_len = hw->rx_buf_len;
-	rxq->non_vld_descs = 0;
	rxq->l2_errors = 0;
	rxq->pkt_len_errors = 0;
	rxq->l3_csum_erros = 0;
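For context, the per-queue register window set up above is plain offset arithmetic inside the device BAR. A minimal, self-contained sketch of the pattern; the function and parameter names here are illustrative, not the driver's:

#include <stdint.h>
#include <stddef.h>

/*
 * Illustrative only: each TQP (task queue pair) owns a fixed-size register
 * block inside the BAR, so a queue's window is base + region offset +
 * queue index * stride, mirroring HNS3_TQP_REG_OFFSET and
 * HNS3_TQP_REG_SIZE above.
 */
static inline void *tqp_reg_base(void *bar, size_t region_off, size_t stride,
				 uint16_t qid)
{
	return (char *)bar + region_off + (size_t)qid * stride;
}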
	uint16_t pkt_len;
	uint16_t nb_rx;
	uint16_t rx_id;
-	int num; /* num of desc in ring */
	int ret;

	nb_rx = 0;
	last_seg = rxq->pkt_last_seg;
	sw_ring = rxq->sw_ring;

-	/* Get num of packets in descriptor ring */
-	num = hns3_read_dev(rxq, HNS3_RING_RX_FBDNUM_REG);
-	while (nb_rx_bd < num && nb_rx < nb_pkts) {
+	while (nb_rx < nb_pkts) {
		rxdp = &rx_ring[rx_id];
		bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
-		if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
-			rxq->non_vld_descs++;
+		if (unlikely(!hns3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
			break;
-		}

		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(nmb == NULL)) {
			/* mbuf allocation failure handling elided here */
			break;
		}

		nb_rx_bd++;
		rxe = &sw_ring[rx_id];
		rx_id++;
-		if (rx_id == rxq->nb_rx_desc)
+		if (unlikely(rx_id == rxq->nb_rx_desc))
			rx_id = 0;

		rte_prefetch0(sw_ring[rx_id].mbuf);
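The substance of this hunk: the burst loop used to pre-read HNS3_RING_RX_FBDNUM_REG (one MMIO read per burst) to learn how many BDs were ready, and counted a stop on an invalid BD as an error. It now simply walks the ring until a descriptor's VLD bit is clear or nb_pkts is reached, so hitting a not-yet-valid BD becomes the normal exit, which is why non_vld_descs is dropped everywhere below. A self-contained sketch of the valid-bit polling idea; the types and names are illustrative, not the driver's code:

#include <stdint.h>

/* Illustrative descriptor with a hardware-written "valid" flag. */
struct ring_desc {
	uint32_t flags;
};

#define DESC_VLD_BIT	0	/* bit position of the valid flag */

/*
 * Walk the ring until the first descriptor the hardware has not marked
 * valid yet; no fill-level register read is needed, and stopping here is
 * the normal exit rather than an error.
 */
static uint16_t recv_burst(const struct ring_desc *ring, uint16_t ring_size,
			   uint16_t head, uint16_t budget)
{
	uint16_t n = 0;

	while (n < budget) {
		if (!(ring[head].flags & (UINT32_C(1) << DESC_VLD_BIT)))
			break;
		/* ... hand the packet to the caller here ... */
		head = (uint16_t)((head + 1 == ring_size) ? 0 : head + 1);
		n++;
	}
	return n;
}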
	bool rx_deferred_start; /* don't start this queue in dev start */
	bool configured;        /* indicate if rx queue has been configured */

-	uint64_t non_vld_descs; /* num of non valid rx descriptors */
	uint64_t l2_errors;
	uint64_t pkt_len_errors;
	uint64_t l3_csum_erros;
	uint64_t l4_csum_erros;
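These fields are per-queue counters bumped on the data path; since each queue is polled by a single core they need no atomics, and a reader sums them across queues on demand. A simplified, self-contained sketch of that aggregation, with illustrative type and function names:

#include <stdint.h>
#include <stddef.h>

/* Illustrative per-queue counter block. */
struct rxq_stats {
	uint64_t l2_errors;
	uint64_t pkt_len_errors;
};

/* Sum one counter over all queues at statistics-read time. */
static uint64_t total_l2_errors(struct rxq_stats *const queues[], uint16_t nb)
{
	uint64_t sum = 0;
	uint16_t i;

	for (i = 0; i < nb; i++)
		if (queues[i] != NULL)
			sum += queues[i]->l2_errors;
	return sum;
}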
/* The statistic of errors in Rx BD */
static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = {
-	{"NONE_VALIDATED_DESCRIPTORS",
-		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(non_vld_descs)},
	{"RX_PKT_LEN_ERRORS",
		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)},
	{"L2_RX_ERRORS",
		HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(l2_errors)},
		rxq = eth_dev->data->rx_queues[i];
		if (rxq) {
			rxq->pkt_len_errors = 0;
-			rxq->non_vld_descs = 0;
			rxq->l2_errors = 0;
			rxq->l3_csum_erros = 0;
			rxq->l4_csum_erros = 0;
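A design note: this reset loop must mirror the offset table field for field, so it could instead be driven by the same table. A hypothetical sketch, reusing struct name_offset and rx_error_strings from the previous example; this is not part of the patch:

/* Clear every counter the table lists; the field list lives in one place. */
static void reset_rx_counters(struct rx_counters *c)
{
	size_t i;

	for (i = 0; i < sizeof(rx_error_strings) / sizeof(rx_error_strings[0]); i++)
		*(uint64_t *)((char *)c + rx_error_strings[i].offset) = 0;
}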