net/axgbe: support scattered Rx
author     Selwin Sebastian <selwin.sebastian@amd.com>
           Wed, 4 Mar 2020 13:44:14 +0000 (19:14 +0530)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Wed, 18 Mar 2020 09:21:41 +0000 (10:21 +0100)
Enable scattered Rx support and add jumbo packet receive capability.

Signed-off-by: Selwin Sebastian <selwin.sebastian@amd.com>
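
The offload flags in this patch use the pre-21.11 ethdev names current at the
time. As a minimal application-side sketch (port_id, queue setup and error
handling assumed elsewhere, frame size chosen arbitrarily), requesting
DEV_RX_OFFLOAD_SCATTER, or a max_rx_pkt_len larger than the Rx buffer size,
is what steers axgbe_dev_start() below onto the new scattered receive path:

    /* Illustrative port configuration -- not part of this patch. */
    struct rte_eth_conf port_conf = {
            .rxmode = {
                    .max_rx_pkt_len = 9000,  /* jumbo frames up to 9000 bytes */
                    .offloads = DEV_RX_OFFLOAD_JUMBO_FRAME |
                                DEV_RX_OFFLOAD_SCATTER,
            },
    };

    if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
            rte_exit(EXIT_FAILURE, "port configure failed\n");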
doc/guides/nics/features/axgbe.ini
drivers/net/axgbe/axgbe_common.h
drivers/net/axgbe/axgbe_ethdev.c
drivers/net/axgbe/axgbe_rxtx.c
drivers/net/axgbe/axgbe_rxtx.h

diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
index ab4da55..0becaa0 100644
@@ -7,6 +7,7 @@
 Speed capabilities   = Y
 Link status          = Y
 Jumbo frame          = Y
+Scattered Rx         = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 RSS hash             = Y
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
index 2ac4d89..843ac1d 100644
 #define RX_NORMAL_DESC3_PL_WIDTH               14
 #define RX_NORMAL_DESC3_RSV_INDEX              26
 #define RX_NORMAL_DESC3_RSV_WIDTH              1
+#define RX_NORMAL_DESC3_LD_INDEX               28
+#define RX_NORMAL_DESC3_LD_WIDTH               1
 
 #define RX_DESC3_L34T_IPV4_TCP                 1
 #define RX_DESC3_L34T_IPV4_UDP                 2
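
For context, not part of the patch: the driver reads descriptor fields through
AXGMAC_GET_BITS_LE(), which combines the *_INDEX/*_WIDTH pairs above into a
shift-and-mask. A sketch of what the new LD (last descriptor) lookup reduces
to, assuming the usual convention of these macros:

    /* Sketch of the INDEX/WIDTH accessor convention; the real helper
     * is AXGMAC_GET_BITS_LE() in axgbe_common.h. */
    uint32_t desc3 = rte_le_to_cpu_32(desc->write.desc3);
    unsigned int last_desc =
            (desc3 >> RX_NORMAL_DESC3_LD_INDEX) &
            ((1U << RX_NORMAL_DESC3_LD_WIDTH) - 1);
    /* last_desc == 0: more segments of this packet follow;
     * last_desc == 1: final segment, PL holds the total packet length. */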
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 00974e7..70a4209 100644
@@ -255,6 +255,10 @@ axgbe_dev_start(struct rte_eth_dev *dev)
 {
        struct axgbe_port *pdata = dev->data->dev_private;
        int ret;
+       struct rte_eth_dev_data *dev_data = dev->data;
+       uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;
+
+       dev->dev_ops = &axgbe_eth_dev_ops;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -285,6 +289,16 @@ axgbe_dev_start(struct rte_eth_dev *dev)
 
        axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
        axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
+       if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+                               max_pkt_len > pdata->rx_buf_size)
+               dev_data->scattered_rx = 1;
+
+       /*  Scatter Rx handling */
+       if (dev_data->scattered_rx)
+               dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
+       else
+               dev->rx_pkt_burst = &axgbe_recv_pkts;
+
        return 0;
 }
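
Before requesting the offloads shown in the configuration sketch above, an
application can probe the capabilities that axgbe_dev_info_get() advertises in
the next hunk; an illustrative check (port_id assumed):

    /* Illustrative capability probe -- not part of this patch. */
    struct rte_eth_dev_info dev_info;

    if (rte_eth_dev_info_get(port_id, &dev_info) != 0 ||
        !(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER))
            rte_exit(EXIT_FAILURE, "scattered Rx not supported\n");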
 
@@ -816,6 +830,8 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM  |
                DEV_RX_OFFLOAD_TCP_CKSUM  |
+               DEV_RX_OFFLOAD_JUMBO_FRAME      |
+               DEV_RX_OFFLOAD_SCATTER    |
                DEV_RX_OFFLOAD_KEEP_CRC;
 
        dev_info->tx_offload_capa =
@@ -1047,7 +1063,6 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
        int ret;
 
        eth_dev->dev_ops = &axgbe_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
 
        /*
         * For secondary processes, we don't initialise any further as primary
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 96055c2..57e2bbb 100644
@@ -307,6 +307,152 @@ err_set:
        return nb_rx;
 }
 
+
+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
+               struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       PMD_INIT_FUNC_TRACE();
+       uint16_t nb_rx = 0;
+       struct axgbe_rx_queue *rxq = rx_queue;
+       volatile union axgbe_rx_desc *desc;
+
+       uint64_t old_dirty = rxq->dirty;
+       struct rte_mbuf *first_seg = NULL;
+       struct rte_mbuf *mbuf, *tmbuf;
+       unsigned int err;
+       uint32_t error_status;
+       uint16_t idx, pidx, data_len = 0, pkt_len = 0;
+
+       idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
+       while (nb_rx < nb_pkts) {
+               bool eop = 0;
+next_desc:
+               if (unlikely(idx == rxq->nb_desc))
+                       idx = 0;
+
+               desc = &rxq->desc[idx];
+
+               if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
+                       break;
+
+               tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+               if (unlikely(!tmbuf)) {
+                       PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
+                                   " queue_id = %u\n",
+                                   (unsigned int)rxq->port_id,
+                                   (unsigned int)rxq->queue_id);
+                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       break;
+               }
+
+               pidx = idx + 1;
+               if (unlikely(pidx == rxq->nb_desc))
+                       pidx = 0;
+
+               rte_prefetch0(rxq->sw_ring[pidx]);
+               if ((pidx & 0x3) == 0) {
+                       rte_prefetch0(&rxq->desc[pidx]);
+                       rte_prefetch0(&rxq->sw_ring[pidx]);
+               }
+
+               mbuf = rxq->sw_ring[idx];
+               /* Check for any errors and free mbuf*/
+               err = AXGMAC_GET_BITS_LE(desc->write.desc3,
+                                        RX_NORMAL_DESC3, ES);
+               error_status = 0;
+               if (unlikely(err)) {
+                       error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
+                       if ((error_status != AXGBE_L3_CSUM_ERR)
+                                       && (error_status != AXGBE_L4_CSUM_ERR)) {
+                               rxq->errors++;
+                               rte_pktmbuf_free(mbuf);
+                               goto err_set;
+                       }
+               }
+               rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
+
+               if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
+                                       RX_NORMAL_DESC3, LD)) {
+                       eop = 0;
+                       pkt_len = rxq->buf_size;
+                       data_len = pkt_len;
+               } else {
+                       eop = 1;
+                       pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
+                                       RX_NORMAL_DESC3, PL);
+                       data_len = pkt_len - rxq->crc_len;
+               }
+
+               if (first_seg != NULL) {
+                       if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
+                               rte_mempool_put(rxq->mb_pool,
+                                               first_seg);
+               } else {
+                       first_seg = mbuf;
+               }
+
+               /* Get the RSS hash */
+               if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
+                       mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
+
+               /* Mbuf populate */
+               mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+               mbuf->data_len = data_len;
+
+err_set:
+               rxq->cur++;
+               rxq->sw_ring[idx++] = tmbuf;
+               desc->read.baddr =
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
+               memset((void *)(&desc->read.desc2), 0, 8);
+               AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
+               rxq->dirty++;
+
+               if (!eop) {
+                       rte_pktmbuf_free(mbuf);
+                       goto next_desc;
+               }
+
+               first_seg->pkt_len = pkt_len;
+               rxq->bytes += pkt_len;
+               mbuf->next = NULL;
+
+               first_seg->port = rxq->port_id;
+               if (rxq->pdata->rx_csum_enable) {
+                       mbuf->ol_flags = 0;
+                       mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+                       mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+                       if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
+                               mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
+                               mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+                               mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+                               mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+                       } else if (unlikely(error_status
+                                               == AXGBE_L4_CSUM_ERR)) {
+                               mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+                               mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                       }
+               }
+
+               rx_pkts[nb_rx++] = first_seg;
+
+                /* Setup receipt context for a new packet.*/
+               first_seg = NULL;
+       }
+
+       /* Save receive context.*/
+       rxq->pkts += nb_rx;
+
+       if (rxq->dirty != old_dirty) {
+               rte_wmb();
+               idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
+               AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+                                  low32_value(rxq->ring_phys_addr +
+                                  (idx * sizeof(union axgbe_rx_desc))));
+       }
+       return nb_rx;
+}
+
 /* Tx Apis */
 static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
 {
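
With the scattered path in place, a frame larger than one Rx buffer is
delivered as an mbuf chain: the head segment carries pkt_len and port for the
whole frame, and the segments are linked through next. A minimal
application-side consumer sketch (not part of the patch):

    /* Walk a multi-segment packet returned by rte_eth_rx_burst(). */
    static uint32_t
    count_payload_bytes(const struct rte_mbuf *pkt)
    {
            uint32_t bytes = 0;
            const struct rte_mbuf *seg;

            for (seg = pkt; seg != NULL; seg = seg->next)
                    bytes += seg->data_len;  /* sums to pkt->pkt_len */
            return bytes;
    }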
diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
index a21537d..f6796b0 100644
@@ -179,6 +179,8 @@ int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts);
+uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
+               struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
                                           struct rte_mbuf **rx_pkts,
                                           uint16_t nb_pkts);