diff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c
index fbd50a7..e3f57de 100644
--- a/drivers/net/atlantic/atl_rxtx.c
+++ b/drivers/net/atlantic/atl_rxtx.c
@@ -3,7 +3,7 @@
  */
 
 #include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_net.h>
 
 #include "atl_ethdev.h"
 #include "hw_atl/hw_atl_b0_internal.h"
 
 #define ATL_TX_CKSUM_OFFLOAD_MASK (                     \
-       PKT_TX_IP_CKSUM |                                \
-       PKT_TX_L4_MASK |                                 \
-       PKT_TX_TCP_SEG)
+       RTE_MBUF_F_TX_IP_CKSUM |                         \
+       RTE_MBUF_F_TX_L4_MASK |                          \
+       RTE_MBUF_F_TX_TCP_SEG)
 
 #define ATL_TX_OFFLOAD_MASK (                           \
-       PKT_TX_VLAN |                                    \
-       PKT_TX_IP_CKSUM |                                \
-       PKT_TX_L4_MASK |                                 \
-       PKT_TX_TCP_SEG)
+       RTE_MBUF_F_TX_VLAN |                             \
+       RTE_MBUF_F_TX_IPV6 |                             \
+       RTE_MBUF_F_TX_IPV4 |                             \
+       RTE_MBUF_F_TX_IP_CKSUM |                         \
+       RTE_MBUF_F_TX_L4_MASK |                          \
+       RTE_MBUF_F_TX_TCP_SEG)
 
 #define ATL_TX_OFFLOAD_NOTSUP_MASK \
-       (PKT_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
+       (RTE_MBUF_F_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
 
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
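
Since ATL_TX_OFFLOAD_MASK is a strict subset of RTE_MBUF_F_TX_OFFLOAD_MASK, the XOR above amounts to a set difference: it names every Tx offload flag this PMD does not support. A minimal sketch of how such a mask is typically consumed on the transmit path (the helper name is illustrative, not part of the patch):

    #include <rte_mbuf.h>

    /* Reject any packet that requests an offload outside the supported
     * subset; uses the ATL_TX_OFFLOAD_NOTSUP_MASK defined above. */
    static inline int
    atl_tx_offloads_ok(const struct rte_mbuf *m)
    {
            return (m->ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) == 0;
    }
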
@@ -123,7 +125,7 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
         * different socket than was previously used.
         */
        if (dev->data->rx_queues[rx_queue_id] != NULL) {
-               atl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
+               atl_rx_queue_release(dev, rx_queue_id);
                dev->data->rx_queues[rx_queue_id] = NULL;
        }
 
@@ -143,10 +145,10 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 
        rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-               DEV_RX_OFFLOAD_IPV4_CKSUM;
+               RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
        rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-               (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
-       if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+               (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
+       if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
                PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
 
        /* allocate memory for the software ring */
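
The RTE_ETH_RX_OFFLOAD_* names come from the DPDK 21.11 ethdev namespace cleanup; the old DEV_RX_OFFLOAD_* spellings survived only as deprecated aliases. A hypothetical application-side sketch requesting the checksum offloads this setup path tests for:

    #include <rte_ethdev.h>

    struct rte_eth_conf port_conf = {
            .rxmode = {
                    .offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                                RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
                                RTE_ETH_RX_OFFLOAD_TCP_CKSUM,
            },
    };
    /* rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf); */
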
@@ -245,7 +247,7 @@ atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
         * different socket than was previously used.
         */
        if (dev->data->tx_queues[tx_queue_id] != NULL) {
-               atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
+               atl_tx_queue_release(dev, tx_queue_id);
                dev->data->tx_queues[tx_queue_id] = NULL;
        }
 
@@ -336,6 +338,7 @@ int
 atl_rx_init(struct rte_eth_dev *eth_dev)
 {
        struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       struct aq_rss_parameters *rss_params = &hw->aq_nic_cfg->aq_rss;
        struct atl_rx_queue *rxq;
        uint64_t base_addr = 0;
        int i = 0;
@@ -379,6 +382,10 @@ atl_rx_init(struct rte_eth_dev *eth_dev)
                }
        }
 
+       for (i = rss_params->indirection_table_size; i--;)
+               rss_params->indirection_table[i] = i &
+                       (eth_dev->data->nb_rx_queues - 1);
+       hw_atl_b0_hw_rss_set(hw, rss_params);
        return err;
 }
 
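
One caveat worth noting: the bitwise AND in the new loop spreads table entries across all queues only when nb_rx_queues is a power of two (with 3 queues, i & 2 never selects queue 1). A modulo form, shown purely as an illustration rather than as driver code, handles any queue count:

    /* Illustrative alternative: valid for any nb_rx_queues, at the
     * cost of a division per table entry. */
    for (i = rss_params->indirection_table_size; i--;)
            rss_params->indirection_table[i] =
                    i % eth_dev->data->nb_rx_queues;
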
@@ -491,13 +498,13 @@ atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 }
 
 void
-atl_rx_queue_release(void *rx_queue)
+atl_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       PMD_INIT_FUNC_TRACE();
+       struct atl_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
 
-       if (rx_queue != NULL) {
-               struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
+       PMD_INIT_FUNC_TRACE();
 
+       if (rxq != NULL) {
                atl_rx_queue_release_mbufs(rxq);
                rte_free(rxq->sw_ring);
                rte_free(rxq);
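
The new signature matches the eth_queue_release_t callback type of the DPDK 21.11 ethdev driver API, which passes the device and queue index instead of an opaque queue pointer. A sketch of the assumed wiring in the driver's ops table (the atl_eth_dev_ops name mirrors atl_ethdev.c):

    /* Sketch: the release callbacks are registered unchanged; only
     * their prototype differs from pre-21.11 releases. */
    static const struct eth_dev_ops atl_eth_dev_ops = {
            /* ... */
            .rx_queue_release = atl_rx_queue_release,
            .tx_queue_release = atl_tx_queue_release,
            /* ... */
    };
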
@@ -562,13 +569,13 @@ atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 }
 
 void
-atl_tx_queue_release(void *tx_queue)
+atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       PMD_INIT_FUNC_TRACE();
+       struct atl_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
 
-       if (tx_queue != NULL) {
-               struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;
+       PMD_INIT_FUNC_TRACE();
 
+       if (txq != NULL) {
                atl_tx_queue_release_mbufs(txq);
                rte_free(txq->sw_ring);
                rte_free(txq);
@@ -583,13 +590,13 @@ atl_free_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               atl_rx_queue_release(dev->data->rx_queues[i]);
+               atl_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = 0;
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               atl_tx_queue_release(dev->data->tx_queues[i]);
+               atl_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = 0;
        }
        dev->data->nb_tx_queues = 0;
@@ -682,18 +689,13 @@ atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 /* Return Rx queue avail count */
 
 uint32_t
-atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+atl_rx_queue_count(void *rx_queue)
 {
        struct atl_rx_queue *rxq;
 
        PMD_INIT_FUNC_TRACE();
 
-       if (rx_queue_id >= dev->data->nb_rx_queues) {
-               PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
-               return 0;
-       }
-
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
 
        if (rxq == NULL)
                return 0;
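
This follows the 21.11 ethdev change that made eth_rx_queue_count_t take the Rx queue pointer directly; the port and queue-id validation now happens once in rte_eth_rx_queue_count(), so the per-PMD bounds check above became dead code. Caller-side usage is unchanged; a sketch with placeholder port_id/rx_queue_id:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* The ethdev layer validates the ids and returns a negative errno
     * (-EINVAL, -ENOTSUP) before the PMD callback is ever reached. */
    int avail = rte_eth_rx_queue_count(port_id, rx_queue_id);
    if (avail >= 0)
            printf("used Rx descriptors: %d\n", avail);
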
@@ -805,25 +807,25 @@ atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                ol_flags = m->ol_flags;
 
                if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) {
-                       rte_errno = -EINVAL;
+                       rte_errno = EINVAL;
                        return i;
                }
 
                if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) {
-                       rte_errno = -ENOTSUP;
+                       rte_errno = ENOTSUP;
                        return i;
                }
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
 #endif
                ret = rte_net_intel_cksum_prepare(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
        }
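
The sign fixes reflect two distinct conventions: rte_errno always holds a positive errno value, while rte_validate_tx_offload() and rte_net_intel_cksum_prepare() return zero or a negative errno, hence the negation. A caller-side sketch with placeholder port/queue/packet variables:

    #include <stdio.h>
    #include <rte_errno.h>
    #include <rte_ethdev.h>

    /* tx_prepare returns how many packets were accepted; on a partial
     * result, rte_errno describes the first rejected packet. */
    uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
                                          pkts, nb_pkts);
    if (nb_prep != nb_pkts)
            printf("pkt %u rejected: %s\n", nb_prep,
                   rte_strerror(rte_errno));
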
@@ -843,21 +845,21 @@ atl_desc_to_offload_flags(struct atl_rx_queue *rxq,
        if (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) {
                /* IPv4 csum error ? */
                if (rxd_wb->rx_stat & BIT(1))
-                       mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
+                       mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
                else
-                       mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
+                       mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
        } else {
-               mbuf_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+               mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
        }
 
        /* CSUM calculated ? */
        if (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) {
                if (rxd_wb->rx_stat & BIT(2))
-                       mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
+                       mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                else
-                       mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
+                       mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
        } else {
-               mbuf_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+               mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
        }
 
        return mbuf_flags;
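
These are the standard rte_mbuf Rx flags after the 21.11 rename; the three-state encoding (GOOD/BAD/UNKNOWN) lets an application distinguish a verified checksum from one the hardware never inspected. A minimal consumer sketch:

    #include <rte_mbuf.h>

    /* Drop only on a definite failure; UNKNOWN means the offload was
     * disabled or the packet type was not recognized. */
    static inline int
    rx_csum_bad(const struct rte_mbuf *m)
    {
            return (m->ol_flags & (RTE_MBUF_F_RX_IP_CKSUM_BAD |
                                   RTE_MBUF_F_RX_L4_CKSUM_BAD)) != 0;
    }
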
@@ -914,6 +916,8 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        struct atl_adapter *adapter =
                ATL_DEV_TO_ADAPTER(&rte_eth_devices[rxq->port_id]);
        struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(adapter);
+       struct aq_hw_cfg_s *cfg =
+               ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
        struct atl_rx_entry *sw_ring = rxq->sw_ring;
 
        struct rte_mbuf *new_mbuf;
@@ -937,7 +941,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        break;
                }
 
-               PMD_RX_LOG(ERR, "port_id=%u queue_id=%u tail=%u "
+               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u tail=%u "
                           "eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x",
                           (unsigned int)rxq->port_id,
                           (unsigned int)rxq->queue_id,
@@ -972,7 +976,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                while (true) {
                        new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
                        if (new_mbuf == NULL) {
-                               PMD_RX_LOG(ERR,
+                               PMD_RX_LOG(DEBUG,
                                   "RX mbuf alloc failed port_id=%u "
                                   "queue_id=%u", (unsigned int)rxq->port_id,
                                   (unsigned int)rxq->queue_id);
@@ -1031,8 +1035,18 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                        rx_mbuf->ol_flags =
                                atl_desc_to_offload_flags(rxq, &rxd_wb);
+
                        rx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);
 
+                       if (rx_mbuf->packet_type & RTE_PTYPE_L2_ETHER_VLAN) {
+                               rx_mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
+                               rx_mbuf->vlan_tci = rxd_wb.vlan;
+
+                               if (cfg->vlan_strip)
+                                       rx_mbuf->ol_flags |=
+                                               RTE_MBUF_F_RX_VLAN_STRIPPED;
+                       }
+
                        if (!rx_mbuf_first)
                                rx_mbuf_first = rx_mbuf;
                        rx_mbuf_first->nb_segs++;
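
RTE_MBUF_F_RX_VLAN states that vlan_tci is valid; RTE_MBUF_F_RX_VLAN_STRIPPED additionally states that the tag was removed from the packet data, which is why it is only set when vlan_strip is configured. A consumer sketch (handle_vlan is a hypothetical application handler):

    #include <rte_mbuf.h>

    if (m->ol_flags & RTE_MBUF_F_RX_VLAN) {
            uint16_t vid = m->vlan_tci & 0x0fff;  /* VID: low 12 bits */
            /* Without RTE_MBUF_F_RX_VLAN_STRIPPED the 802.1Q header
             * is still present in the frame data as well. */
            handle_vlan(m, vid);
    }
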
@@ -1065,7 +1079,7 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                adapter->sw_stats.q_ibytes[rxq->queue_id] +=
                        rx_mbuf_first->pkt_len;
 
-               PMD_RX_LOG(ERR, "add mbuf segs=%d pkt_len=%d",
+               PMD_RX_LOG(DEBUG, "add mbuf segs=%d pkt_len=%d",
                        rx_mbuf_first->nb_segs,
                        rx_mbuf_first->pkt_len);
        }
@@ -1085,7 +1099,7 @@ err_stop:
         */
        nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
        if (nb_hold > rxq->rx_free_thresh) {
-               PMD_RX_LOG(ERR, "port_id=%u queue_id=%u rx_tail=%u "
+               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
                        "nb_hold=%u nb_rx=%u",
                        (unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,
                        (unsigned int)tail, (unsigned int)nb_hold,
@@ -1110,8 +1124,6 @@ atl_xmit_cleanup(struct atl_tx_queue *txq)
        struct hw_atl_txd_s *txd;
        int to_clean = 0;
 
-       PMD_INIT_FUNC_TRACE();
-
        if (txq != NULL) {
                sw_ring = txq->sw_ring;
                int head = txq->tx_head;
@@ -1162,16 +1174,12 @@ atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
        uint32_t tx_cmd = 0;
        uint64_t ol_flags = tx_pkt->ol_flags;
 
-       PMD_INIT_FUNC_TRACE();
-
-       if (ol_flags & PKT_TX_TCP_SEG) {
-               PMD_DRV_LOG(DEBUG, "xmit TSO pkt");
-
+       if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
                tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
 
                txc->cmd = 0x4;
 
-               if (ol_flags & PKT_TX_IPV6)
+               if (ol_flags & RTE_MBUF_F_TX_IPV6)
                        txc->cmd |= 0x2;
 
                txc->l2_len = tx_pkt->l2_len;
@@ -1181,7 +1189,7 @@ atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
                txc->mss_len = tx_pkt->tso_segsz;
        }
 
-       if (ol_flags & PKT_TX_VLAN) {
+       if (ol_flags & RTE_MBUF_F_TX_VLAN) {
                tx_cmd |= tx_desc_cmd_vlan;
                txc->vlan_tag = tx_pkt->vlan_tci;
        }
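
atl_tso_setup() consumes the standard mbuf TSO contract. A hypothetical application-side sketch of the fields a TSO packet must carry for IPv4/TCP (RTE_MBUF_F_TX_TCP_SEG already implies the TCP checksum request):

    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_mbuf.h>
    #include <rte_tcp.h>

    m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG |
                   RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM;
    m->l2_len = sizeof(struct rte_ether_hdr);
    m->l3_len = sizeof(struct rte_ipv4_hdr);   /* no IP options */
    m->l4_len = sizeof(struct rte_tcp_hdr);    /* no TCP options */
    m->tso_segsz = 1448;                       /* MSS per segment */
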
@@ -1199,9 +1207,9 @@ atl_setup_csum_offload(struct rte_mbuf *mbuf, struct hw_atl_txd_s *txd,
                       uint32_t tx_cmd)
 {
        txd->cmd |= tx_desc_cmd_fcs;
-       txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
+       txd->cmd |= (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
        /* L4 csum requested */
-       txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
+       txd->cmd |= (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
        txd->cmd |= tx_cmd;
 }
 
@@ -1221,8 +1229,6 @@ atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
        u32 tx_cmd = 0U;
        int desc_count = 0;
 
-       PMD_INIT_FUNC_TRACE();
-
        tail = txq->tx_tail;
 
        txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];
@@ -1337,4 +1343,3 @@ atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
        return nb_tx;
 }
-