net/atlantic: update for 21.11 mbuf/ethdev API renames (title reconstructed from patch content; original header line referenced an unrelated ngbe commit)
[dpdk.git] / drivers / net / atlantic / atl_rxtx.c
index 7d367c9..e3f57de 100644 (file)
 #include "hw_atl/hw_atl_b0_internal.h"
 
 #define ATL_TX_CKSUM_OFFLOAD_MASK (                     \
-       PKT_TX_IP_CKSUM |                                \
-       PKT_TX_L4_MASK |                                 \
-       PKT_TX_TCP_SEG)
+       RTE_MBUF_F_TX_IP_CKSUM |                                 \
+       RTE_MBUF_F_TX_L4_MASK |                          \
+       RTE_MBUF_F_TX_TCP_SEG)
 
 #define ATL_TX_OFFLOAD_MASK (                           \
-       PKT_TX_VLAN |                                    \
-       PKT_TX_IPV6 |                                    \
-       PKT_TX_IPV4 |                                    \
-       PKT_TX_IP_CKSUM |                                \
-       PKT_TX_L4_MASK |                                 \
-       PKT_TX_TCP_SEG)
+       RTE_MBUF_F_TX_VLAN |                                     \
+       RTE_MBUF_F_TX_IPV6 |                                     \
+       RTE_MBUF_F_TX_IPV4 |                                     \
+       RTE_MBUF_F_TX_IP_CKSUM |                                 \
+       RTE_MBUF_F_TX_L4_MASK |                          \
+       RTE_MBUF_F_TX_TCP_SEG)
 
 #define ATL_TX_OFFLOAD_NOTSUP_MASK \
-       (PKT_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
+       (RTE_MBUF_F_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
 
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
@@ -125,7 +125,7 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
         * different socket than was previously used.
         */
        if (dev->data->rx_queues[rx_queue_id] != NULL) {
-               atl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
+               atl_rx_queue_release(dev, rx_queue_id);
                dev->data->rx_queues[rx_queue_id] = NULL;
        }
 
@@ -145,10 +145,10 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 
        rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-               DEV_RX_OFFLOAD_IPV4_CKSUM;
+               RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
        rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
-               (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
-       if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+               (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
+       if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
                PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
 
        /* allocate memory for the software ring */
@@ -247,7 +247,7 @@ atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
         * different socket than was previously used.
         */
        if (dev->data->tx_queues[tx_queue_id] != NULL) {
-               atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
+               atl_tx_queue_release(dev, tx_queue_id);
                dev->data->tx_queues[tx_queue_id] = NULL;
        }
 
@@ -498,13 +498,13 @@ atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 }
 
 void
-atl_rx_queue_release(void *rx_queue)
+atl_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       PMD_INIT_FUNC_TRACE();
+       struct atl_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
 
-       if (rx_queue != NULL) {
-               struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
+       PMD_INIT_FUNC_TRACE();
 
+       if (rxq != NULL) {
                atl_rx_queue_release_mbufs(rxq);
                rte_free(rxq->sw_ring);
                rte_free(rxq);
@@ -569,13 +569,13 @@ atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 }
 
 void
-atl_tx_queue_release(void *tx_queue)
+atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       PMD_INIT_FUNC_TRACE();
+       struct atl_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
 
-       if (tx_queue != NULL) {
-               struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;
+       PMD_INIT_FUNC_TRACE();
 
+       if (txq != NULL) {
                atl_tx_queue_release_mbufs(txq);
                rte_free(txq->sw_ring);
                rte_free(txq);
@@ -590,13 +590,13 @@ atl_free_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               atl_rx_queue_release(dev->data->rx_queues[i]);
+               atl_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = 0;
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               atl_tx_queue_release(dev->data->tx_queues[i]);
+               atl_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = 0;
        }
        dev->data->nb_tx_queues = 0;
@@ -689,18 +689,13 @@ atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 /* Return Rx queue avail count */
 
 uint32_t
-atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+atl_rx_queue_count(void *rx_queue)
 {
        struct atl_rx_queue *rxq;
 
        PMD_INIT_FUNC_TRACE();
 
-       if (rx_queue_id >= dev->data->nb_rx_queues) {
-               PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
-               return 0;
-       }
-
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
 
        if (rxq == NULL)
                return 0;
@@ -850,21 +845,21 @@ atl_desc_to_offload_flags(struct atl_rx_queue *rxq,
        if (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) {
                /* IPv4 csum error ? */
                if (rxd_wb->rx_stat & BIT(1))
-                       mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
+                       mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
                else
-                       mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
+                       mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
        } else {
-               mbuf_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+               mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
        }
 
        /* CSUM calculated ? */
        if (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) {
                if (rxd_wb->rx_stat & BIT(2))
-                       mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
+                       mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                else
-                       mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
+                       mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
        } else {
-               mbuf_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+               mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
        }
 
        return mbuf_flags;
@@ -1044,12 +1039,12 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        rx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);
 
                        if (rx_mbuf->packet_type & RTE_PTYPE_L2_ETHER_VLAN) {
-                               rx_mbuf->ol_flags |= PKT_RX_VLAN;
+                               rx_mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
                                rx_mbuf->vlan_tci = rxd_wb.vlan;
 
                                if (cfg->vlan_strip)
                                        rx_mbuf->ol_flags |=
-                                               PKT_RX_VLAN_STRIPPED;
+                                               RTE_MBUF_F_RX_VLAN_STRIPPED;
                        }
 
                        if (!rx_mbuf_first)
@@ -1179,12 +1174,12 @@ atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
        uint32_t tx_cmd = 0;
        uint64_t ol_flags = tx_pkt->ol_flags;
 
-       if (ol_flags & PKT_TX_TCP_SEG) {
+       if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
                tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
 
                txc->cmd = 0x4;
 
-               if (ol_flags & PKT_TX_IPV6)
+               if (ol_flags & RTE_MBUF_F_TX_IPV6)
                        txc->cmd |= 0x2;
 
                txc->l2_len = tx_pkt->l2_len;
@@ -1194,7 +1189,7 @@ atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
                txc->mss_len = tx_pkt->tso_segsz;
        }
 
-       if (ol_flags & PKT_TX_VLAN) {
+       if (ol_flags & RTE_MBUF_F_TX_VLAN) {
                tx_cmd |= tx_desc_cmd_vlan;
                txc->vlan_tag = tx_pkt->vlan_tci;
        }
@@ -1212,9 +1207,9 @@ atl_setup_csum_offload(struct rte_mbuf *mbuf, struct hw_atl_txd_s *txd,
                       uint32_t tx_cmd)
 {
        txd->cmd |= tx_desc_cmd_fcs;
-       txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
+       txd->cmd |= (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
        /* L4 csum requested */
-       txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
+       txd->cmd |= (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
        txd->cmd |= tx_cmd;
 }