ixgbe: offload VxLAN and NVGRE Rx checksum on X550
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 52a263c..8d5576b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -94,7 +94,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
 
        m = __rte_mbuf_raw_alloc(mp);
        __rte_mbuf_sanity_check_raw(m, 0);
-       return (m);
+       return m;
 }
 
 
@@ -171,7 +171,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
        int i;
 
        for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
-               buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+               buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
                pkt_len = (*pkts)->data_len;
 
                /* write data to descriptor */
@@ -194,7 +194,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
        uint64_t buf_dma_addr;
        uint32_t pkt_len;
 
-       buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+       buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
        pkt_len = (*pkts)->data_len;
 
        /* write data to descriptor */
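
[The hunks above, and several below, switch from the RTE_MBUF_DATA_DMA_ADDR()
macros to the rte_mbuf_data_dma_addr() inline helpers. For orientation only —
this is a sketch of what those helpers compute, per the rte_mbuf.h of this
period, not part of this diff:

        /* Bus address of the current start of packet data. */
        static inline phys_addr_t
        rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
        {
                return mb->buf_physaddr + mb->data_off;
        }

        /* Bus address assuming the default headroom; used when refilling
         * Rx descriptors with freshly reset mbufs.
         */
        static inline phys_addr_t
        rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
        {
                return mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
        }
]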
@@ -468,7 +468,7 @@ what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
        }
 
        /* Mismatch, use the previous context */
-       return (IXGBE_CTX_NUM);
+       return IXGBE_CTX_NUM;
 }
 
 static inline uint32_t
@@ -561,7 +561,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
 
        /* No Error */
-       return (0);
+       return 0;
 }
 
 uint16_t
@@ -683,7 +683,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        if (ixgbe_xmit_cleanup(txq) != 0) {
                                /* Could not clean any descriptors */
                                if (nb_tx == 0)
-                                       return (0);
+                                       return 0;
                                goto end_of_tx;
                        }
 
@@ -712,7 +712,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                                 * descriptors
                                                 */
                                                if (nb_tx == 0)
-                                                       return (0);
+                                                       return 0;
                                                goto end_of_tx;
                                        }
                                }
@@ -816,7 +816,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         * Set up Transmit Data Descriptor.
                         */
                        slen = m_seg->data_len;
-                       buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+                       buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
                        txd->read.buffer_addr =
                                rte_cpu_to_le_64(buf_dma_addr);
                        txd->read.cmd_type_len =
@@ -870,7 +870,7 @@ end_of_tx:
        IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
        txq->tx_tail = tx_id;
 
-       return (nb_tx);
+       return nb_tx;
 }
 
 /*********************************************************************
@@ -1003,6 +1003,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
 static inline uint64_t
 rx_desc_error_to_pkt_flags(uint32_t rx_status)
 {
+       uint64_t pkt_flags;
+
        /*
         * Bit 31: IPE, IPv4 checksum error
         * Bit 30: L4I, L4I integrity error
@@ -1011,8 +1013,15 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
                0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
                PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
        };
-       return error_to_pkt_flags_map[(rx_status >>
+       pkt_flags = error_to_pkt_flags_map[(rx_status >>
                IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
+
+       if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
+           (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
+               pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
+       }
+
+       return pkt_flags;
 }
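
[This hunk is the substance of the patch: when the X550 reports outer IP
checksum coverage (IXGBE_RXD_STAT_OUTERIPCS) together with an outer IP
checksum error (IXGBE_RXDADV_ERR_OUTERIPER) on a tunneled VxLAN/NVGRE packet,
PKT_RX_EIP_CKSUM_BAD is ORed into the mbuf offload flags. A minimal sketch of
how an application could consume the new flag on the Rx path — port_id,
queue_id, pkts and BURST_SIZE are illustrative names, not from this patch:

        uint16_t i, nb_rx;

        nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
        for (i = 0; i < nb_rx; i++) {
                if (pkts[i]->ol_flags & PKT_RX_EIP_CKSUM_BAD) {
                        /* Outer IP header checksum failed in hardware:
                         * drop the tunneled packet without parsing it.
                         */
                        rte_pktmbuf_free(pkts[i]);
                        continue;
                }
                /* ... normal processing ... */
        }
]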
 
 /*
@@ -1136,7 +1145,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
        diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
                                    rxq->rx_free_thresh);
        if (unlikely(diag != 0))
-               return (-ENOMEM);
+               return -ENOMEM;
 
        rxdp = &rxq->rx_ring[alloc_idx];
        for (i = 0; i < rxq->rx_free_thresh; ++i) {
@@ -1152,7 +1161,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
                mb->data_off = RTE_PKTMBUF_HEADROOM;
 
                /* populate the descriptors */
-               dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
+               dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
                rxdp[i].read.hdr_addr = 0;
                rxdp[i].read.pkt_addr = dma_addr;
        }
@@ -1379,7 +1388,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
                dma_addr =
-                       rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
                rxdp->read.hdr_addr = 0;
                rxdp->read.pkt_addr = dma_addr;
 
@@ -1458,7 +1467,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                nb_hold = 0;
        }
        rxq->nb_rx_hold = nb_hold;
-       return (nb_rx);
+       return nb_rx;
 }
 
 /**
@@ -1672,7 +1681,7 @@ next_desc:
 
                if (!bulk_alloc) {
                        __le64 dma =
-                         rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
                        /*
                         * Update RX descriptor with the physical address of the
                         * new data buffer of the new allocated mbuf.
@@ -2068,7 +2077,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
 
        /*
         * Allocate TX ring hardware descriptors. A memzone large enough to
@@ -2080,7 +2089,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                        IXGBE_ALIGN, socket_id);
        if (tz == NULL) {
                ixgbe_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        txq->nb_tx_desc = nb_desc;
@@ -2117,7 +2126,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                                RTE_CACHE_LINE_SIZE, socket_id);
        if (txq->sw_ring == NULL) {
                ixgbe_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
                     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
@@ -2130,7 +2139,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        dev->data->tx_queues[queue_idx] = txq;
 
 
-       return (0);
+       return 0;
 }
 
 /**
@@ -2347,7 +2356,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
                        (nb_desc > IXGBE_MAX_RING_DESC) ||
                        (nb_desc < IXGBE_MIN_RING_DESC)) {
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        /* Free memory prior to re-allocation if needed... */
@@ -2360,7 +2369,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
        rxq->mb_pool = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
@@ -2382,7 +2391,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                                      RX_RING_SZ, IXGBE_ALIGN, socket_id);
        if (rz == NULL) {
                ixgbe_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /*
@@ -2439,7 +2448,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                                          RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq->sw_ring) {
                ixgbe_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /*
@@ -2456,7 +2465,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                                   RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq->sw_sc_ring) {
                ixgbe_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
@@ -3584,7 +3593,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
                if (mbuf == NULL) {
                        PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
                                     (unsigned) rxq->queue_id);
-                       return (-ENOMEM);
+                       return -ENOMEM;
                }
 
                rte_mbuf_refcnt_set(mbuf, 1);
@@ -3594,7 +3603,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
                mbuf->port = rxq->port_id;
 
                dma_addr =
-                       rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
                rxd = &rxq->rx_ring[i];
                rxd->read.hdr_addr = 0;
                rxd->read.pkt_addr = dma_addr;