net/i40e: handle QinQ strip
[dpdk.git] / drivers / net / i40e / i40e_rxtx.c
index 153ca60..6a66cec 100644
@@ -172,12 +172,6 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
 }
 #endif
 
-#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK   0x03
-#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID  0x01
-#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX   0x02
-#define I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK   0x03
-#define I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX   0x01
-
 static inline uint64_t
 i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
 {
@@ -312,7 +306,7 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
                break;
        case PKT_TX_UDP_CKSUM:
                *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
-               *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
+               *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
                                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                break;
        default:
@@ -564,8 +558,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
        }
 
        /* Update rx tail register */
-       rte_wmb();
-       I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
+       I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
 
        rxq->rx_free_trigger =
                (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
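
Note: the replacement above is not dropping the barrier. In DPDK's i40e osdep
layer, I40E_PCI_REG_WRITE() is built on rte_write32(), which already issues an
I/O write barrier before the MMIO store, so the explicit rte_wmb() plus
relaxed-write pair was redundant. The same reasoning applies to the Tx tail
updates further down. A paraphrased sketch of the definitions involved (not a
verbatim copy of the headers):

	#define I40E_PCI_REG_WRITE(reg, value) \
		rte_write32(rte_cpu_to_le_32(value), reg)

	/* from rte_io.h: the non-relaxed writer orders the preceding
	 * descriptor stores before the doorbell write */
	static inline void
	rte_write32(uint32_t value, volatile void *addr)
	{
		rte_io_wmb();
		rte_write32_relaxed(value, addr);
	}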
@@ -981,15 +974,9 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
                return ctx_desc;
        }
 
-       /**
-        * in case of non tunneling packet, the outer_l2_len and
-        * outer_l3_len must be 0.
-        */
-       hdr_len = tx_offload.outer_l2_len +
-               tx_offload.outer_l3_len +
-               tx_offload.l2_len +
-               tx_offload.l3_len +
-               tx_offload.l4_len;
+       hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
+       hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
+                  tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
 
        cd_cmd = I40E_TX_CTX_DESC_TSO;
        cd_tso_len = mbuf->pkt_len - hdr_len;
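
Worked example, using DPDK's usual VXLAN length conventions: for a VXLAN TSO
packet, l2_len covers outer UDP + VXLAN + inner Ethernet (8 + 8 + 14 = 30), so
hdr_len = 14 (outer_l2_len) + 20 (outer IPv4) + 30 + 20 (inner IPv4) + 20 (TCP)
= 104, and cd_tso_len = pkt_len - 104. For a non-tunnel TCP packet the new code
ignores outer_l2_len/outer_l3_len entirely, so stale outer values left in the
mbuf by an application can no longer inflate hdr_len, which is what the old
unconditional sum got wrong.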
@@ -1214,13 +1201,11 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        }
 
 end_of_tx:
-       rte_wmb();
-
        PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
                   (unsigned) txq->port_id, (unsigned) txq->queue_id,
                   (unsigned) tx_id, (unsigned) nb_tx);
 
-       I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
+       I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
        txq->tx_tail = tx_id;
 
        return nb_tx;
@@ -1371,8 +1356,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
                txq->tx_tail = 0;
 
        /* Update the tx tail register */
-       rte_wmb();
-       I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, txq->tx_tail);
+       I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
 
        return nb_pkts;
 }
@@ -1446,7 +1430,7 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                if (!(ol_flags & PKT_TX_TCP_SEG)) {
                        if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
                            m->pkt_len > I40E_FRAME_SIZE_MAX) {
-                               rte_errno = -EINVAL;
+                               rte_errno = EINVAL;
                                return i;
                        }
                } else if (m->nb_segs > I40E_TX_MAX_SEG ||
@@ -1456,31 +1440,31 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                        /* MSS outside the range (256B - 9674B) is considered
                         * malicious
                         */
-                       rte_errno = -EINVAL;
+                       rte_errno = EINVAL;
                        return i;
                }
 
                if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
-                       rte_errno = -ENOTSUP;
+                       rte_errno = ENOTSUP;
                        return i;
                }
 
                /* check the size of packet */
                if (m->pkt_len < I40E_TX_MIN_PKT_LEN) {
-                       rte_errno = -EINVAL;
+                       rte_errno = EINVAL;
                        return i;
                }
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
 #endif
                ret = rte_net_intel_cksum_prepare(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
        }
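
The sign flips above align i40e_prep_pkts() with the documented convention:
rte_errno holds positive errno values, while helpers such as
rte_validate_tx_offload() and rte_net_intel_cksum_prepare() return negative
errno, hence the added negation. A minimal caller sketch (port_id/queue_id and
the mbuf array are placeholders):

	#include <stdio.h>
	#include <rte_ethdev.h>
	#include <rte_errno.h>

	static uint16_t
	xmit_checked(uint16_t port_id, uint16_t queue_id,
		     struct rte_mbuf **pkts, uint16_t n)
	{
		/* tx_prepare returns how many leading packets passed the
		 * driver checks; on failure rte_errno holds a positive
		 * errno describing why pkts[nb_prep] was rejected */
		uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
						      pkts, n);
		if (nb_prep < n)
			printf("pkt %u rejected: %s\n", nb_prep,
			       rte_strerror(rte_errno));
		return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
	}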
@@ -1550,8 +1534,6 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
                return err;
        }
 
-       rte_wmb();
-
        /* Init the RX tail register. */
        I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
@@ -2608,7 +2590,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
        struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi);
        struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
        struct rte_eth_dev_data *data = pf->dev_data;
-       uint16_t buf_size, len;
+       uint16_t buf_size;
 
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                RTE_PKTMBUF_HEADROOM);
@@ -2631,8 +2613,9 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
                break;
        }
 
-       len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
-       rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
+       rxq->max_pkt_len =
+               RTE_MIN((uint32_t)(hw->func_caps.rx_buf_chain_len *
+                       rxq->rx_buf_len), data->dev_conf.rxmode.max_rx_pkt_len);
        if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
                        rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
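
Note: the uint32_t cast is the point of this hunk. The old code stored the
product in a uint16_t intermediate: with, say, rx_buf_chain_len = 5 and
rx_buf_len = 16384, the real chain limit is 81920 bytes, which wraps to
81920 - 65536 = 16384 in 16 bits and would wrongly cap max_pkt_len. Taking
RTE_MIN() over uint32_t keeps the full value (the numbers here are
illustrative, not read from the datasheet).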
@@ -2934,7 +2917,7 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 static eth_rx_burst_t
 i40e_get_latest_rx_vec(bool scatter)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
                                 i40e_recv_pkts_vec_avx2;
@@ -2946,7 +2929,7 @@ i40e_get_latest_rx_vec(bool scatter)
 static eth_rx_burst_t
 i40e_get_recommend_rx_vec(bool scatter)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
        /*
         * since AVX frequency can be different from base frequency, limit
         * use of AVX2 version to later platforms, not all those that could
@@ -3034,6 +3017,51 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
        }
 }
 
+int
+i40e_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+                      struct rte_eth_burst_mode *mode)
+{
+       eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+       uint64_t options;
+
+       if (pkt_burst == i40e_recv_scattered_pkts)
+               options = RTE_ETH_BURST_SCALAR | RTE_ETH_BURST_SCATTERED;
+       else if (pkt_burst == i40e_recv_pkts_bulk_alloc)
+               options = RTE_ETH_BURST_SCALAR | RTE_ETH_BURST_BULK_ALLOC;
+       else if (pkt_burst == i40e_recv_pkts)
+               options = RTE_ETH_BURST_SCALAR;
+#ifdef RTE_ARCH_X86
+       else if (pkt_burst == i40e_recv_scattered_pkts_vec_avx2)
+               options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_AVX2 |
+                         RTE_ETH_BURST_SCATTERED;
+       else if (pkt_burst == i40e_recv_pkts_vec_avx2)
+               options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_AVX2;
+       else if (pkt_burst == i40e_recv_scattered_pkts_vec)
+               options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_SSE |
+                         RTE_ETH_BURST_SCATTERED;
+       else if (pkt_burst == i40e_recv_pkts_vec)
+               options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_SSE;
+#elif defined(RTE_ARCH_ARM64)
+       else if (pkt_burst == i40e_recv_scattered_pkts_vec)
+               options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_NEON |
+                         RTE_ETH_BURST_SCATTERED;
+       else if (pkt_burst == i40e_recv_pkts_vec)
+               options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_NEON;
+#elif defined(RTE_ARCH_PPC_64)
+       else if (pkt_burst == i40e_recv_scattered_pkts_vec)
+               options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_ALTIVEC |
+                         RTE_ETH_BURST_SCATTERED;
+       else if (pkt_burst == i40e_recv_pkts_vec)
+               options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_ALTIVEC;
+#endif
+       else
+               options = 0;
+
+       mode->options = options;
+
+       return options != 0 ? 0 : -EINVAL;
+}
+
 void __attribute__((cold))
 i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 {
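
Applications reach the two new callbacks through the ethdev burst-mode query
API of this era, in which rte_eth_burst_mode carries the options bitmask filled
in above. A usage sketch (port/queue numbers are placeholders):

	#include <stdio.h>
	#include <inttypes.h>
	#include <rte_ethdev.h>

	static void
	print_rx_burst_mode(uint16_t port_id, uint16_t queue_id)
	{
		struct rte_eth_burst_mode mode;

		if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) != 0)
			return;
		/* e.g. RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_AVX2 |
		 * RTE_ETH_BURST_SCATTERED when the driver selected
		 * i40e_recv_scattered_pkts_vec_avx2 */
		printf("rx burst options: 0x%" PRIx64 "\n", mode.options);
	}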
@@ -3063,7 +3091,7 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 static eth_tx_burst_t
 i40e_get_latest_tx_vec(void)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                return i40e_xmit_pkts_vec_avx2;
 #endif
@@ -3073,7 +3101,7 @@ i40e_get_latest_tx_vec(void)
 static eth_tx_burst_t
 i40e_get_recommend_tx_vec(void)
 {
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
        /*
         * since AVX frequency can be different from base frequency, limit
         * use of AVX2 version to later platforms, not all those that could
@@ -3127,6 +3155,37 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
        }
 }
 
+int
+i40e_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+                      struct rte_eth_burst_mode *mode)
+{
+       eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+       uint64_t options;
+
+       if (pkt_burst == i40e_xmit_pkts_simple)
+               options = RTE_ETH_BURST_SCALAR | RTE_ETH_BURST_SIMPLE;
+       else if (pkt_burst == i40e_xmit_pkts)
+               options = RTE_ETH_BURST_SCALAR;
+#ifdef RTE_ARCH_X86
+       else if (pkt_burst == i40e_xmit_pkts_vec_avx2)
+               options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_AVX2;
+       else if (pkt_burst == i40e_xmit_pkts_vec)
+               options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_SSE;
+#elif defined(RTE_ARCH_ARM64)
+       else if (pkt_burst == i40e_xmit_pkts_vec)
+               options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_NEON;
+#elif defined(RTE_ARCH_PPC_64)
+       else if (pkt_burst == i40e_xmit_pkts_vec)
+               options = RTE_ETH_BURST_VECTOR | RTE_ETH_BURST_ALTIVEC;
+#endif
+       else
+               options = 0;
+
+       mode->options = options;
+
+       return options != 0 ? 0 : -EINVAL;
+}
+
 void __attribute__((cold))
 i40e_set_default_ptype_table(struct rte_eth_dev *dev)
 {
@@ -3174,7 +3233,8 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev)
        ad->pctypes_tbl[RTE_ETH_FLOW_L2_PAYLOAD] =
                                (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);
 
-       if (hw->mac.type == I40E_MAC_X722) {
+       if (hw->mac.type == I40E_MAC_X722 ||
+               hw->mac.type == I40E_MAC_X722_VF) {
                ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
                        (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP);
                ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
@@ -3196,14 +3256,15 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev)
        }
 }
 
+#ifndef RTE_LIBRTE_I40E_INC_VECTOR
 /* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */
-__rte_weak int
+int
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
 {
        return -1;
 }
 
-__rte_weak uint16_t
+uint16_t
 i40e_recv_pkts_vec(
        void __rte_unused *rx_queue,
        struct rte_mbuf __rte_unused **rx_pkts,
@@ -3212,7 +3273,7 @@ i40e_recv_pkts_vec(
        return 0;
 }
 
-__rte_weak uint16_t
+uint16_t
 i40e_recv_scattered_pkts_vec(
        void __rte_unused *rx_queue,
        struct rte_mbuf __rte_unused **rx_pkts,
@@ -3221,52 +3282,55 @@ i40e_recv_scattered_pkts_vec(
        return 0;
 }
 
-__rte_weak uint16_t
-i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
-                       struct rte_mbuf __rte_unused **rx_pkts,
-                       uint16_t __rte_unused nb_pkts)
-{
-       return 0;
-}
-
-__rte_weak uint16_t
-i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
-                       struct rte_mbuf __rte_unused **rx_pkts,
-                       uint16_t __rte_unused nb_pkts)
-{
-       return 0;
-}
-
-__rte_weak int
+int
 i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
 {
        return -1;
 }
 
-__rte_weak int
+int
 i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
 {
        return -1;
 }
 
-__rte_weak void
+void
 i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
 {
        return;
 }
 
-__rte_weak uint16_t
+uint16_t
 i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
                          struct rte_mbuf __rte_unused **tx_pkts,
                          uint16_t __rte_unused nb_pkts)
 {
        return 0;
 }
+#endif /* ifndef RTE_LIBRTE_I40E_INC_VECTOR */
 
-__rte_weak uint16_t
+#ifndef CC_AVX2_SUPPORT
+uint16_t
+i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
+                       struct rte_mbuf __rte_unused **rx_pkts,
+                       uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
+
+uint16_t
+i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
+                       struct rte_mbuf __rte_unused **rx_pkts,
+                       uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
+
+uint16_t
 i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue,
                          struct rte_mbuf __rte_unused **tx_pkts,
                          uint16_t __rte_unused nb_pkts)
 {
        return 0;
 }
+#endif /* ifndef CC_AVX2_SUPPORT */
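
A closing note on the stub rework: the __rte_weak versions depended on the
linker preferring a strong definition from the vector object files, which is
fragile with static archives and link order; guarding the stubs with
#ifndef RTE_LIBRTE_I40E_INC_VECTOR and #ifndef CC_AVX2_SUPPORT instead makes
the selection explicit at compile time, so exactly one definition of each
symbol is ever built.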