diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
index 2f6f70a..1ad7464 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -1,39 +1,10 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- *   Copyright(c) 2017 IBM Corporation.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010 - 2015 Intel Corporation
+ * Copyright(c) 2017 IBM Corporation.
  */
 
 #include <stdint.h>
-#include <rte_ethdev.h>
+#include <ethdev_driver.h>
 #include <rte_malloc.h>
 
 #include "base/i40e_prototype.h"
@@ -42,7 +13,7 @@
 #include "i40e_rxtx.h"
 #include "i40e_rxtx_vec_common.h"
 
-#include <altivec.h>
+#include <rte_altivec.h>
 
 #pragma GCC diagnostic ignored "-Wcast-qual"
 
@@ -100,7 +71,7 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
                p1 = (uintptr_t)&mb1->rearm_data;
                *(uint64_t *)p1 = rxq->mbuf_initializer;
 
-               /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+               /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
                vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
                vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);
 
@@ -130,14 +101,6 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
        I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
 }
 
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
-
 static inline void
 desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
 {
@@ -154,7 +117,7 @@ desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
        /* map rss and vlan type to rss hash and vlan flag */
        const vector unsigned char vlan_flags = (vector unsigned char){
                        0, 0, 0, 0,
-                       PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+                       PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
                        0, 0, 0, 0,
                        0, 0, 0, 0};
 
@@ -169,10 +132,10 @@ desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
                        PKT_RX_IP_CKSUM_BAD,
                        PKT_RX_L4_CKSUM_BAD,
                        PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
-                       PKT_RX_EIP_CKSUM_BAD,
-                       PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
-                       PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
-                       PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+                       PKT_RX_OUTER_IP_CKSUM_BAD,
+                       PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+                       PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+                       PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
                                             | PKT_RX_IP_CKSUM_BAD,
                        0, 0, 0, 0, 0, 0, 0, 0};
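+       /* the checksum flag table above is indexed by the descriptor's IP,
+        * L4 and outer-IP checksum error bits
+        */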
 
@@ -202,14 +165,12 @@ desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
        rx_pkts[2]->ol_flags = (uint64_t)vlan0[0];
        rx_pkts[3]->ol_flags = (uint64_t)vlan0[1];
 }
-#else
-#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
-#endif
 
 #define PKTLEN_SHIFT     10
 
 static inline void
-desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts,
+               uint32_t *ptype_tbl)
 {
        vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
        vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
@@ -217,21 +178,23 @@ desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
        ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
        ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});
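+       /* after the shift, the low byte of each 64-bit lane holds the
+        * descriptor's 8-bit PTYPE field, used below to index ptype_tbl
+        */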
 
-       rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(
-                                       (*(vector unsigned char *)&ptype0)[0]);
-       rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(
-                                       (*(vector unsigned char *)&ptype0)[8]);
-       rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(
-                                       (*(vector unsigned char *)&ptype1)[0]);
-       rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(
-                                       (*(vector unsigned char *)&ptype1)[8]);
+       rx_pkts[0]->packet_type =
+               ptype_tbl[(*(vector unsigned char *)&ptype0)[0]];
+       rx_pkts[1]->packet_type =
+               ptype_tbl[(*(vector unsigned char *)&ptype0)[8]];
+       rx_pkts[2]->packet_type =
+               ptype_tbl[(*(vector unsigned char *)&ptype1)[0]];
+       rx_pkts[3]->packet_type =
+               ptype_tbl[(*(vector unsigned char *)&ptype1)[8]];
 }
 
- /* Notice:
-  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
-  * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
-  *   numbers of DD bits
-  */
+/**
+ * vPMD raw receive routine; only accepts nb_pkts >= RTE_I40E_DESCS_PER_LOOP.
+ *
+ * Notice:
+ * - returns no packets if nb_pkts < RTE_I40E_DESCS_PER_LOOP
+ * - nb_pkts is floor-aligned to a multiple of RTE_I40E_DESCS_PER_LOOP
+ */
 static inline uint16_t
 _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                   uint16_t nb_pkts, uint8_t *split_packet)
@@ -242,6 +205,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
        int pos;
        uint64_t var;
        vector unsigned char shuf_msk;
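+       /* per-adapter table mapping the descriptor's 8-bit ptype index to an
+        * RTE_PTYPE_* packet type value
+        */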
+       uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
        vector unsigned short crc_adjust = (vector unsigned short){
                0, 0,         /* ignore pkt_type field */
@@ -252,9 +216,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                };
        vector unsigned long dd_check, eop_check;
 
-       /* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
-       nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
-
        /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
        nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
 
@@ -466,7 +427,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                vec_st(pkt_mb1, 0,
                 (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
                );
-               desc_to_ptype_v(descs, &rx_pkts[pos]);
+               desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
                desc_to_olflags_v(descs, &rx_pkts[pos]);
 
                /* C.4 calc available number of desc */
@@ -497,15 +458,15 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
        return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
- /* vPMD receive routine that reassembles scattered packets
-  * Notice:
-  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
-  * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
-  *   numbers of DD bits
-  */
-uint16_t
-i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-                            uint16_t nb_pkts)
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
+ *
+ * Notice:
+ * - returns no packets if nb_pkts < RTE_I40E_DESCS_PER_LOOP
+ */
+static uint16_t
+i40e_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+                             uint16_t nb_pkts)
 {
        struct i40e_rx_queue *rxq = rx_queue;
        uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
@@ -538,6 +499,32 @@ i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                &split_flags[i]);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
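+ *
+ * Splits the request into bursts of at most RTE_I40E_VPMD_RX_BURST packets
+ * and stops early when a burst returns fewer packets than requested.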
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+                            uint16_t nb_pkts)
+{
+       uint16_t retval = 0;
+
+       while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
+               uint16_t burst;
+
+               burst = i40e_recv_scattered_burst_vec(rx_queue,
+                                                     rx_pkts + retval,
+                                                     RTE_I40E_VPMD_RX_BURST);
+               retval += burst;
+               nb_pkts -= burst;
+               if (burst < RTE_I40E_VPMD_RX_BURST)
+                       return retval;
+       }
+
+       return retval + i40e_recv_scattered_burst_vec(rx_queue,
+                                                     rx_pkts + retval,
+                                                     nb_pkts);
+}
+
 static inline void
 vtx1(volatile struct i40e_tx_desc *txdp,
        struct rte_mbuf *pkt, uint64_t flags)
@@ -547,7 +534,7 @@ vtx1(volatile struct i40e_tx_desc *txdp,
                ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
 
        vector unsigned long descriptor = (vector unsigned long){
-               pkt->buf_physaddr + pkt->data_off, high_qw};
+               pkt->buf_iova + pkt->data_off, high_qw};
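+       /* qword 0 of the descriptor carries the DMA address of the packet
+        * data; qword 1 carries the command, offset and buffer-size fields
+        */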
        *(vector unsigned long *)txdp = descriptor;
 }
 
@@ -629,25 +616,25 @@ i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
        return nb_pkts;
 }
 
-void __attribute__((cold))
+void __rte_cold
 i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
 {
        _i40e_rx_queue_release_mbufs_vec(rxq);
 }
 
-int __attribute__((cold))
+int __rte_cold
 i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
 {
        return i40e_rxq_vec_setup_default(rxq);
 }
 
-int __attribute__((cold))
+int __rte_cold
 i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused * txq)
 {
        return 0;
 }
 
-int __attribute__((cold))
+int __rte_cold
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
 {
        return i40e_rx_vec_dev_conf_condition_check_default(dev);