net/ixgbe: use SPDX tags in 6WIND copyrighted files
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 37bb57b..6c582b4 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1,35 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- *   Copyright 2014 6WIND S.A.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation.
+ * Copyright 2014 6WIND S.A.
  */
 
 #include <sys/queue.h>
@@ -62,7 +33,7 @@
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_prefetch.h>
 #include <rte_udp.h>
 #include <rte_tcp.h>
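
Context for the include change above: DPDK 18.02 split the ethdev headers into
an application-facing rte_ethdev.h and a driver-facing rte_ethdev_driver.h, and
PMD sources switch to the latter, which itself pulls in the public header. A
minimal sketch of the split (illustrative, not copied from the tree):

    /* application code: public ethdev API only */
    #include <rte_ethdev.h>

    /* PMD code: internal structures and helpers, includes rte_ethdev.h */
    #include <rte_ethdev_driver.h>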
@@ -185,7 +156,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
        int i;
 
        for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
-               buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+               buf_dma_addr = rte_mbuf_data_iova(*pkts);
                pkt_len = (*pkts)->data_len;
 
                /* write data to descriptor */
@@ -208,7 +179,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
        uint64_t buf_dma_addr;
        uint32_t pkt_len;
 
-       buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+       buf_dma_addr = rte_mbuf_data_iova(*pkts);
        pkt_len = (*pkts)->data_len;
 
        /* write data to descriptor */
@@ -924,7 +895,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         * Set up Transmit Data Descriptor.
                         */
                        slen = m_seg->data_len;
-                       buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+                       buf_dma_addr = rte_mbuf_data_iova(m_seg);
                        txd->read.buffer_addr =
                                rte_cpu_to_le_64(buf_dma_addr);
                        txd->read.cmd_type_len =
@@ -1633,7 +1604,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
                mb->data_off = RTE_PKTMBUF_HEADROOM;
 
                /* populate the descriptors */
-               dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
+               dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
                rxdp[i].read.hdr_addr = 0;
                rxdp[i].read.pkt_addr = dma_addr;
        }
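
The rte_mbuf_data_dma_addr()/rte_mbuf_data_dma_addr_default() replacements
throughout this file follow the DPDK 17.11 physical-address to IOVA rename in
the mbuf API; the computed bus address is unchanged. A rough sketch of what the
new helpers evaluate to (the _sketch names are illustrative, not the rte_mbuf.h
definitions):

    #include <rte_mbuf.h>

    /* IOVA of the current data start in this segment */
    static inline rte_iova_t
    mbuf_data_iova_sketch(const struct rte_mbuf *mb)
    {
            return mb->buf_iova + mb->data_off;     /* buf_iova was buf_physaddr */
    }

    /* IOVA of the default data start (just past the headroom), used when
     * (re)arming RX descriptors with a freshly allocated mbuf */
    static inline rte_iova_t
    mbuf_data_iova_default_sketch(const struct rte_mbuf *mb)
    {
            return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
    }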
@@ -1865,7 +1836,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
                rxdp->read.hdr_addr = 0;
                rxdp->read.pkt_addr = dma_addr;
 
@@ -2159,7 +2130,7 @@ next_desc:
 
                if (!bulk_alloc) {
                        __le64 dma =
-                         rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
                        /*
                         * Update RX descriptor with the physical address of the
                         * new data buffer of the new allocated mbuf.
@@ -2599,7 +2570,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        else
                txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
 
-       txq->tx_ring_phys_addr = tz->phys_addr;
+       txq->tx_ring_phys_addr = tz->iova;
        txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
 
        /* Allocate software ring */
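
The tx_ring_phys_addr assignment above (and the matching rx_ring_phys_addr one
further down) picks up the DPDK 17.11 rename of the memzone field phys_addr to
iova; the ring base address the PMD later writes to hardware is unchanged. A
hedged sketch of reading that base from a descriptor-ring memzone (helper name
is illustrative):

    #include <rte_memzone.h>

    static inline rte_iova_t
    ring_base_iova_sketch(const struct rte_memzone *mz)
    {
            return mz->iova;        /* formerly mz->phys_addr */
    }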
@@ -2639,7 +2610,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 static void __attribute__((cold))
 ixgbe_free_sc_cluster(struct rte_mbuf *m)
 {
-       uint8_t i, nb_segs = m->nb_segs;
+       uint16_t i, nb_segs = m->nb_segs;
        struct rte_mbuf *next_seg;
 
        for (i = 0; i < nb_segs; i++) {
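
The counter widening above matches the DPDK 17.04 mbuf rework, where nb_segs
(and port) grew from 8 to 16 bits; a uint8_t local would silently truncate
chains of more than 255 segments. A generic sketch of the corrected idiom for
walking and freeing a chain (not a copy of ixgbe_free_sc_cluster's body):

    #include <rte_mbuf.h>

    static void
    free_chain_sketch(struct rte_mbuf *m)
    {
            uint16_t i, nb_segs = m->nb_segs;       /* 16-bit, matching the field */

            for (i = 0; i < nb_segs && m != NULL; i++) {
                    struct rte_mbuf *next = m->next;

                    rte_pktmbuf_free_seg(m);
                    m = next;
            }
    }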
@@ -2901,7 +2872,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                        IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
        }
 
-       rxq->rx_ring_phys_addr = rz->phys_addr;
+       rxq->rx_ring_phys_addr = rz->iova;
        rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
 
        /*
@@ -4188,7 +4159,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
                mbuf->port = rxq->port_id;
 
                dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
                rxd = &rxq->rx_ring[i];
                rxd->read.hdr_addr = 0;
                rxd->read.pkt_addr = dma_addr;
@@ -5550,6 +5521,71 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
        }
 }
 
+int
+ixgbe_config_rss_filter(struct rte_eth_dev *dev,
+               struct ixgbe_rte_flow_rss_conf *conf, bool add)
+{
+       struct ixgbe_hw *hw;
+       uint32_t reta;
+       uint16_t i;
+       uint16_t j;
+       uint16_t sp_reta_size;
+       uint32_t reta_reg;
+       struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+       struct ixgbe_filter_info *filter_info =
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
+       if (!add) {
+               if (memcmp(conf, &filter_info->rss_info,
+                       sizeof(struct ixgbe_rte_flow_rss_conf)) == 0) {
+                       ixgbe_rss_disable(dev);
+                       memset(&filter_info->rss_info, 0,
+                               sizeof(struct ixgbe_rte_flow_rss_conf));
+                       return 0;
+               }
+               return -EINVAL;
+       }
+
+       if (filter_info->rss_info.num)
+               return -EINVAL;
+       /* Fill in redirection table
+        * The byte-swap is needed because NIC registers are in
+        * little-endian order.
+        */
+       reta = 0;
+       for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+               reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
+               if (j == conf->num)
+                       j = 0;
+               reta = (reta << 8) | conf->queue[j];
+               if ((i & 3) == 3)
+                       IXGBE_WRITE_REG(hw, reta_reg,
+                                       rte_bswap32(reta));
+       }
+
+       /* Configure the RSS key and the RSS protocols used to compute
+        * the RSS hash of input packets.
+        */
+       if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
+               ixgbe_rss_disable(dev);
+               return -EINVAL;
+       }
+       if (rss_conf.rss_key == NULL)
+               rss_conf.rss_key = rss_intel_key; /* Default hash key */
+       ixgbe_hw_rss_hash_set(hw, &rss_conf);
+
+       rte_memcpy(&filter_info->rss_info,
+               conf, sizeof(struct ixgbe_rte_flow_rss_conf));
+
+       return 0;
+}
+
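
The new ixgbe_config_rss_filter() above packs four 8-bit queue indexes into
each 32-bit RETA register, wrapping over the configured queue list, writes each
register byte-swapped because the NIC registers are little-endian, and then
programs the hash key and hash-function selection; it is presumably called from
the PMD's rte_flow RSS action handling (the callers are outside this file). A
standalone sketch of just the RETA packing step, with plain memory standing in
for the registers (function and parameter names are illustrative):

    #include <stdint.h>
    #include <rte_byteorder.h>

    static void
    pack_reta_sketch(const uint16_t *queue, uint16_t nb_queue,
                     uint32_t *regs, uint16_t reta_size)
    {
            uint32_t reta = 0;
            uint16_t i, j;

            for (i = 0, j = 0; i < reta_size; i++, j++) {
                    if (j == nb_queue)
                            j = 0;                  /* wrap over the queue list */
                    reta = (reta << 8) | (uint8_t)queue[j];
                    if ((i & 3) == 3)               /* flush every 4th entry */
                            regs[i / 4] = rte_bswap32(reta);
            }
    }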
 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
 int __attribute__((weak))
 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)