ixgbe: cleanups
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 96c4b98..7173db8 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1027,12 +1027,11 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq)
        struct ixgbe_rx_entry *rxep;
        struct rte_mbuf *mb;
        uint16_t alloc_idx;
-       uint64_t dma_addr;
+       __le64 dma_addr;
        int diag, i;
 
        /* allocate buffers in bulk directly into the S/W ring */
-       alloc_idx = (uint16_t)(rxq->rx_free_trigger -
-                               (rxq->rx_free_thresh - 1));
+       alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
        rxep = &rxq->sw_ring[alloc_idx];
        diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
                                    rxq->rx_free_thresh);
@@ -1050,7 +1049,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq)
                mb->port = rxq->port_id;
 
                /* populate the descriptors */
-               dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+               dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
                rxdp[i].read.hdr_addr = dma_addr;
                rxdp[i].read.pkt_addr = dma_addr;
        }
@@ -1060,10 +1059,9 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq)
        IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rxq->rx_free_trigger);
 
        /* update state of internal queue structure */
-       rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_trigger +
-                                               rxq->rx_free_thresh);
+       rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
        if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
-               rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+               rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
 
        /* no errors */
        return 0;
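
The hunks above make the descriptor fill endian-safe: the NIC reads
descriptors in little-endian byte order, so the DMA address is now converted
with rte_cpu_to_le_64() and kept in an __le64; the redundant uint16_t casts
around the free-trigger arithmetic are dropped as well. A minimal sketch of
the per-descriptor population, assuming RTE_MBUF_DATA_DMA_ADDR_DEFAULT()
expands to the mbuf's buffer physical address plus RTE_PKTMBUF_HEADROOM (its
conventional definition in DPDK of this period):

/* Illustrative helper, not part of the patch. */
static inline void
ixgbe_fill_rx_desc(volatile union ixgbe_adv_rx_desc *rxdp,
                   struct rte_mbuf *mb)
{
        /* Convert once; hdr_addr and pkt_addr share the same buffer
         * because header split is not used on this path. */
        __le64 dma_addr =
                rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));

        rxdp->read.hdr_addr = dma_addr;
        rxdp->read.pkt_addr = dma_addr;
}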
@@ -1170,6 +1168,17 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 
        return nb_rx;
 }
+
+#else
+
+/* Stub to avoid extra ifdefs */
+static uint16_t
+ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
+       __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+       return 0;
+}
+
 #endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
 
 uint16_t
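The #else stub keeps ixgbe_recv_pkts_bulk_alloc() defined even when
RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is compiled out, so the callback
selection in ixgbe_set_rx_function() below can reference the symbol without
its own ifdefs. The stub is never actually selected: with the option
disabled, the bulk-alloc preconditions check fails and clears
hw->rx_bulk_alloc_allowed at queue setup. The same pattern in isolation,
with hypothetical names:

/* Hypothetical illustration of the stub-over-ifdef pattern. */
#include <stdint.h>

struct pkt;

#ifdef FEATURE_ENABLED
uint16_t feature_burst(void *q, struct pkt **pkts, uint16_t n); /* real one */
#else
/* Stub: lets callers reference feature_burst unconditionally. */
static uint16_t
feature_burst(void *q __attribute__((unused)),
              struct pkt **pkts __attribute__((unused)),
              uint16_t n __attribute__((unused)))
{
        return 0;
}
#endif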
@@ -1558,13 +1567,14 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                first_seg->ol_flags = pkt_flags;
 
                if (likely(pkt_flags & PKT_RX_RSS_HASH))
-                       first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+                       first_seg->hash.rss =
+                                   rte_le_to_cpu_32(rxd.wb.lower.hi_dword.rss);
                else if (pkt_flags & PKT_RX_FDIR) {
                        first_seg->hash.fdir.hash =
-                               (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-                                          & IXGBE_ATR_HASH_MASK);
+                           rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.csum)
+                                          & IXGBE_ATR_HASH_MASK;
                        first_seg->hash.fdir.id =
-                               rxd.wb.lower.hi_dword.csum_ip.ip_id;
+                         rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.ip_id);
                }
 
                /* Prefetch data of first segment, if configured to do so. */
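
The write-back fields get the matching conversion on the way out: the RSS
hash and flow-director results are stored little-endian by the hardware and
are now converted with rte_le_to_cpu_32()/rte_le_to_cpu_16() before being
written into the mbuf. Condensed, the extraction amounts to the following
sketch (field names taken from the hunk above):

/* Illustrative only; mirrors the hunk above. */
static inline void
ixgbe_fill_hash(struct rte_mbuf *m, const union ixgbe_adv_rx_desc *rxd,
                uint64_t pkt_flags)
{
        if (pkt_flags & PKT_RX_RSS_HASH)
                m->hash.rss = rte_le_to_cpu_32(rxd->wb.lower.hi_dword.rss);
        else if (pkt_flags & PKT_RX_FDIR) {
                m->hash.fdir.hash =
                        rte_le_to_cpu_16(rxd->wb.lower.hi_dword.csum_ip.csum) &
                        IXGBE_ATR_HASH_MASK;
                m->hash.fdir.id =
                        rte_le_to_cpu_16(rxd->wb.lower.hi_dword.csum_ip.ip_id);
        }
}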
@@ -2074,12 +2084,12 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
 
 /* Reset dynamic ixgbe_rx_queue fields back to defaults */
 static void
-ixgbe_reset_rx_queue(struct ixgbe_rx_queue *rxq)
+ixgbe_reset_rx_queue(struct ixgbe_hw *hw, struct ixgbe_rx_queue *rxq)
 {
        static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
                        .pkt_addr = 0}};
        unsigned i;
-       uint16_t len;
+       uint16_t len = rxq->nb_rx_desc;
 
        /*
         * By default, the Rx queue setup function allocates enough memory for
@@ -2091,14 +2101,9 @@ ixgbe_reset_rx_queue(struct ixgbe_rx_queue *rxq)
         * constraints here to see if we need to zero out memory after the end
         * of the H/W descriptor ring.
         */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-       if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
+       if (hw->rx_bulk_alloc_allowed)
                /* zero out extra memory */
-               len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
-       else
-#endif
-               /* do not zero out extra memory */
-               len = rxq->nb_rx_desc;
+               len += RTE_PMD_IXGBE_RX_MAX_BURST;
 
        /*
         * Zero out HW ring memory. Zero out extra memory at the end of
@@ -2115,8 +2120,8 @@ ixgbe_reset_rx_queue(struct ixgbe_rx_queue *rxq)
         * entries is always allocated
         */
        memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
-       for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST; ++i) {
-               rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+       for (i = rxq->nb_rx_desc; i < len; ++i) {
+               rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
        }
 
        rxq->rx_nb_avail = 0;
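
Running the fake-mbuf loop from nb_rx_desc to len keeps the padding in step
with the new length computation: when bulk allocation is allowed, the
software ring carries RTE_PMD_IXGBE_RX_MAX_BURST extra entries that all
point at the queue's fake_mbuf, so the look-ahead in the bulk Rx path can
read past the last real descriptor without touching invalid memory. The
resulting layout, as an illustration:

/*
 * sw_ring layout after ixgbe_reset_rx_queue(), bulk alloc allowed:
 *
 *   index: 0 .............. nb_rx_desc-1 | nb_rx_desc .......... len-1
 *   mbuf:  per-descriptor entries        | &rxq->fake_mbuf padding
 *
 * where len == nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST. Without bulk
 * alloc, len == nb_rx_desc and no padding entries exist.
 */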
@@ -2140,7 +2145,6 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        const struct rte_memzone *rz;
        struct ixgbe_rx_queue *rxq;
        struct ixgbe_hw     *hw;
-       int use_def_burst_func = 1;
        uint16_t len;
 
        PMD_INIT_FUNC_TRACE();
@@ -2222,16 +2226,28 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 #endif
        rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
 
+       /*
+        * Certain constraints must be met in order to use the bulk buffer
+        * allocation Rx burst function. If any Rx queue doesn't meet them,
+        * the feature should be disabled for the whole port.
+        */
+       if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
+               PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
+                                   "preconditions - canceling the feature for "
+                                   "the whole port[%d]",
+                            rxq->queue_id, rxq->port_id);
+               hw->rx_bulk_alloc_allowed = false;
+       }
+
        /*
         * Allocate software ring. Allow for space at the end of the
         * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
         * function does not access an invalid memory region.
         */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-       len = (uint16_t)(nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
-#else
        len = nb_desc;
-#endif
+       if (hw->rx_bulk_alloc_allowed)
+               len += RTE_PMD_IXGBE_RX_MAX_BURST;
+
        rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
                                          sizeof(struct ixgbe_rx_entry) * len,
                                          RTE_CACHE_LINE_SIZE, socket_id);
@@ -2242,42 +2258,18 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
                     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
-       /*
-        * Certain constraints must be met in order to use the bulk buffer
-        * allocation Rx burst function.
-        */
-       use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
+       if (!rte_is_power_of_2(nb_desc)) {
+               PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
+                                   "preconditions - canceling the feature for "
+                                   "the whole port[%d]",
+                            rxq->queue_id, rxq->port_id);
+               hw->rx_vec_allowed = false;
+       } else
+               ixgbe_rxq_vec_setup(rxq);
 
-#ifdef RTE_IXGBE_INC_VECTOR
-       ixgbe_rxq_vec_setup(rxq);
-#endif
-       /* Check if pre-conditions are satisfied, and no Scattered Rx */
-       if (!use_def_burst_func && !dev->data->scattered_rx) {
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-               PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-                            "satisfied. Rx Burst Bulk Alloc function will be "
-                            "used on port=%d, queue=%d.",
-                            rxq->port_id, rxq->queue_id);
-               dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
-#ifdef RTE_IXGBE_INC_VECTOR
-               if (!ixgbe_rx_vec_condition_check(dev) &&
-                   (rte_is_power_of_2(nb_desc))) {
-                       PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
-                                    "sure RX burst size no less than 32.");
-                       dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
-               }
-#endif
-#endif
-       } else {
-               PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
-                            "are not satisfied, Scattered Rx is requested, "
-                            "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
-                            "enabled (port=%d, queue=%d).",
-                            rxq->port_id, rxq->queue_id);
-       }
        dev->data->rx_queues[queue_idx] = rxq;
 
-       ixgbe_reset_rx_queue(rxq);
+       ixgbe_reset_rx_queue(hw, rxq);
 
        return 0;
 }
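
With the ifdefs gone, queue setup only records compatibility: a failed
precondition check clears hw->rx_bulk_alloc_allowed, and a non-power-of-2
ring size clears hw->rx_vec_allowed, in both cases for the whole port. The
burst-function choice itself is deferred to ixgbe_set_rx_function(). The
intended flag lifecycle, sketched below (the side that re-arms the flags is
not part of this diff and presumably lives in ixgbe_dev_configure()):

/*
 * Assumed flow; only the middle steps appear in this patch:
 *
 *   ixgbe_dev_configure()        -- presumably resets both flags to true
 *   ixgbe_dev_rx_queue_setup()   -- any queue may clear either flag
 *   ixgbe_dev_rx_init()          -- forces scattered_rx when needed, then
 *     -> ixgbe_set_rx_function() -- reads the flags, picks rx_pkt_burst
 */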
@@ -2331,6 +2323,7 @@ void
 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
        unsigned i;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_INIT_FUNC_TRACE();
 
@@ -2346,7 +2339,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
                struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
                if (rxq != NULL) {
                        ixgbe_rx_queue_release_mbufs(rxq);
-                       ixgbe_reset_rx_queue(rxq);
+                       ixgbe_reset_rx_queue(hw, rxq);
                }
        }
 }
@@ -3508,6 +3501,74 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
        return 0;
 }
 
+void ixgbe_set_rx_function(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /*
+        * Vector Rx can be used only if a few configuration conditions are
+        * met and Rx Bulk Allocation is allowed.
+        */
+       if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
+           !hw->rx_bulk_alloc_allowed) {
+               PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
+                                   "preconditions or RTE_IXGBE_INC_VECTOR is "
+                                   "not enabled",
+                            dev->data->port_id);
+
+               hw->rx_vec_allowed = false;
+       }
+
+       if (dev->data->scattered_rx) {
+               /*
+                * Set the non-LRO scattered callback: there are Vector and
+                * single allocation versions.
+                */
+               if (hw->rx_vec_allowed) {
+                       PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+                                           "callback (port=%d).",
+                                    dev->data->port_id);
+
+                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+               } else {
+                       PMD_INIT_LOG(DEBUG, "Using Regular (non-vector) "
+                                           "Scattered Rx callback "
+                                           "(port=%d).",
+                                    dev->data->port_id);
+
+                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+               }
+       /*
+        * Below we set "simple" callbacks according to port/queues parameters.
+        * If the parameters allow it, we choose between the following
+        * callbacks:
+        *    - Vector
+        *    - Bulk Allocation
+        *    - Single buffer allocation (the simplest one)
+        */
+       } else if (hw->rx_vec_allowed) {
+               PMD_INIT_LOG(INFO, "Vector Rx enabled, please make sure Rx "
+                                  "burst size is no less than 32.");
+
+               dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+       } else if (hw->rx_bulk_alloc_allowed) {
+               PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+                                   "satisfied. Rx Burst Bulk Alloc function "
+                                   "will be used on port=%d.",
+                            dev->data->port_id);
+
+               dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+       } else {
+               PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+                                   "satisfied, or "
+                                   "RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
+                                   "is not enabled (port=%d).",
+                            dev->data->port_id);
+
+               dev->rx_pkt_burst = ixgbe_recv_pkts;
+       }
+}
+
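For reference, the selection above collapses to the following priority order
(derived directly from the branches; note that rx_vec_allowed can only
remain set while rx_bulk_alloc_allowed is also set, because of the first
check):

/*
 * dev->rx_pkt_burst selection implemented by ixgbe_set_rx_function():
 *
 *   scattered_rx  rx_vec_allowed  rx_bulk_alloc_allowed  callback
 *   ------------  --------------  ---------------------  --------
 *   yes           yes             yes         ixgbe_recv_scattered_pkts_vec
 *   yes           no              (any)       ixgbe_recv_scattered_pkts
 *   no            yes             yes         ixgbe_recv_pkts_vec
 *   no            no              yes         ixgbe_recv_pkts_bulk_alloc
 *   no            no              no          ixgbe_recv_pkts
 */
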
 /*
  * Initializes Receive Unit.
  */
@@ -3527,6 +3588,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        uint32_t rxcsum;
        uint16_t buf_size;
        uint16_t i;
+       struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
 
        PMD_INIT_FUNC_TRACE();
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3549,7 +3611,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
         * Configure CRC stripping, if any.
         */
        hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-       if (dev->data->dev_conf.rxmode.hw_strip_crc)
+       if (rx_conf->hw_strip_crc)
                hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
        else
                hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -3557,11 +3619,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        /*
         * Configure jumbo frame support, if any.
         */
-       if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+       if (rx_conf->jumbo_frame == 1) {
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
                maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
                maxfrs &= 0x0000FFFF;
-               maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+               maxfrs |= (rx_conf->max_rx_pkt_len << 16);
                IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
        } else
                hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
@@ -3585,9 +3647,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                 * Reset crc_len in case it was changed after queue setup by a
                 * call to configure.
                 */
-               rxq->crc_len = (uint8_t)
-                               ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
-                               ETHER_CRC_LEN);
+               rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
 
                /* Setup the Base and Length of the Rx Descriptor Rings */
                bus_addr = rxq->rx_ring_phys_addr;
@@ -3605,7 +3665,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                /*
                 * Configure Header Split
                 */
-               if (dev->data->dev_conf.rxmode.header_split) {
+               if (rx_conf->header_split) {
                        if (hw->mac.type == ixgbe_mac_82599EB) {
                                /* Must setup the PSRTYPE register */
                                uint32_t psrtype;
@@ -3615,7 +3675,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                                        IXGBE_PSRTYPE_IPV6HDR;
                                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
                        }
-                       srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
+                       srrctl = ((rx_conf->split_hdr_size <<
                                IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                                IXGBE_SRRCTL_BSIZEHDR_MASK);
                        srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
@@ -3643,23 +3703,17 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
                                       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
-               if (dev->data->dev_conf.rxmode.enable_scatter ||
-                   /* It adds dual VLAN length for supporting dual VLAN */
-                   (dev->data->dev_conf.rxmode.max_rx_pkt_len +
-                               2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
-                       if (!dev->data->scattered_rx)
-                               PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+               /* Add the length of two VLAN tags to support dual VLAN */
+               if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+                                           2 * IXGBE_VLAN_TAG_SIZE > buf_size)
                        dev->data->scattered_rx = 1;
-#ifdef RTE_IXGBE_INC_VECTOR
-                       if (rte_is_power_of_2(rxq->nb_rx_desc))
-                               dev->rx_pkt_burst =
-                                       ixgbe_recv_scattered_pkts_vec;
-                       else
-#endif
-                               dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-               }
        }
 
+       if (rx_conf->enable_scatter)
+               dev->data->scattered_rx = 1;
+
+       ixgbe_set_rx_function(dev);
+
        /*
         * Device configured with multiple RX queues.
         */
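
A quick numeric check of the scatter condition above, assuming 2048-byte
receive buffers and jumbo frames with max_rx_pkt_len = 9000: with
IXGBE_VLAN_TAG_SIZE being the 4-byte 802.1Q tag, the comparison is
9000 + 2 * 4 = 9008 > 2048, so scattered_rx is forced and such frames span
multiple descriptors. If the application sets enable_scatter, scattered_rx
is forced regardless of buffer sizes; in both cases the callback is now
chosen in one place, by ixgbe_set_rx_function().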
@@ -3672,16 +3726,17 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
         */
        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
        rxcsum |= IXGBE_RXCSUM_PCSD;
-       if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+       if (rx_conf->hw_ip_checksum)
                rxcsum |= IXGBE_RXCSUM_IPPCSE;
        else
                rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
 
        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 
-       if (hw->mac.type == ixgbe_mac_82599EB) {
+       if (hw->mac.type == ixgbe_mac_82599EB ||
+           hw->mac.type == ixgbe_mac_X540) {
                rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-               if (dev->data->dev_conf.rxmode.hw_strip_crc)
+               if (rx_conf->hw_strip_crc)
                        rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
                else
                        rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
@@ -3934,7 +3989,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
                rte_delay_us(RTE_IXGBE_WAIT_100_US);
 
                ixgbe_rx_queue_release_mbufs(rxq);
-               ixgbe_reset_rx_queue(rxq);
+               ixgbe_reset_rx_queue(hw, rxq);
        } else
                return -1;
 
@@ -4290,3 +4345,34 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 
        }
 }
+
+/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+       return -1;
+}
+
+uint16_t __attribute__((weak))
+ixgbe_recv_pkts_vec(
+       void __rte_unused *rx_queue,
+       struct rte_mbuf __rte_unused **rx_pkts,
+       uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
+
+uint16_t __attribute__((weak))
+ixgbe_recv_scattered_pkts_vec(
+       void __rte_unused *rx_queue,
+       struct rte_mbuf __rte_unused **rx_pkts,
+       uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
+
+int __attribute__((weak))
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
+{
+       return -1;
+}
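
These weak definitions give the generic file a complete symbol set when the
vector object is not compiled; when CONFIG_RTE_IXGBE_INC_VECTOR=y, the
strong definitions in the vector Rx file take precedence at link time. The
mechanism in isolation, with hypothetical file and function names:

/* generic.c -- always compiled */
int __attribute__((weak))
vec_setup(void)
{
        return -1;      /* "not supported" fallback */
}

/* vector.c -- compiled only when the vector option is enabled. The
 * strong definition overrides the weak one when both are linked. */
int
vec_setup(void)
{
        return 0;
}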