X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=lib%2Flibrte_pmd_ixgbe%2Fixgbe_rxtx.c;h=d4361513441779209e67368ac47d84ed018da6e3;hb=4ef84d5a2227761cca0de1e41a61668d6a26861e;hp=1a463938ddaf717e7d56bf7e966912afe8f87f17;hpb=0ff3324da2eb36c606457121f397f9b0d3fa3a26;p=dpdk.git

diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 1a463938dd..d436151344 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -142,10 +142,6 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
 	 */
 	txep = &(txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]);
 
-	/* prefetch the mbufs that are about to be freed */
-	for (i = 0; i < txq->tx_rs_thresh; ++i)
-		rte_prefetch0((txep + i)->mbuf);
-
 	/* free buffers one at a time */
 	if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) {
 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
@@ -186,6 +182,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 				((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
 		txdp->read.olinfo_status =
 				(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+		rte_prefetch0(&(*pkts)->pool);
 	}
 }
 
@@ -205,6 +202,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
 			((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
 	txdp->read.olinfo_status =
 			(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+	rte_prefetch0(&(*pkts)->pool);
 }
 
 /*
@@ -615,7 +613,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-		   " tx_first=%u tx_last=%u\n",
+		   " tx_first=%u tx_last=%u",
 		   (unsigned) txq->port_id,
 		   (unsigned) txq->queue_id,
 		   (unsigned) pkt_len,
@@ -902,6 +900,7 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
 	struct igb_rx_entry *rxep;
 	struct rte_mbuf *mb;
 	uint16_t pkt_len;
+	uint16_t pkt_flags;
 	int s[LOOK_AHEAD], nb_dd;
 	int i, j, nb_rx = 0;
 
@@ -935,21 +934,28 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
 		/* Translate descriptor info to mbuf format */
 		for (j = 0; j < nb_dd; ++j) {
 			mb = rxep[j].mbuf;
-			pkt_len = (uint16_t)(rxdp[j].wb.upper.length -
-					rxq->crc_len);
+			pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
 			mb->data_len = pkt_len;
 			mb->pkt_len = pkt_len;
-			mb->vlan_tci = rxdp[j].wb.upper.vlan;
-			mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+			mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
 
 			/* convert descriptor fields to rte mbuf flags */
-			mb->ol_flags = rx_desc_hlen_type_rss_to_pkt_flags(
+			pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
 					rxdp[j].wb.lower.lo_dword.data);
 			/* reuse status field from scan list */
-			mb->ol_flags = mb->ol_flags |
-					rx_desc_status_to_pkt_flags(s[j]);
-			mb->ol_flags = mb->ol_flags |
-					rx_desc_error_to_pkt_flags(s[j]);
+			pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
+			pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+			mb->ol_flags = pkt_flags;
+
+			if (likely(pkt_flags & PKT_RX_RSS_HASH))
+				mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+			else if (pkt_flags & PKT_RX_FDIR) {
+				mb->hash.fdir.hash =
+					(uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
+						& IXGBE_ATR_HASH_MASK);
+				mb->hash.fdir.id =
+					rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+			}
 		}
 
 		/* Move mbuf pointers from the S/W ring to the stage */
@@ -1065,7 +1071,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		if (ixgbe_rx_alloc_bufs(rxq) != 0) {
 			int i, j;
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
@@ -1192,7 +1198,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * frames to its peer(s).
 		 */
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
-			   "ext_err_stat=0x%08x pkt_len=%u\n",
+			   "ext_err_stat=0x%08x pkt_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -1200,7 +1206,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -1292,7 +1298,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -1379,8 +1385,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "staterr=0x%x data_len=%u\n",
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "staterr=0x%x data_len=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -1388,7 +1394,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
+				   "queue_id=%u", (unsigned) rxq->port_id,
 				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
@@ -1557,7 +1563,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u\n",
+			   "nb_hold=%u nb_rx=%u",
 			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
 			   (unsigned) rx_id, (unsigned) nb_hold,
 			   (unsigned) nb_rx);
@@ -1761,33 +1767,36 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
 
 	if (tx_rs_thresh >= (nb_desc - 2)) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than the number "
-			"of TX descriptors minus 2. (tx_rs_thresh=%u port=%d "
-			"queue=%d)\n", (unsigned int)tx_rs_thresh,
-			(int)dev->data->port_id, (int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
+			     "of TX descriptors minus 2. (tx_rs_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 	if (tx_free_thresh >= (nb_desc - 3)) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than the "
-			"tx_free_thresh must be less than the number of TX "
-			"descriptors minus 3. (tx_free_thresh=%u port=%d "
-			"queue=%d)\n", (unsigned int)tx_free_thresh,
-			(int)dev->data->port_id, (int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
+			     "tx_free_thresh must be less than the number of "
+			     "TX descriptors minus 3. (tx_free_thresh=%u "
+			     "port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 	if (tx_rs_thresh > tx_free_thresh) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than or equal to "
-			"tx_free_thresh. (tx_free_thresh=%u tx_rs_thresh=%u "
-			"port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
-			(unsigned int)tx_rs_thresh, (int)dev->data->port_id,
-			(int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
+			     "tx_free_thresh. (tx_free_thresh=%u "
+			     "tx_rs_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
 		return -(EINVAL);
 	}
 	if ((nb_desc % tx_rs_thresh) != 0) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be a divisor of the "
-			"number of TX descriptors. (tx_rs_thresh=%u port=%d "
-			"queue=%d)\n", (unsigned int)tx_rs_thresh,
-			(int)dev->data->port_id, (int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
+			     "number of TX descriptors. (tx_rs_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 
@@ -1798,10 +1807,10 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	 * accumulates WTHRESH descriptors.
 	 */
 	if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
-		RTE_LOG(ERR, PMD, "TX WTHRESH must be set to 0 if "
-			"tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
-			"port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
-			(int)dev->data->port_id, (int)queue_idx);
+		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+			     "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 
@@ -1842,7 +1851,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
 	txq->ops = &def_txq_ops;
-	txq->start_tx_per_q = tx_conf->start_tx_per_q;
+	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
 	/*
 	 * Modification to set VFTDT for virtual function if vf is detected
@@ -1866,26 +1875,32 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		ixgbe_tx_queue_release(txq);
 		return (-ENOMEM);
 	}
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
 	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
 	    (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
-		PMD_INIT_LOG(INFO, "Using simple tx code path\n");
+		PMD_INIT_LOG(INFO, "Using simple tx code path");
 #ifdef RTE_IXGBE_INC_VECTOR
 		if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
-		    ixgbe_txq_vec_setup(txq, socket_id) == 0) {
-			PMD_INIT_LOG(INFO, "Vector tx enabled.\n");
+		    ixgbe_txq_vec_setup(txq) == 0) {
+			PMD_INIT_LOG(INFO, "Vector tx enabled.");
 			dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
 		} else
 #endif
 			dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
 	} else {
-		PMD_INIT_LOG(INFO, "Using full-featured tx code path\n");
-		PMD_INIT_LOG(INFO, " - txq_flags = %lx [IXGBE_SIMPLE_FLAGS=%lx]\n", (long unsigned)txq->txq_flags, (long unsigned)IXGBE_SIMPLE_FLAGS);
-		PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu [RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\n", (long unsigned)txq->tx_rs_thresh, (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
+		PMD_INIT_LOG(INFO, "Using full-featured tx code path");
+		PMD_INIT_LOG(INFO, " - txq_flags = %lx "
+			     "[IXGBE_SIMPLE_FLAGS=%lx]",
+			     (long unsigned)txq->txq_flags,
+			     (long unsigned)IXGBE_SIMPLE_FLAGS);
+		PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu "
+			     "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
+			     (long unsigned)txq->tx_rs_thresh,
+			     (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
 		dev->tx_pkt_burst = ixgbe_xmit_pkts;
 	}
 
@@ -1965,15 +1980,34 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
 	 * outside of this function.
 	 */
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-	if (! (rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST))
+	if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->rx_free_thresh=%d, "
+			     "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
+			     rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
 		ret = -EINVAL;
-	else if (! (rxq->rx_free_thresh < rxq->nb_rx_desc))
+	} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->rx_free_thresh=%d, "
+			     "rxq->nb_rx_desc=%d",
+			     rxq->rx_free_thresh, rxq->nb_rx_desc);
 		ret = -EINVAL;
-	else if (! ((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0))
+	} else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->nb_rx_desc=%d, "
+			     "rxq->rx_free_thresh=%d",
+			     rxq->nb_rx_desc, rxq->rx_free_thresh);
 		ret = -EINVAL;
-	else if (! (rxq->nb_rx_desc <
-		(IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST)))
+	} else if (!(rxq->nb_rx_desc <
+		       (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
+		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+			     "rxq->nb_rx_desc=%d, "
+			     "IXGBE_MAX_RING_DESC=%d, "
+			     "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
+			     rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
+			     RTE_PMD_IXGBE_RX_MAX_BURST);
 		ret = -EINVAL;
+	}
 #else
 	ret = -EINVAL;
 #endif
@@ -2087,7 +2121,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
 				  0 : ETHER_CRC_LEN);
 	rxq->drop_en = rx_conf->rx_drop_en;
-	rxq->start_rx_per_q = rx_conf->start_rx_per_q;
+	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
 
 	/*
 	 * Allocate RX ring hardware descriptors. A memzone large enough to
@@ -2145,7 +2179,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		ixgbe_rx_queue_release(rxq);
 		return (-ENOMEM);
 	}
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
 	/*
@@ -2159,13 +2193,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
 			     "satisfied. Rx Burst Bulk Alloc function will be "
-			     "used on port=%d, queue=%d.\n",
+			     "used on port=%d, queue=%d.",
 			     rxq->port_id, rxq->queue_id);
 		dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
#ifdef RTE_IXGBE_INC_VECTOR
 		if (!ixgbe_rx_vec_condition_check(dev)) {
 			PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
-				     "sure RX burst size no less than 32.\n");
+				     "sure RX burst size no less than 32.");
 			ixgbe_rxq_vec_setup(rxq);
 			dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
 		}
@@ -2175,7 +2209,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
 			     "are not satisfied, Scattered Rx is requested, "
 			     "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
-			     "enabled (port=%d, queue=%d).\n",
+			     "enabled (port=%d, queue=%d).",
 			     rxq->port_id, rxq->queue_id);
 	}
 	dev->data->rx_queues[queue_idx] = rxq;
@@ -2194,7 +2228,7 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	uint32_t desc = 0;
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
+		PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
 		return 0;
 	}
 
@@ -2910,7 +2944,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		ixgbe_dcb_rx_hw_config(hw, dcb_config);
 		break;
 	default:
-		PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration\n");
+		PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
 		break;
 	}
 	switch (dev->data->dev_conf.txmode.mq_mode) {
@@ -2932,7 +2966,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 		ixgbe_dcb_tx_hw_config(hw, dcb_config);
 		break;
 	default:
-		PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration\n");
+		PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
 		break;
 	}
 
@@ -3203,7 +3237,7 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 		volatile union ixgbe_adv_rx_desc *rxd;
 		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (mbuf == NULL) {
-			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u\n",
+			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
 				     (unsigned) rxq->queue_id);
 			return (-ENOMEM);
 		}
@@ -3274,7 +3308,7 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 			IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN);
 			break;
 		default:
-			RTE_LOG(ERR, PMD, "invalid pool number in IOV mode\n");
+			PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
 		}
 	}
 
@@ -3327,7 +3361,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 			break;
 		default:
 			mtqc = IXGBE_MTQC_64Q_1PB;
-			RTE_LOG(ERR, PMD, "invalid pool number in IOV mode\n");
+			PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
 		}
 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
 	}
@@ -3477,13 +3511,25 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
 				2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
+			if (!dev->data->scattered_rx)
+				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->data->scattered_rx = 1;
+#ifdef RTE_IXGBE_INC_VECTOR
+			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+#else
 			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+#endif
 		}
 	}
 
 	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		if (!dev->data->scattered_rx)
+			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+#ifdef RTE_IXGBE_INC_VECTOR
+		dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+#else
 		dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+#endif
 		dev->data->scattered_rx = 1;
 	}
 
@@ -3590,12 +3636,12 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
 static inline void
 ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
 {
-	DEBUGFUNC("ixgbe_setup_loopback_link_82599");
+	PMD_INIT_FUNC_TRACE();
 
 	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
 		if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
 				IXGBE_SUCCESS) {
-			PMD_INIT_LOG(ERR, "Could not enable loopback mode\n");
+			PMD_INIT_LOG(ERR, "Could not enable loopback mode");
 			/* ignore error */
 			return;
 		}
@@ -3647,13 +3693,13 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (!txq->start_tx_per_q)
+		if (!txq->tx_deferred_start)
 			ixgbe_dev_tx_queue_start(dev, i);
 	}
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
-		if (!rxq->start_rx_per_q)
+		if (!rxq->rx_deferred_start)
 			ixgbe_dev_rx_queue_start(dev, i);
 	}
 
@@ -3690,9 +3736,8 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 		/* Allocate buffers for descriptor rings */
 		if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
-			PMD_INIT_LOG(ERR,
-				"Could not alloc mbuf for queue:%d\n",
-				rx_queue_id);
+			PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+				     rx_queue_id);
 			return -1;
 		}
 		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
@@ -3706,8 +3751,8 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
 		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable "
-				     "Rx Queue %d\n", rx_queue_id);
+			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
+				     rx_queue_id);
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
@@ -3745,8 +3790,8 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
 		} while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not disable "
-				     "Rx Queue %d\n", rx_queue_id);
+			PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
+				     rx_queue_id);
 
 		rte_delay_us(RTE_IXGBE_WAIT_100_US);
 
@@ -3789,7 +3834,7 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
 		if (!poll_ms)
 			PMD_INIT_LOG(ERR, "Could not enable "
-				     "Tx Queue %d\n", tx_queue_id);
+				     "Tx Queue %d", tx_queue_id);
 	}
 	rte_wmb();
 	IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
@@ -3829,9 +3874,8 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 					   IXGBE_TDT(txq->reg_idx));
 		} while (--poll_ms && (txtdh != txtdt));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR,
-				"Tx Queue %d is not empty when stopping.\n",
-				tx_queue_id);
+			PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
+				     "when stopping.", tx_queue_id);
 	}
 
 	txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
@@ -3848,7 +3892,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 		} while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
 		if (!poll_ms)
 			PMD_INIT_LOG(ERR, "Could not disable "
-				     "Tx Queue %d\n", tx_queue_id);
+				     "Tx Queue %d", tx_queue_id);
 	}
 
 	if (txq->ops != NULL) {
@@ -3970,13 +4014,25 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 		/* It adds dual VLAN length for supporting dual VLAN */
 		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
 				2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
+			if (!dev->data->scattered_rx)
+				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->data->scattered_rx = 1;
+#ifdef RTE_IXGBE_INC_VECTOR
+			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+#else
 			dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+#endif
 		}
 	}
 
 	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		if (!dev->data->scattered_rx)
+			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+#ifdef RTE_IXGBE_INC_VECTOR
+		dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+#else
 		dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+#endif
 		dev->data->scattered_rx = 1;
 	}
 
@@ -4064,8 +4120,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 			txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
 		} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable "
-				     "Tx Queue %d\n", i);
+			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
 	}
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -4082,8 +4137,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 			rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
 		} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
 		if (!poll_ms)
-			PMD_INIT_LOG(ERR, "Could not enable "
-				     "Rx Queue %d\n", i);
+			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
 		rte_wmb();
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
 