/* free buffers one at a time */
if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) {
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
+ txep->mbuf->next = NULL;
rte_mempool_put(txep->mbuf->pool, txep->mbuf);
txep->mbuf = NULL;
}
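
With ETH_TXQ_FLAGS_NOREFCOUNT the driver skips reference counting, and when ETH_TXQ_FLAGS_NOMULTMEMP is also set every mbuf in the batch comes from the same mempool, so the per-mbuf rte_mempool_put() loop above can collapse into one bulk put. A minimal sketch, not part of this patch (hypothetical helper; the scratch-array bound assumes RTE_IXGBE_TX_MAX_FREE_BUF_SZ from ixgbe_rxtx.h):

    /* Sketch: bulk-free variant, assuming NOREFCOUNT + NOMULTMEMP so all
     * mbufs share one pool and have refcnt == 1. */
    static inline void
    tx_free_bulk_one_pool(struct igb_tx_entry *txep, uint16_t n)
    {
    	struct rte_mempool *pool = txep->mbuf->pool;
    	void *objs[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
    	uint16_t i;

    	for (i = 0; i < n; i++) {
    		txep[i].mbuf->next = NULL; /* same stale-chain fix as above */
    		objs[i] = txep[i].mbuf;
    		txep[i].mbuf = NULL;
    	}
    	/* one call instead of n rte_mempool_put() calls */
    	rte_mempool_put_bulk(pool, objs, n);
    }
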
struct igb_rx_entry *rxep;
struct rte_mbuf *mb;
uint16_t pkt_len;
+ uint16_t pkt_flags;
int s[LOOK_AHEAD], nb_dd;
int i, j, nb_rx = 0;
/* Translate descriptor info to mbuf format */
for (j = 0; j < nb_dd; ++j) {
mb = rxep[j].mbuf;
- pkt_len = (uint16_t)(rxdp[j].wb.upper.length -
- rxq->crc_len);
+ pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
- mb->vlan_tci = rxdp[j].wb.upper.vlan;
- mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+ mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
/* convert descriptor fields to rte mbuf flags */
- mb->ol_flags = rx_desc_hlen_type_rss_to_pkt_flags(
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
rxdp[j].wb.lower.lo_dword.data);
/* reuse status field from scan list */
- mb->ol_flags = mb->ol_flags |
- rx_desc_status_to_pkt_flags(s[j]);
- mb->ol_flags = mb->ol_flags |
- rx_desc_error_to_pkt_flags(s[j]);
+ pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
+ pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ mb->ol_flags = pkt_flags;
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+ else if (pkt_flags & PKT_RX_FDIR) {
+ mb->hash.fdir.hash =
+ (uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
+ & IXGBE_ATR_HASH_MASK);
+ mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+ }
}
/* Move mbuf pointers from the S/W ring to the stage */
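
Since mb->hash is a union, only the member matching the flag set above is valid, so an application must test ol_flags before reading it. A sketch of the consumer side (hypothetical helper; flag names as in this DPDK era):

    /* Sketch: read the hash union safely after rte_eth_rx_burst(). */
    static inline uint32_t
    rx_pkt_hash(const struct rte_mbuf *mb)
    {
    	if (mb->ol_flags & PKT_RX_RSS_HASH)
    		return mb->hash.rss;       /* 32-bit RSS hash is valid */
    	else if (mb->ol_flags & PKT_RX_FDIR)
    		return mb->hash.fdir.hash; /* flow director hash is valid */
    	return 0;                          /* hardware computed no hash */
    }
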
txq->port_id = dev->data->port_id;
txq->txq_flags = tx_conf->txq_flags;
txq->ops = &def_txq_ops;
- txq->start_tx_per_q = tx_conf->start_tx_per_q;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
/*
 * Use VFTDT instead of TDT as the tail register for a virtual
 * function; the VF check itself happens outside this function.
 */
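
The renamed tx_deferred_start is the field applications set in struct rte_eth_txconf to keep a queue stopped across rte_eth_dev_start(). A setup sketch (hypothetical helper; thresholds left zeroed for brevity):

    /* Sketch: set up TX queue 0 so it stays stopped across
     * rte_eth_dev_start() and must be started explicitly later. */
    static int
    setup_deferred_txq(uint8_t port_id, uint16_t nb_txd)
    {
    	struct rte_eth_txconf txconf;

    	memset(&txconf, 0, sizeof(txconf));
    	txconf.tx_deferred_start = 1; /* honored via the field renamed above */
    	return rte_eth_tx_queue_setup(port_id, 0, nb_txd,
    				      rte_eth_dev_socket_id(port_id),
    				      &txconf);
    }
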
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
- if (! (rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST))
+ if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
+ rxq->rx_free_thresh, RTE_PMD_IXGBE_RX_MAX_BURST);
ret = -EINVAL;
- else if (! (rxq->rx_free_thresh < rxq->nb_rx_desc))
+ } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->rx_free_thresh=%d, "
+ "rxq->nb_rx_desc=%d",
+ rxq->rx_free_thresh, rxq->nb_rx_desc);
ret = -EINVAL;
- else if (! ((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0))
+ } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "rxq->rx_free_thresh=%d",
+ rxq->nb_rx_desc, rxq->rx_free_thresh);
ret = -EINVAL;
- else if (! (rxq->nb_rx_desc <
- (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST)))
+ } else if (!(rxq->nb_rx_desc <
+ (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
+ "rxq->nb_rx_desc=%d, "
+ "IXGBE_MAX_RING_DESC=%d, "
+ "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
+ rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
+ RTE_PMD_IXGBE_RX_MAX_BURST);
ret = -EINVAL;
+ }
#else
ret = -EINVAL;
#endif
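
The four conditions logged above gate the bulk-allocation receive path. A sketch restating them as one predicate, useful when picking nb_rx_desc and rx_free_thresh (hypothetical helper; e.g. nb_rx_desc = 128 with rx_free_thresh = 32 passes all four):

    /* Sketch: same preconditions as the checks above, as one predicate. */
    static int
    rx_params_allow_bulk_alloc(uint16_t nb_rx_desc, uint16_t rx_free_thresh)
    {
    	return (rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST) &&
    	       (rx_free_thresh < nb_rx_desc) &&
    	       ((nb_rx_desc % rx_free_thresh) == 0) &&
    	       (nb_rx_desc <
    		(IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST));
    }
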
rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
- rxq->start_rx_per_q = rx_conf->start_rx_per_q;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
/*
* Allocate RX ring hardware descriptors. A memzone large enough to
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
tsa, map);
break;
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
break;
/* It adds dual VLAN length for supporting dual VLAN */
if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
#ifdef RTE_IXGBE_INC_VECTOR
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
}
if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
#ifdef RTE_IXGBE_INC_VECTOR
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
#else
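
The scatter decision compares the largest expected frame, padded for two VLAN tags, against the per-mbuf data room. A sketch of the same test as a predicate (hypothetical helper; buf_size is the mempool data room minus headroom, as computed earlier in this file):

    /* Sketch: will the driver force scattered RX for this setup? */
    static int
    needs_scattered_rx(const struct rte_eth_dev *dev, uint16_t buf_size)
    {
    	uint32_t max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

    	/* account for outer + inner VLAN tags, as the driver does */
    	return ((max_len + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) ||
    	       dev->data->dev_conf.rxmode.enable_scatter;
    }
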
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
default:
txctrl = IXGBE_READ_REG(hw,
IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if (!txq->start_tx_per_q)
+ if (!txq->tx_deferred_start)
ixgbe_dev_tx_queue_start(dev, i);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- if (!rxq->start_rx_per_q)
+ if (!rxq->rx_deferred_start)
ixgbe_dev_rx_queue_start(dev, i);
}
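
Queues flagged deferred are skipped by these loops, so they stay stopped after rte_eth_dev_start() and the application starts them explicitly. A usage sketch (hypothetical helper):

    /* Sketch: start a deferred queue pair once the app is ready. */
    static int
    start_deferred_queues(uint8_t port_id, uint16_t queue_id)
    {
    	int ret;

    	ret = rte_eth_rx_queue_start(port_id, queue_id);
    	if (ret != 0)
    		return ret;
    	return rte_eth_tx_queue_start(port_id, queue_id);
    }
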
/* It adds dual VLAN length for supporting dual VLAN */
if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
#ifdef RTE_IXGBE_INC_VECTOR
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
}
if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (!dev->data->scattered_rx)
+ PMD_INIT_LOG(DEBUG, "forcing scatter mode");
#ifdef RTE_IXGBE_INC_VECTOR
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
#else