#define I40E_RX_ERR_BITS 0x3f
if (likely((error_bits & I40E_RX_ERR_BITS) == 0))
return flags;
- /* If RXE bit set, all other status bits are meaningless */
- if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
- flags |= PKT_RX_MAC_ERR;
- return flags;
- }
-
- /* If RECIPE bit set, all other status indications should be ignored */
- if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RECIPE_SHIFT))) {
- flags |= PKT_RX_RECIP_ERR;
- return flags;
- }
- if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT)))
- flags |= PKT_RX_HBUF_OVERFLOW;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
flags |= PKT_RX_IP_CKSUM_BAD;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
flags |= PKT_RX_L4_CKSUM_BAD;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
flags |= PKT_RX_EIP_CKSUM_BAD;
- if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_OVERSIZE_SHIFT)))
- flags |= PKT_RX_OVERSIZE;
return flags;
}
I40E_RXD_QW1_STATUS_SHIFT;
}
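+ /* This barrier is to order loads of different words in the descriptor */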
+ rte_smp_rmb();
+
/* Compute how many status bits were set */
for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++)
nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
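+ /* rxm is the last segment of the chain here; RSS hash and FDIR
+  * metadata belong on first_seg, the mbuf returned to the application.
+  */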
if (pkt_flags & PKT_RX_RSS_HASH)
- rxm->hash.rss =
+ first_seg->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
if (pkt_flags & PKT_RX_FDIR)
- pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
+ pkt_flags |= i40e_rxd_build_fdir(&rxd, first_seg);
#ifdef RTE_LIBRTE_IEEE1588
pkt_flags |= i40e_get_iee15888_flags(first_seg, qword1);
return;
}
- if (!rxq || !rxq->sw_ring) {
- PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+ if (!rxq->sw_ring) {
+ PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
return;
}
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
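+ /* Queue entries may be NULL if they were never set up; skip them */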
+ if (!dev->data->tx_queues[i])
+ continue;
i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]);
i40e_reset_tx_queue(dev->data->tx_queues[i]);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]);
i40e_reset_rx_queue(dev->data->rx_queues[i]);
}
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_rx_queues; i++) {
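+ /* A NULL entry means the queue was already released or never set up */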
+ if (!dev->data->rx_queues[i])
+ continue;
i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
}
struct i40e_tx_queue *txq;
const struct rte_memzone *tz = NULL;
uint32_t ring_size;
- struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct rte_eth_dev *dev;
if (!pf) {
PMD_DRV_LOG(ERR, "PF is not available");
return I40E_ERR_BAD_PTR;
}
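+ /* pf has been validated above, so it is now safe to dereference */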
+ dev = pf->adapter->eth_dev;
+
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("i40e fdir tx queue",
sizeof(struct i40e_tx_queue),
struct i40e_rx_queue *rxq;
const struct rte_memzone *rz = NULL;
uint32_t ring_size;
- struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct rte_eth_dev *dev;
if (!pf) {
PMD_DRV_LOG(ERR, "PF is not available");
return I40E_ERR_BAD_PTR;
}
+ dev = pf->adapter->eth_dev;
+
/* Allocate the RX queue data structure. */
rxq = rte_zmalloc_socket("i40e fdir rx queue",
sizeof(struct i40e_rx_queue),
struct i40e_rx_queue *rxq =
dev->data->rx_queues[i];
- if (i40e_rxq_vec_setup(rxq)) {
+ if (rxq && i40e_rxq_vec_setup(rxq)) {
ad->rx_vec_allowed = false;
break;
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
- rxq->rx_using_sse = rx_using_sse;
+ if (rxq)
+ rxq->rx_using_sse = rx_using_sse;
}
}
}
struct i40e_tx_queue *txq =
dev->data->tx_queues[i];
- if (i40e_txq_vec_setup(txq)) {
+ if (txq && i40e_txq_vec_setup(txq)) {
ad->tx_vec_allowed = false;
break;
}