/* check DD bit on threshold descriptor */
status = txq->tx_ring[txq->tx_next_dd].wb.status;
- if (! (status & IXGBE_ADVTXD_STAT_DD))
+ if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
return 0;
/*
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
- txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
txdp->read.cmd_type_len =
- ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
txdp->read.olinfo_status =
- (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
rte_prefetch0(&(*pkts)->pool);
}
}
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
- txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txdp->read.cmd_type_len =
- ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
txdp->read.olinfo_status =
- (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
rte_prefetch0(&(*pkts)->pool);
}
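Both tx4() and tx1() above follow the same pattern: host-order values written into the read-format Tx descriptor pass through rte_cpu_to_le_64()/rte_cpu_to_le_32(), while status bits are tested against a converted constant rather than converting the volatile read. A minimal sketch of that pattern, assuming only <rte_byteorder.h>; the DD mask below is a stand-in for IXGBE_ADVTXD_STAT_DD from the base-driver headers:

    #include <stdint.h>
    #include <rte_byteorder.h>

    #define SKETCH_STAT_DD 0x00000001    /* stand-in for IXGBE_ADVTXD_STAT_DD */

    /* Write a host-order value into a little-endian descriptor field.
     * On little-endian CPUs rte_cpu_to_le_32() compiles away; on big-endian
     * CPUs it byte-swaps so the NIC still sees little-endian data. */
    static inline void
    desc_write_le32(volatile uint32_t *field, uint32_t host_val)
    {
        *field = rte_cpu_to_le_32(host_val);
    }

    /* Test a status bit without swapping the word read back from the ring:
     * converting the compile-time constant instead lets the swap fold away,
     * keeping the hot path free of per-descriptor byte swaps. */
    static inline int
    desc_dd_is_set(uint32_t status_le)
    {
        return (status_le & rte_cpu_to_le_32(SKETCH_STAT_DD)) != 0;
    }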
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
uint16_t nb_tx_to_clean;
+ uint32_t status;
/* Determine the last descriptor needing to be cleaned */
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
/* Check to make sure the last descriptor to clean is done */
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
- if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+ status = txr[desc_to_clean_to].wb.status;
+ if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
{
PMD_TX_FREE_LOG(DEBUG,
"TX descriptor %4u is not done"
IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
* LOOK_AHEAD defines how many desc statuses to check beyond the
* current descriptor.
int s[LOOK_AHEAD], nb_dd;
#endif /* RTE_NEXT_ABI */
int i, j, nb_rx = 0;
-
+ uint32_t status;
/* get references to current descriptor and S/W ring entry */
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
+ status = rxdp->wb.upper.status_error;
/* check to make sure there is at least 1 packet to receive */
- if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+ if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
return 0;
/*
{
/* Read desc statuses backwards to avoid race condition */
for (j = LOOK_AHEAD-1; j >= 0; --j)
- s[j] = rxdp[j].wb.upper.status_error;
+ s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
#ifdef RTE_NEXT_ABI
for (j = LOOK_AHEAD - 1; j >= 0; --j)
/* Translate descriptor info to mbuf format */
for (j = 0; j < nb_dd; ++j) {
mb = rxep[j].mbuf;
- pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
+ pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
+ rxq->crc_len;
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
#else /* RTE_NEXT_ABI */
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
- rxdp[j].wb.lower.lo_dword.data);
+ rte_le_to_cpu_32(
+ rxdp[j].wb.lower.lo_dword.data));
/* reuse status field from scan list */
pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
#endif /* RTE_NEXT_ABI */
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+ mb->hash.rss = rte_le_to_cpu_32(
+ rxdp[j].wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
- mb->hash.fdir.hash =
- (uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
- mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+ mb->hash.fdir.hash = rte_le_to_cpu_16(
+ rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
+ IXGBE_ATR_HASH_MASK;
+ mb->hash.fdir.id = rte_le_to_cpu_16(
+ rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
}
}
return nb_rx;
}
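For fields read out of the write-back descriptor the conversion runs in the opposite direction: length, VLAN tag, RSS hash and the flow-director words are little-endian in memory and are brought to host order with rte_le_to_cpu_16()/rte_le_to_cpu_32() before any arithmetic or masking. A small sketch of that direction, using a hypothetical struct in place of the ixgbe write-back union:

    #include <stdint.h>
    #include <rte_byteorder.h>

    /* Hypothetical stand-in for the "upper" half of a write-back Rx descriptor. */
    struct wb_upper_sketch {
        uint16_t length;    /* frame length, little-endian as written by the NIC */
        uint16_t vlan;      /* VLAN tag, little-endian */
    };

    /* Mirror of the pkt_len computation above: convert to host order first,
     * then subtract the CRC length in host arithmetic. */
    static inline uint16_t
    payload_len(const struct wb_upper_sketch *u, uint16_t crc_len)
    {
        return rte_le_to_cpu_16(u->length) - crc_len;
    }

Single-bit flags such as DD can still be tested against a converted constant, as in the status checks, but multi-byte numeric fields have to be swapped before being used as numbers.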
-#else
-
-/* Stub to avoid extra ifdefs */
-static uint16_t
-ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
- __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
-{
- return 0;
-}
-
-static inline int
-ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq,
- __rte_unused bool reset_mbuf)
-{
- return -ENOMEM;
-}
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
-
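With the #else stubs removed, ixgbe_recv_pkts_bulk_alloc() and ixgbe_rx_alloc_bufs() are always compiled in, and whether they are used becomes a pure runtime decision made in ixgbe_set_rx_function() (shown near the end of this section). A rough sketch of that selection, with the flag arguments standing in for state the driver derives from the per-queue precondition checks:

    #include <stdint.h>
    #include <rte_mbuf.h>

    typedef uint16_t (*rx_burst_fn)(void *rxq, struct rte_mbuf **pkts, uint16_t n);

    /* Hypothetical condensed form of the runtime choice; the real code keys off
     * adapter state (bulk-alloc allowed, scattered Rx) rather than raw flags.
     * ixgbe_recv_pkts_bulk_alloc and ixgbe_recv_pkts are the driver's own
     * burst functions, now both built unconditionally. */
    static rx_burst_fn
    pick_rx_burst(int bulk_alloc_allowed, int scattered_rx)
    {
        if (bulk_alloc_allowed && !scattered_rx)
            return ixgbe_recv_pkts_bulk_alloc;
        return ixgbe_recv_pkts;
    }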
uint16_t
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
*/
rxdp = &rx_ring[rx_id];
staterr = rxdp->wb.upper.status_error;
- if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
break;
rxd = *rxdp;
#endif /* RTE_NEXT_ABI */
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+ rxm->hash.rss = rte_le_to_cpu_32(
+ rxd.wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
- rxm->hash.fdir.hash =
- (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
- rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+ rxm->hash.fdir.hash = rte_le_to_cpu_16(
+ rxd.wb.lower.hi_dword.csum_ip.csum) &
+ IXGBE_ATR_HASH_MASK;
+ rxm->hash.fdir.id = rte_le_to_cpu_16(
+ rxd.wb.lower.hi_dword.csum_ip.ip_id);
}
/*
* Store the mbuf address into the next entry of the array
break;
}
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
else if (nb_hold > rxq->rx_free_thresh) {
uint16_t next_rdt = rxq->rx_free_trigger;
break;
}
}
-#endif
nb_hold++;
rxe = &sw_ring[rx_id];
prev = (uint16_t) (txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
- txd->wb.status = IXGBE_TXD_STAT_DD;
+ txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
txe[i].mbuf = NULL;
txe[i].last_id = i;
txe[prev].next_id = i;
{
unsigned i;
+#ifdef RTE_IXGBE_INC_VECTOR
+ /* SSE Vector driver has a different way of releasing mbufs. */
+ if (rxq->rx_using_sse) {
+ ixgbe_rx_queue_release_mbufs_vec(rxq);
+ return;
+ }
+#endif
+
if (rxq->sw_ring != NULL) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
- if (rxq->sw_ring[i].mbuf != NULL &&
- rte_mbuf_refcnt_read(rxq->sw_ring[i].mbuf)) {
+ if (rxq->sw_ring[i].mbuf != NULL) {
rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
rxq->sw_ring[i].mbuf = NULL;
}
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
if (rxq->rx_nb_avail) {
for (i = 0; i < rxq->rx_nb_avail; ++i) {
struct rte_mbuf *mb;
}
rxq->rx_nb_avail = 0;
}
-#endif
}
if (rxq->sw_sc_ring)
* function must be used.
*/
static inline int __attribute__((cold))
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
-#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
-#endif
{
int ret = 0;
* Scattered packets are not supported. This should be checked
* outside of this function.
*/
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
"rxq->rx_free_thresh=%d, "
RTE_PMD_IXGBE_RX_MAX_BURST);
ret = -EINVAL;
}
-#else
- ret = -EINVAL;
-#endif
return ret;
}
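Since the precondition check is now always built, whether the bulk-alloc Rx path is usable depends only on how the application configures the queue. A hedged example of a setup that satisfies the rx_free_thresh bound tested above (the full function applies further constraints, e.g. against the ring size, that this hunk does not show):

    #include <rte_ethdev.h>
    #include <rte_lcore.h>
    #include <rte_mempool.h>

    /* Hypothetical helper: 512 descriptors with rx_free_thresh = 32, which is
     * assumed here to meet the RTE_PMD_IXGBE_RX_MAX_BURST lower bound. */
    static int
    setup_bulk_alloc_friendly_rxq(uint8_t port, uint16_t queue,
                                  struct rte_mempool *mp)
    {
        struct rte_eth_rxconf rx_conf = {
            .rx_free_thresh = 32,
        };

        return rte_eth_rx_queue_setup(port, queue, 512, rte_socket_id(),
                                      &rx_conf, mp);
    }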
rxq->rx_ring[i] = zeroed_desc;
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
* initialize extra software ring entries. Space for these extra
* entries is always allocated
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
+#endif
}
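The two fields reset above only matter to the vector Rx path: rxrearm_start marks where the next descriptor refill begins and rxrearm_nb counts consumed descriptors waiting for fresh mbufs. A hedged sketch of how the vector driver is expected to use them (the function and threshold names are assumptions, not shown in this patch):

    /* In the vector receive loop, refill descriptors in bursts once enough
     * have been consumed. */
    static inline void
    maybe_rearm(struct ixgbe_rx_queue *rxq)
    {
        if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
            ixgbe_rxq_rearm(rxq);  /* refills mbufs, advances rxrearm_start,
                                    * and decreases rxrearm_nb */
    }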
int __attribute__((cold))
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
- (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+ (rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
desc += IXGBE_RXQ_SCAN_INTERVAL;
rxdp += IXGBE_RXQ_SCAN_INTERVAL;
if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
desc -= rxq->nb_rx_desc;
rxdp = &rxq->rx_ring[desc];
- return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+ return !!(rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
}
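These two helpers also back the generic ethdev descriptor query, so the endianness fix is visible to applications polling ring state. A hedged usage sketch through the public API of this DPDK generation:

    #include <rte_ethdev.h>

    /* Returns 1 if the descriptor 'offset' entries past the queue tail has been
     * written back (DD set), 0 if not yet, or a negative value on error. */
    static int
    rx_slot_ready(uint8_t port, uint16_t queue, uint16_t offset)
    {
        return rte_eth_rx_descriptor_done(port, queue, offset);
    }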
void __attribute__((cold))
ixgbe_set_rx_function(struct rte_eth_dev *dev)
{
+ uint16_t i, rx_using_sse;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
- "satisfied, or Scattered Rx is requested, "
- "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
- "is not enabled (port=%d).",
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
dev->data->port_id);
dev->rx_pkt_burst = ixgbe_recv_pkts;
}
+
+ /* Propagate information about RX function choice through all queues. */
+
+ rx_using_sse =
+ (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+ rxq->rx_using_sse = rx_using_sse;
+ }
}
/**