/* check DD bit on threshold descriptor */
status = txq->tx_ring[txq->tx_next_dd].wb.status;
- if (! (status & IXGBE_ADVTXD_STAT_DD))
+ if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
return 0;
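
A note on the pattern: the conversion is applied to the constant, not to the volatile descriptor read, so on little-endian hosts it compiles away and on big-endian hosts it is folded at build time. A minimal self-contained sketch of the same idea; cpu_to_le_32() stands in for rte_cpu_to_le_32() and STAT_DD is an illustrative bit, not the real ixgbe layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for rte_cpu_to_le_32(): identity on little-endian CPUs,
     * a byte swap on big-endian ones. */
    static inline uint32_t cpu_to_le_32(uint32_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap32(v);
    #else
        return v;
    #endif
    }

    #define STAT_DD UINT32_C(0x01) /* illustrative "descriptor done" bit */

    int main(void)
    {
        /* Status as the NIC writes it back: always little-endian. */
        volatile uint32_t wb_status = cpu_to_le_32(STAT_DD);

        /* Swap the constant, not the hot-path load. */
        if (!(wb_status & cpu_to_le_32(STAT_DD)))
            puts("descriptor not done");
        else
            puts("descriptor done");
        return 0;
    }
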
/*
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
- txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
txdp->read.cmd_type_len =
- ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
txdp->read.olinfo_status =
- (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
rte_prefetch0(&(*pkts)->pool);
}
}
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
- txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txdp->read.cmd_type_len =
- ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
txdp->read.olinfo_status =
- (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
rte_prefetch0(&(*pkts)->pool);
}
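
Both fill paths above (the burst and the single-descriptor variant) now push every field that crosses the host/device boundary through a CPU-to-LE conversion. A sketch of the idea under illustrative names; tx_desc, PAYLEN_SHIFT, and the helpers are stand-ins, not the ixgbe definitions:

    #include <stdint.h>

    static inline uint32_t cpu_to_le_32(uint32_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap32(v);
    #else
        return v;
    #endif
    }

    static inline uint64_t cpu_to_le_64(uint64_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap64(v);
    #else
        return v;
    #endif
    }

    /* Illustrative 16-byte TX descriptor in read (host-to-device) format. */
    struct tx_desc {
        uint64_t buffer_addr;   /* DMA address, little-endian */
        uint32_t cmd_type_len;  /* command flags + length, little-endian */
        uint32_t olinfo_status; /* offload info, little-endian */
    };

    #define PAYLEN_SHIFT 14 /* stand-in for IXGBE_ADVTXD_PAYLEN_SHIFT */

    /* Nothing is stored in host order: each store converts explicitly,
     * so the same code is correct on both endiannesses. */
    static void tx_fill_one(volatile struct tx_desc *txd, uint64_t dma,
                            uint32_t flags, uint16_t pkt_len)
    {
        txd->buffer_addr = cpu_to_le_64(dma);
        txd->cmd_type_len = cpu_to_le_32(flags | pkt_len);
        txd->olinfo_status = cpu_to_le_32((uint32_t)pkt_len << PAYLEN_SHIFT);
    }
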
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
uint16_t nb_tx_to_clean;
+ uint32_t status;
/* Determine the last descriptor needing to be cleaned */
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
/* Check to make sure the last descriptor to clean is done */
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
- if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+ status = txr[desc_to_clean_to].wb.status;
+ if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
{
PMD_TX_FREE_LOG(DEBUG,
"TX descriptor %4u is not done"
* RX functions
*
**********************************************************************/
-#ifdef RTE_NEXT_ABI
#define IXGBE_PACKET_TYPE_IPV4 0X01
#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
return ip_rss_types_map[pkt_info & 0XF];
#endif
}
-#else /* RTE_NEXT_ABI */
-static inline uint64_t
-rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
-{
- uint64_t pkt_flags;
-
- static const uint64_t ip_pkt_types_map[16] = {
- 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
- PKT_RX_IPV6_HDR, 0, 0, 0,
- PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
- PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
- };
-
- static const uint64_t ip_rss_types_map[16] = {
- 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, 0, 0, 0,
- 0, 0, 0, PKT_RX_FDIR,
- };
-
-#ifdef RTE_LIBRTE_IEEE1588
- static uint64_t ip_pkt_etqf_map[8] = {
- 0, 0, 0, PKT_RX_IEEE1588_PTP,
- 0, 0, 0, 0,
- };
-
- pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
- ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
- ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
-#else
- pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
- ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
-
-#endif
- return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF];
-}
-#endif /* RTE_NEXT_ABI */
static inline uint64_t
rx_desc_status_to_pkt_flags(uint32_t rx_status)
struct rte_mbuf *mb;
uint16_t pkt_len;
uint64_t pkt_flags;
-#ifdef RTE_NEXT_ABI
int nb_dd;
uint32_t s[LOOK_AHEAD];
uint16_t pkt_info[LOOK_AHEAD];
-#else
- int s[LOOK_AHEAD], nb_dd;
-#endif /* RTE_NEXT_ABI */
int i, j, nb_rx = 0;
-
+ uint32_t status;
/* get references to current descriptor and S/W ring entry */
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
+ status = rxdp->wb.upper.status_error;
/* check to make sure there is at least 1 packet to receive */
- if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+ if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
return 0;
/*
{
/* Read desc statuses backwards to avoid race condition */
for (j = LOOK_AHEAD-1; j >= 0; --j)
- s[j] = rxdp[j].wb.upper.status_error;
+ s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
-#ifdef RTE_NEXT_ABI
for (j = LOOK_AHEAD - 1; j >= 0; --j)
pkt_info[j] = rxdp[j].wb.lower.lo_dword.
hs_rss.pkt_info;
-#endif /* RTE_NEXT_ABI */
/* Compute how many status bits were set */
nb_dd = 0;
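
Why backwards: hardware completes descriptors in order, so when the snapshot is taken back to front, any s[j] that reads as done guarantees the later-sampled s[0..j-1] read as done too; the window can never contain a "done" entry behind a stale "not done" one. The counting step itself is elided above; a self-contained sketch of what it amounts to, with illustrative names:

    /* Count contiguous completed descriptors in a LOOK_AHEAD window.
     * s[] holds CPU-order status snapshots taken back to front. */
    static int count_done(const uint32_t *s, int look_ahead, uint32_t dd_bit)
    {
        int nb_dd = 0;

        while (nb_dd < look_ahead && (s[nb_dd] & dd_bit))
            nb_dd++; /* stop at the first incomplete descriptor */

        return nb_dd;
    }
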
/* Translate descriptor info to mbuf format */
for (j = 0; j < nb_dd; ++j) {
mb = rxep[j].mbuf;
- pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
+ pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
+ rxq->crc_len;
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
/* convert descriptor fields to rte mbuf flags */
-#ifdef RTE_NEXT_ABI
pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
pkt_flags |=
mb->ol_flags = pkt_flags;
mb->packet_type =
ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
-#else /* RTE_NEXT_ABI */
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
- rxdp[j].wb.lower.lo_dword.data);
- /* reuse status field from scan list */
- pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
- pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
- mb->ol_flags = pkt_flags;
-#endif /* RTE_NEXT_ABI */
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+ mb->hash.rss = rte_le_to_cpu_32(
+ rxdp[j].wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
- mb->hash.fdir.hash =
- (uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
- mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+ mb->hash.fdir.hash = rte_le_to_cpu_16(
+ rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
+ IXGBE_ATR_HASH_MASK;
+ mb->hash.fdir.id = rte_le_to_cpu_16(
+ rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
}
}
/* populate the descriptors */
dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
- rxdp[i].read.hdr_addr = dma_addr;
+ rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
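
The refill change above writes 0 to hdr_addr instead of the DMA address. The reasoning assumed here is that the write-back status word shares bytes with the read-format header address (and the driver does not use header split), so clearing it guarantees a stale value can never read back as a set DD bit. An illustrative sketch of the aliasing; the union layout is simplified, not the exact ixgbe descriptor:

    #include <stdint.h>

    /* The read (refill) format and the write-back format overlay each
     * other, so the status the NIC writes lands on top of hdr_addr. */
    union rx_desc {
        struct {
            uint64_t pkt_addr; /* data buffer address, little-endian */
            uint64_t hdr_addr; /* overlaps the write-back status word */
        } read;
        struct {
            uint64_t lower;
            uint32_t status_error; /* same bytes as read.hdr_addr[0..3] */
            uint16_t length;
            uint16_t vlan;
        } wb;
    };

    /* Refill after harvesting the mbuf: zeroing hdr_addr leaves no
     * leftover bit that the next status poll could take for "done". */
    static void rx_refill(volatile union rx_desc *rxd, uint64_t dma_le)
    {
        rxd->read.hdr_addr = 0;
        rxd->read.pkt_addr = dma_le;
    }
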
union ixgbe_adv_rx_desc rxd;
uint64_t dma_addr;
uint32_t staterr;
-#ifdef RTE_NEXT_ABI
uint32_t pkt_info;
-#else
- uint32_t hlen_type_rss;
-#endif
uint16_t pkt_len;
uint16_t rx_id;
uint16_t nb_rx;
*/
rxdp = &rx_ring[rx_id];
staterr = rxdp->wb.upper.status_error;
- if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
break;
rxd = *rxdp;
rxe->mbuf = nmb;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
- rxdp->read.hdr_addr = dma_addr;
+ rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
/*
rxm->data_len = pkt_len;
rxm->port = rxq->port_id;
-#ifdef RTE_NEXT_ABI
pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
pkt_info);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
rxm->ol_flags = pkt_flags;
rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
-#else /* RTE_NEXT_ABI */
- hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
-
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
- pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
- rxm->ol_flags = pkt_flags;
-#endif /* RTE_NEXT_ABI */
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+ rxm->hash.rss = rte_le_to_cpu_32(
+ rxd.wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
- rxm->hash.fdir.hash =
- (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
- rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+ rxm->hash.fdir.hash = rte_le_to_cpu_16(
+ rxd.wb.lower.hi_dword.csum_ip.csum) &
+ IXGBE_ATR_HASH_MASK;
+ rxm->hash.fdir.id = rte_le_to_cpu_16(
+ rxd.wb.lower.hi_dword.csum_ip.ip_id);
}
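
Both hash branches above match the conversion to the field width: the RSS hash is one 32-bit little-endian word, while the flow-director result packs two 16-bit little-endian halves into the same dword. A sketch with stand-in names (le_to_cpu_*, ATR_HASH_MASK, and rx_hash are illustrative):

    #include <stdint.h>

    static inline uint32_t le_to_cpu_32(uint32_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap32(v);
    #else
        return v;
    #endif
    }

    static inline uint16_t le_to_cpu_16(uint16_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap16(v);
    #else
        return v;
    #endif
    }

    #define ATR_HASH_MASK 0x7fff /* illustrative, cf. IXGBE_ATR_HASH_MASK */

    struct rx_hash {
        uint32_t rss;
        uint16_t fdir_hash;
        uint16_t fdir_id;
    };

    /* One descriptor dword, two interpretations: convert 32 bits for
     * RSS, or two independent 16-bit halves for flow director. */
    static void fill_hash(struct rx_hash *h, uint32_t rss_le,
                          uint16_t csum_le, uint16_t ip_id_le, int is_fdir)
    {
        if (!is_fdir) {
            h->rss = le_to_cpu_32(rss_le);
        } else {
            h->fdir_hash = le_to_cpu_16(csum_le) & ATR_HASH_MASK;
            h->fdir_id = le_to_cpu_16(ip_id_le);
        }
    }
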
/*
* Store the mbuf address into the next entry of the array
uint8_t port_id,
uint32_t staterr)
{
-#ifdef RTE_NEXT_ABI
uint16_t pkt_info;
uint64_t pkt_flags;
pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
head->ol_flags = pkt_flags;
head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
-#else /* RTE_NEXT_ABI */
- uint32_t hlen_type_rss;
- uint64_t pkt_flags;
-
- head->port = port_id;
-
- /*
- * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
- * set in the pkt_flags field.
- */
- head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
- hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
- pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
- head->ol_flags = pkt_flags;
-#endif /* RTE_NEXT_ABI */
if (likely(pkt_flags & PKT_RX_RSS_HASH))
head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
rxe->mbuf = nmb;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
- rxdp->read.hdr_addr = dma;
+ rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma;
} else
rxe->mbuf = NULL;
ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
staterr);
+ /*
+ * Deal with the case when HW CRC strip is disabled.
+ * That can't happen when LRO is enabled, but it could
+ * still happen in scattered RX mode.
+ */
+ first_seg->pkt_len -= rxq->crc_len;
+ if (unlikely(rxm->data_len <= rxq->crc_len)) {
+ struct rte_mbuf *lp;
+
+ for (lp = first_seg; lp->next != rxm; lp = lp->next)
+ ;
+
+ first_seg->nb_segs--;
+ lp->data_len -= rxq->crc_len - rxm->data_len;
+ lp->next = NULL;
+ rte_pktmbuf_free_seg(rxm);
+ } else
+ rxm->data_len -= rxq->crc_len;
+
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch((char *)first_seg->buf_addr +
first_seg->data_off);
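
The CRC block above handles the stripped CRC even when it straddles the last two segments: pkt_len always drops by crc_len, and a tail that holds nothing but CRC bytes is unlinked and freed, with the spill trimmed from its predecessor. A self-contained sketch, assuming a CRC-only tail implies at least two segments (true for any valid Ethernet frame); seg and free_seg() stand in for rte_mbuf and rte_pktmbuf_free_seg():

    #include <stdint.h>
    #include <stdlib.h>

    /* Minimal mbuf stand-in: only the fields the logic touches. */
    struct seg {
        struct seg *next;
        uint16_t data_len; /* bytes in this segment */
        uint32_t pkt_len;  /* whole-chain length, head only */
        uint16_t nb_segs;  /* segment count, head only */
    };

    /* Stand-in for rte_pktmbuf_free_seg(). */
    static void free_seg(struct seg *s)
    {
        free(s);
    }

    static void strip_crc(struct seg *head, struct seg *last, uint16_t crc_len)
    {
        head->pkt_len -= crc_len;

        if (last->data_len <= crc_len) {
            struct seg *lp;

            /* Walk to the segment preceding the CRC-only tail. */
            for (lp = head; lp->next != last; lp = lp->next)
                ;

            head->nb_segs--;
            /* Trim the CRC bytes that spilled into the new tail. */
            lp->data_len -= crc_len - last->data_len;
            lp->next = NULL;
            free_seg(last);
        } else {
            last->data_len -= crc_len;
        }
    }
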
prev = (uint16_t) (txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
- txd->wb.status = IXGBE_TXD_STAT_DD;
+ txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
txe[i].mbuf = NULL;
txe[i].last_id = i;
txe[prev].next_id = i;
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
- PMD_INIT_LOG(INFO, "Using simple tx code path");
+ PMD_INIT_LOG(DEBUG, "Using simple tx code path");
#ifdef RTE_IXGBE_INC_VECTOR
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
ixgbe_txq_vec_setup(txq) == 0)) {
- PMD_INIT_LOG(INFO, "Vector tx enabled.");
+ PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
} else
#endif
dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
} else {
- PMD_INIT_LOG(INFO, "Using full-featured tx code path");
- PMD_INIT_LOG(INFO,
+ PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
+ PMD_INIT_LOG(DEBUG,
" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
(unsigned long)txq->txq_flags,
(unsigned long)IXGBE_SIMPLE_FLAGS);
- PMD_INIT_LOG(INFO,
+ PMD_INIT_LOG(DEBUG,
" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
(unsigned long)txq->tx_rs_thresh,
(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
- (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+ (rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
desc += IXGBE_RXQ_SCAN_INTERVAL;
rxdp += IXGBE_RXQ_SCAN_INTERVAL;
if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
desc -= rxq->nb_rx_desc;
rxdp = &rxq->rx_ring[desc];
- return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+ return !!(rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
}
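
Both helpers above now test the DD bit against an LE-converted constant, like the hot RX path. Note the count variant probes only every IXGBE_RXQ_SCAN_INTERVAL-th descriptor, so it returns a cheap estimate rather than an exact total; a sketch with illustrative names:

    #include <stdint.h>

    #define SCAN_INTERVAL 4 /* illustrative, cf. IXGBE_RXQ_SCAN_INTERVAL */

    /* Probe every SCAN_INTERVAL-th little-endian status word from the
     * tail, wrapping around the ring, until a descriptor is not done. */
    static uint16_t rx_ring_count_done(const uint32_t *status_le,
                                       uint16_t nb_desc, uint16_t tail,
                                       uint32_t dd_le)
    {
        uint16_t desc = 0;

        while (desc < nb_desc &&
               (status_le[(tail + desc) % nb_desc] & dd_le))
            desc += SCAN_INTERVAL;

        return desc < nb_desc ? desc : nb_desc;
    }
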
void __attribute__((cold))
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
rxd = &rxq->rx_ring[i];
- rxd->read.hdr_addr = dma_addr;
+ rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
rxe[i].mbuf = mbuf;
}
*/
if (dev->data->lro) {
if (adapter->rx_bulk_alloc_allowed) {
- PMD_INIT_LOG(INFO, "LRO is requested. Using a bulk "
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
"allocation version");
dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
} else {
- PMD_INIT_LOG(INFO, "LRO is requested. Using a single "
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
"allocation version");
dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
}
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
} else if (adapter->rx_bulk_alloc_allowed) {
- PMD_INIT_LOG(INFO, "Using a Scattered with bulk "
+ PMD_INIT_LOG(DEBUG, "Using a scattered Rx with bulk "
"allocation callback (port=%d).",
dev->data->port_id);
dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
* - Single buffer allocation (the simplest one)
*/
} else if (adapter->rx_vec_allowed) {
- PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure RX "
+ PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
"burst size is no less than 32.");
dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
dev->data->lro = 1;
- PMD_INIT_LOG(INFO, "enabling LRO mode");
+ PMD_INIT_LOG(DEBUG, "enabling LRO mode");
return 0;
}