/* check DD bit on threshold descriptor */
status = txq->tx_ring[txq->tx_next_dd].wb.status;
- if (! (status & IXGBE_ADVTXD_STAT_DD))
+ if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
return 0;
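The write-back status is a little-endian field, so the byte swap is applied to the constant rather than to the per-descriptor load: rte_cpu_to_le_32() folds to a compile-time constant (and is a no-op on little-endian CPUs). A minimal sketch of the pattern, with an illustrative helper name that is not part of the driver:

#include <rte_byteorder.h>

/* Test a little-endian descriptor status word against a CPU-order mask. */
static inline int
le_status_bit_set(uint32_t le_status, uint32_t cpu_mask)
{
	/* Swap the constant, not the hot-path descriptor read. */
	return (le_status & rte_cpu_to_le_32(cpu_mask)) != 0;
}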
/*
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
- txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
txdp->read.cmd_type_len =
- ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
txdp->read.olinfo_status =
- (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
rte_prefetch0(&(*pkts)->pool);
}
}
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
- txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txdp->read.cmd_type_len =
- ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
txdp->read.olinfo_status =
- (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
rte_prefetch0(&(*pkts)->pool);
}
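For orientation, the two packed descriptor words combine a fixed command/type word with the frame length; a worked example assuming a 60-byte frame (values shown in CPU order, before the rte_cpu_to_le_32() stores above):

uint16_t pkt_len = 60;	/* example frame length */
uint32_t cmd_type_len  = (uint32_t)DCMD_DTYP_FLAGS | pkt_len;
uint32_t olinfo_status = (uint32_t)pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT;
/* Both words are then byte-swapped with rte_cpu_to_le_32() when written
 * to the descriptor, exactly as in the hunk above. */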
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
uint16_t nb_tx_to_clean;
+ uint32_t status;
/* Determine the last descriptor needing to be cleaned */
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
/* Check to make sure the last descriptor to clean is done */
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
- if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+ status = txr[desc_to_clean_to].wb.status;
+ if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
{
PMD_TX_FREE_LOG(DEBUG,
"TX descriptor %4u is not done"
txe = &sw_ring[tx_id];
/* Determine if the descriptor ring needs to be cleaned. */
- if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
+ if (txq->nb_tx_free < txq->tx_free_thresh)
ixgbe_xmit_cleanup(txq);
- }
rte_prefetch0(&txe->mbuf->pool);
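The rewritten test treats tx_free_thresh directly as the minimum number of free descriptors (its documented meaning): cleanup now runs as soon as nb_tx_free drops below the threshold instead of once the in-flight count exceeds it. A numeric sketch, assuming a 512-entry ring with tx_free_thresh = 32:

/* nb_tx_desc = 512, tx_free_thresh = 32
 * old: (512 - nb_tx_free) > 32  ->  cleanup whenever nb_tx_free <= 479
 * new: nb_tx_free < 32          ->  cleanup only when nb_tx_free <= 31
 */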
* RX functions
*
**********************************************************************/
-static inline uint64_t
-rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
+#define IXGBE_PACKET_TYPE_IPV4 0X01
+#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
+#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
+#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41
+#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03
+#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43
+#define IXGBE_PACKET_TYPE_IPV6 0X04
+#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14
+#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24
+#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C
+#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C
+#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
+#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
+#define IXGBE_PACKET_TYPE_MAX 0X80
+#define IXGBE_PACKET_TYPE_MASK 0X7F
+#define IXGBE_PACKET_TYPE_SHIFT 0X04
+static inline uint32_t
+ixgbe_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
{
- uint64_t pkt_flags;
-
- static const uint64_t ip_pkt_types_map[16] = {
- 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
- PKT_RX_IPV6_HDR, 0, 0, 0,
- PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
- PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+ static const uint32_t
+ ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = {
+ [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4,
+ [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT,
+ [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6,
+ [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
+ [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
+ [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
+ [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
};
+ if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
+ return RTE_PTYPE_UNKNOWN;
+
+ pkt_info = (pkt_info >> IXGBE_PACKET_TYPE_SHIFT) &
+ IXGBE_PACKET_TYPE_MASK;
+
+ return ptype_table[pkt_info];
+}
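The table index is the 7-bit packet-type field carried in pkt_info, which sits above the 4-bit RSS-type field, hence the shift by IXGBE_PACKET_TYPE_SHIFT and the 0X7F mask. A worked example for an IPv4/TCP frame, using only the encodings defined above:

/* pkt_info type bits = IXGBE_PACKET_TYPE_IPV4_TCP << IXGBE_PACKET_TYPE_SHIFT,
 * i.e. pkt_info & 0x7F0 == 0x110:
 *   (0x110 >> 4) & 0x7F == 0x11 == IXGBE_PACKET_TYPE_IPV4_TCP
 *   ptype_table[0x11]   == RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
 *                          RTE_PTYPE_L4_TCP
 * Unlisted indexes have no initializer and return 0, i.e. RTE_PTYPE_UNKNOWN.
 */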
- static const uint64_t ip_rss_types_map[16] = {
+static inline uint64_t
+ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info)
+{
+ static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, 0, 0,
0, 0, 0, PKT_RX_FDIR,
};
-
#ifdef RTE_LIBRTE_IEEE1588
static uint64_t ip_pkt_etqf_map[8] = {
0, 0, 0, PKT_RX_IEEE1588_PTP,
0, 0, 0, 0,
};
- pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
- ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
- ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+ if (likely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF))
+ return ip_pkt_etqf_map[(pkt_info >> 4) & 0X07] |
+ ip_rss_types_map[pkt_info & 0XF];
+ else
+ return ip_rss_types_map[pkt_info & 0XF];
#else
- pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
- ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
-
+ return ip_rss_types_map[pkt_info & 0XF];
#endif
- return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF];
}
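The low nibble of pkt_info is the RSS-type field: every recognised hash type maps to PKT_RX_RSS_HASH and value 15 maps to PKT_RX_FDIR, as the table spells out. A short usage sketch, assuming a descriptor whose RSS-type nibble is 1 and no ETQF match:

uint16_t pkt_info = 0x0111;	/* example hs_rss.pkt_info value */
uint64_t flags = ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
/* flags == PKT_RX_RSS_HASH, so the caller copies the descriptor's RSS
 * word into mb->hash.rss; with the low nibble == 0xF it would be
 * PKT_RX_FDIR and the csum/ip_id words would be used instead. */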
static inline uint64_t
IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
* LOOK_AHEAD defines how many desc statuses to check beyond the
* current descriptor.
struct rte_mbuf *mb;
uint16_t pkt_len;
uint64_t pkt_flags;
- int s[LOOK_AHEAD], nb_dd;
+ int nb_dd;
+ uint32_t s[LOOK_AHEAD];
+ uint16_t pkt_info[LOOK_AHEAD];
int i, j, nb_rx = 0;
-
+ uint32_t status;
/* get references to current descriptor and S/W ring entry */
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
+ status = rxdp->wb.upper.status_error;
/* check to make sure there is at least 1 packet to receive */
- if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+ if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
return 0;
/*
{
/* Read desc statuses backwards to avoid race condition */
for (j = LOOK_AHEAD-1; j >= 0; --j)
- s[j] = rxdp[j].wb.upper.status_error;
+ s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
+
+ for (j = LOOK_AHEAD - 1; j >= 0; --j)
+ pkt_info[j] = rxdp[j].wb.lower.lo_dword.
+ hs_rss.pkt_info;
/* Compute how many status bits were set */
nb_dd = 0;
/* Translate descriptor info to mbuf format */
for (j = 0; j < nb_dd; ++j) {
mb = rxep[j].mbuf;
- pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
+ pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
+ rxq->crc_len;
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
- mb->vlan_tci = rxdp[j].wb.upper.vlan;
mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
/* convert descriptor fields to rte mbuf flags */
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
- rxdp[j].wb.lower.lo_dword.data);
- /* reuse status field from scan list */
- pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
+ pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ pkt_flags |=
+ ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
mb->ol_flags = pkt_flags;
+ mb->packet_type =
+ ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+ mb->hash.rss = rte_le_to_cpu_32(
+ rxdp[j].wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
- mb->hash.fdir.hash =
- (uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
- mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+ mb->hash.fdir.hash = rte_le_to_cpu_16(
+ rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
+ IXGBE_ATR_HASH_MASK;
+ mb->hash.fdir.id = rte_le_to_cpu_16(
+ rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
}
}
/* populate the descriptors */
dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
- rxdp[i].read.hdr_addr = dma_addr;
+ rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
return nb_rx;
}
-#else
-
-/* Stub to avoid extra ifdefs */
-static uint16_t
-ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
- __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
-{
- return 0;
-}
-
-static inline int
-ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq,
- __rte_unused bool reset_mbuf)
-{
- return -ENOMEM;
-}
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
-
uint16_t
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
union ixgbe_adv_rx_desc rxd;
uint64_t dma_addr;
uint32_t staterr;
- uint32_t hlen_type_rss;
+ uint32_t pkt_info;
uint16_t pkt_len;
uint16_t rx_id;
uint16_t nb_rx;
*/
rxdp = &rx_ring[rx_id];
staterr = rxdp->wb.upper.status_error;
- if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
break;
rxd = *rxdp;
rxe->mbuf = nmb;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
- rxdp->read.hdr_addr = dma_addr;
+ rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
/*
rxm->data_len = pkt_len;
rxm->port = rxq->port_id;
- hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+ pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
+ pkt_info);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr);
pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags = pkt_flags |
+ ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
rxm->ol_flags = pkt_flags;
+ rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+ rxm->hash.rss = rte_le_to_cpu_32(
+ rxd.wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
- rxm->hash.fdir.hash =
- (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
- rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+ rxm->hash.fdir.hash = rte_le_to_cpu_16(
+ rxd.wb.lower.hi_dword.csum_ip.csum) &
+ IXGBE_ATR_HASH_MASK;
+ rxm->hash.fdir.id = rte_le_to_cpu_16(
+ rxd.wb.lower.hi_dword.csum_ip.ip_id);
}
/*
* Store the mbuf address into the next entry of the array
uint8_t port_id,
uint32_t staterr)
{
- uint32_t hlen_type_rss;
+ uint16_t pkt_info;
uint64_t pkt_flags;
head->port = port_id;
- /*
- * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+ /* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
- hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
+ pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.hs_rss.pkt_info);
+ pkt_flags = rx_desc_status_to_pkt_flags(staterr);
pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
head->ol_flags = pkt_flags;
+ head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
if (likely(pkt_flags & PKT_RX_RSS_HASH))
head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
rx_mbuf_alloc_failed++;
break;
}
- } else if (nb_hold > rxq->rx_free_thresh) {
+ }
+ else if (nb_hold > rxq->rx_free_thresh) {
uint16_t next_rdt = rxq->rx_free_trigger;
if (!ixgbe_rx_alloc_bufs(rxq, false)) {
rxe->mbuf = nmb;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
- rxdp->read.hdr_addr = dma;
+ rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma;
} else
rxe->mbuf = NULL;
ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
staterr);
+ /*
+ * Deal with the case when HW CRC strip is disabled.
+ * That can't happen when LRO is enabled, but still could
+ * happen for scattered RX mode.
+ */
+ first_seg->pkt_len -= rxq->crc_len;
+ if (unlikely(rxm->data_len <= rxq->crc_len)) {
+ struct rte_mbuf *lp;
+
+ for (lp = first_seg; lp->next != rxm; lp = lp->next)
+ ;
+
+ first_seg->nb_segs--;
+ lp->data_len -= rxq->crc_len - rxm->data_len;
+ lp->next = NULL;
+ rte_pktmbuf_free_seg(rxm);
+ } else
+ rxm->data_len -= rxq->crc_len;
+
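A worked case for the new CRC accounting, assuming crc_len is 4 and the final segment of the chain holds only 2 bytes of data:

/* crc_len = 4, tail segment data_len = 2 (<= crc_len):
 *   first_seg->pkt_len -= 4;
 *   the tail segment is unlinked and freed (nb_segs--, lp->next = NULL);
 *   the previous segment gives up the remaining 4 - 2 = 2 CRC bytes.
 * When the tail holds more than crc_len bytes, only that segment shrinks
 * by crc_len, as in the else branch above.
 */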
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch((char *)first_seg->buf_addr +
first_seg->data_off);
* needed. If the memzone is already created, then this function returns a ptr
* to the old one.
*/
-static const struct rte_memzone *
+static const struct rte_memzone * __attribute__((cold))
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
uint16_t queue_id, uint32_t ring_size, int socket_id)
{
#endif
}
-static void
+static void __attribute__((cold))
ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
unsigned i;
}
}
-static void
+static void __attribute__((cold))
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
if (txq != NULL &&
rte_free(txq->sw_ring);
}
-static void
+static void __attribute__((cold))
ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
{
if (txq != NULL && txq->ops != NULL) {
}
}
-void
+void __attribute__((cold))
ixgbe_dev_tx_queue_release(void *txq)
{
ixgbe_tx_queue_release(txq);
}
/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
-static void
+static void __attribute__((cold))
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
prev = (uint16_t) (txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
- txd->wb.status = IXGBE_TXD_STAT_DD;
+ txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
txe[i].mbuf = NULL;
txe[i].last_id = i;
txe[prev].next_id = i;
* the queue parameters. Used in tx_queue_setup by primary process and then
* in dev_init by secondary process when attaching to an existing ethdev.
*/
-void
+void __attribute__((cold))
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
- PMD_INIT_LOG(INFO, "Using simple tx code path");
+ PMD_INIT_LOG(DEBUG, "Using simple tx code path");
#ifdef RTE_IXGBE_INC_VECTOR
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
ixgbe_txq_vec_setup(txq) == 0)) {
- PMD_INIT_LOG(INFO, "Vector tx enabled.");
+ PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
} else
#endif
dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
} else {
- PMD_INIT_LOG(INFO, "Using full-featured tx code path");
- PMD_INIT_LOG(INFO,
+ PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
+ PMD_INIT_LOG(DEBUG,
" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
(unsigned long)txq->txq_flags,
(unsigned long)IXGBE_SIMPLE_FLAGS);
- PMD_INIT_LOG(INFO,
+ PMD_INIT_LOG(DEBUG,
" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
(unsigned long)txq->tx_rs_thresh,
(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
}
}
-int
+int __attribute__((cold))
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
*
* @m scattered cluster head
*/
-static void
+static void __attribute__((cold))
ixgbe_free_sc_cluster(struct rte_mbuf *m)
{
uint8_t i, nb_segs = m->nb_segs;
}
}
-static void
+static void __attribute__((cold))
ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
{
unsigned i;
+#ifdef RTE_IXGBE_INC_VECTOR
+ /* SSE Vector driver has a different way of releasing mbufs. */
+ if (rxq->rx_using_sse) {
+ ixgbe_rx_queue_release_mbufs_vec(rxq);
+ return;
+ }
+#endif
+
if (rxq->sw_ring != NULL) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
if (rxq->sw_ring[i].mbuf != NULL) {
rxq->sw_ring[i].mbuf = NULL;
}
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
if (rxq->rx_nb_avail) {
for (i = 0; i < rxq->rx_nb_avail; ++i) {
struct rte_mbuf *mb;
}
rxq->rx_nb_avail = 0;
}
-#endif
}
if (rxq->sw_sc_ring)
}
}
-static void
+static void __attribute__((cold))
ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
{
if (rxq != NULL) {
}
}
-void
+void __attribute__((cold))
ixgbe_dev_rx_queue_release(void *rxq)
{
ixgbe_rx_queue_release(rxq);
* -EINVAL: the preconditions are NOT satisfied and the default Rx burst
* function must be used.
*/
-static inline int
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
+static inline int __attribute__((cold))
check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
-#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
-#endif
{
int ret = 0;
* Scattered packets are not supported. This should be checked
* outside of this function.
*/
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
"rxq->rx_free_thresh=%d, "
RTE_PMD_IXGBE_RX_MAX_BURST);
ret = -EINVAL;
}
-#else
- ret = -EINVAL;
-#endif
return ret;
}
/* Reset dynamic ixgbe_rx_queue fields back to defaults */
-static void
+static void __attribute__((cold))
ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
{
static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
rxq->rx_ring[i] = zeroed_desc;
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
* initialize extra software ring entries. Space for these extra
* entries is always allocated
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
+#endif
}
-int
+int __attribute__((cold))
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
- (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+ (rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
desc += IXGBE_RXQ_SCAN_INTERVAL;
rxdp += IXGBE_RXQ_SCAN_INTERVAL;
if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
desc -= rxq->nb_rx_desc;
rxdp = &rxq->rx_ring[desc];
- return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+ return !!(rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
}
-void
+void __attribute__((cold))
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
unsigned i;
}
}
+void
+ixgbe_dev_free_queues(struct rte_eth_dev *dev)
+{
+ unsigned i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
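A minimal sketch of the intended call site, assuming the new helper is invoked from the PMD's close path once the queues have been stopped (the caller below is illustrative only, not part of this patch):

static void
example_dev_close(struct rte_eth_dev *dev)
{
	/* Queues must already be stopped before their memory is released. */
	ixgbe_dev_clear_queues(dev);
	ixgbe_dev_free_queues(dev);
}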
/*********************************************************************
*
* Device RX/TX init functions
return;
}
-static int
+static int __attribute__((cold))
ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
struct ixgbe_rx_entry *rxe = rxq->sw_ring;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
rxd = &rxq->rx_ring[i];
- rxd->read.hdr_addr = dma_addr;
+ rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
rxe[i].mbuf = mbuf;
}
}
}
-void ixgbe_set_rx_function(struct rte_eth_dev *dev)
+void __attribute__((cold))
+ixgbe_set_rx_function(struct rte_eth_dev *dev)
{
+ uint16_t i, rx_using_sse;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
*/
if (dev->data->lro) {
if (adapter->rx_bulk_alloc_allowed) {
- PMD_INIT_LOG(INFO, "LRO is requested. Using a bulk "
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
"allocation version");
dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
} else {
- PMD_INIT_LOG(INFO, "LRO is requested. Using a single "
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
"allocation version");
dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
}
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
} else if (adapter->rx_bulk_alloc_allowed) {
- PMD_INIT_LOG(INFO, "Using a Scattered with bulk "
+ PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
"allocation callback (port=%d).",
dev->data->port_id);
dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
* - Single buffer allocation (the simplest one)
*/
} else if (adapter->rx_vec_allowed) {
- PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure RX "
+ PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
"burst size no less than 32.");
dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
- "satisfied, or Scattered Rx is requested, "
- "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
- "is not enabled (port=%d).",
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
dev->data->port_id);
dev->rx_pkt_burst = ixgbe_recv_pkts;
}
+
+ /* Propagate information about RX function choice through all queues. */
+
+ rx_using_sse =
+ (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+ rxq->rx_using_sse = rx_using_sse;
+ }
}
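The per-queue rx_using_sse flag recorded here is what the release path added earlier keys on; queues filled by the vector Rx functions are drained by ixgbe_rx_queue_release_mbufs_vec() rather than the scalar sw_ring walk:

/* Consumer side, as added above in ixgbe_rx_queue_release_mbufs():
 *
 *	if (rxq->rx_using_sse) {
 *		ixgbe_rx_queue_release_mbufs_vec(rxq);
 *		return;
 *	}
 */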
/**
dev->data->lro = 1;
- PMD_INIT_LOG(INFO, "enabling LRO mode");
+ PMD_INIT_LOG(DEBUG, "enabling LRO mode");
return 0;
}
/*
* Initializes Receive Unit.
*/
-int
+int __attribute__((cold))
ixgbe_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* Initializes Transmit Unit.
*/
-void
+void __attribute__((cold))
ixgbe_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* Set up link for 82599 loopback mode Tx->Rx.
*/
-static inline void
+static inline void __attribute__((cold))
ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
{
PMD_INIT_FUNC_TRACE();
/*
* Start Transmit and Receive Units.
*/
-int
+int __attribute__((cold))
ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* Start Receive Units for specified queue.
*/
-int
+int __attribute__((cold))
ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
/*
* Stop Receive Units for specified queue.
*/
-int
+int __attribute__((cold))
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
/*
* Start Transmit Units for specified queue.
*/
-int
+int __attribute__((cold))
ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
/*
* Stop Transmit Units for specified queue.
*/
-int
+int __attribute__((cold))
ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
/*
* [VF] Initializes Receive Unit.
*/
-int
+int __attribute__((cold))
ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* [VF] Initializes Transmit Unit.
*/
-void
+void __attribute__((cold))
ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* [VF] Start Transmit and Receive Units.
*/
-void
+void __attribute__((cold))
ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;