/* check DD bit on threshold descriptor */
status = txq->tx_ring[txq->tx_next_dd].wb.status;
- if (! (status & IXGBE_ADVTXD_STAT_DD))
+ if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
return 0;
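/*
 * Note: the descriptor status is little-endian, so the DD constant is
 * converted (a compile-time no-op on little-endian CPUs) rather than
 * byte-swapping the status word on every poll.
 */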
/*
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
- txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
txdp->read.cmd_type_len =
- ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
txdp->read.olinfo_status =
- (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
rte_prefetch0(&(*pkts)->pool);
}
}
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
- txdp->read.buffer_addr = buf_dma_addr;
+ txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txdp->read.cmd_type_len =
- ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+ rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
txdp->read.olinfo_status =
- (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
rte_prefetch0(&(*pkts)->pool);
}
mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
tx_offload_mask.l2_len |= ~0;
tx_offload_mask.l3_len |= ~0;
- tx_offload_mask.l4_len |= ~0;
break;
case PKT_TX_SCTP_CKSUM:
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
uint16_t nb_tx_desc = txq->nb_tx_desc;
uint16_t desc_to_clean_to;
uint16_t nb_tx_to_clean;
+ uint32_t status;
/* Determine the last descriptor needing to be cleaned */
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
/* Check to make sure the last descriptor to clean is done */
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
- if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+ status = txr[desc_to_clean_to].wb.status;
+ if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
{
PMD_TX_FREE_LOG(DEBUG,
"TX descriptor %4u is not done"
struct ixgbe_tx_entry *sw_ring;
struct ixgbe_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
- volatile union ixgbe_adv_tx_desc *txd;
+ volatile union ixgbe_adv_tx_desc *txd, *txp;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint64_t buf_dma_addr;
txr = txq->tx_ring;
tx_id = txq->tx_tail;
txe = &sw_ring[tx_id];
+ txp = NULL;
/* Determine if the descriptor ring needs to be cleaned. */
if (txq->nb_tx_free < txq->tx_free_thresh)
*/
nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
+ if (txp != NULL &&
+ nb_used + txq->nb_tx_used >= txq->tx_rs_thresh)
+ /* set RS on the previous packet in the burst */
+ txp->read.cmd_type_len |=
+ rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
/*
* The number of descriptors that must be allocated for a
* packet is the number of segments of that packet, plus 1
/* Update txq RS bit counters */
txq->nb_tx_used = 0;
- }
+ txp = NULL;
+ } else
+ txp = txd;
+
txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
}
+
end_of_tx:
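+ /*
+ * RS (Report Status) makes the NIC write back the DD bit for that
+ * descriptor. Since RS is only requested every tx_rs_thresh descriptors
+ * inside the loop, the last packet of the burst must ask for it here;
+ * otherwise its buffers would not be reclaimed until a later burst
+ * crosses the threshold.
+ */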
+ /* set RS on last packet in the burst */
+ if (txp != NULL)
+ txp->read.cmd_type_len |= rte_cpu_to_le_32(IXGBE_TXD_CMD_RS);
+
rte_wmb();
/*
* RX functions
*
**********************************************************************/
-#ifdef RTE_NEXT_ABI
#define IXGBE_PACKET_TYPE_IPV4 0X01
#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11
#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21
return ip_rss_types_map[pkt_info & 0XF];
#endif
}
-#else /* RTE_NEXT_ABI */
-static inline uint64_t
-rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
-{
- uint64_t pkt_flags;
-
- static const uint64_t ip_pkt_types_map[16] = {
- 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
- PKT_RX_IPV6_HDR, 0, 0, 0,
- PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
- PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
- };
-
- static const uint64_t ip_rss_types_map[16] = {
- 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
- 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
- PKT_RX_RSS_HASH, 0, 0, 0,
- 0, 0, 0, PKT_RX_FDIR,
- };
-
-#ifdef RTE_LIBRTE_IEEE1588
- static uint64_t ip_pkt_etqf_map[8] = {
- 0, 0, 0, PKT_RX_IEEE1588_PTP,
- 0, 0, 0, 0,
- };
-
- pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
- ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
- ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
-#else
- pkt_flags = (hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
- ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
-
-#endif
- return pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF];
-}
-#endif /* RTE_NEXT_ABI */
static inline uint64_t
rx_desc_status_to_pkt_flags(uint32_t rx_status)
IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
* LOOK_AHEAD defines how many desc statuses to check beyond the
* current descriptor.
struct rte_mbuf *mb;
uint16_t pkt_len;
uint64_t pkt_flags;
-#ifdef RTE_NEXT_ABI
int nb_dd;
uint32_t s[LOOK_AHEAD];
uint16_t pkt_info[LOOK_AHEAD];
-#else
- int s[LOOK_AHEAD], nb_dd;
-#endif /* RTE_NEXT_ABI */
int i, j, nb_rx = 0;
-
+ uint32_t status;
/* get references to current descriptor and S/W ring entry */
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
+ status = rxdp->wb.upper.status_error;
/* check to make sure there is at least 1 packet to receive */
- if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+ if (!(status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
return 0;
/*
{
/* Read desc statuses backwards to avoid race condition */
for (j = LOOK_AHEAD-1; j >= 0; --j)
- s[j] = rxdp[j].wb.upper.status_error;
+ s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
-#ifdef RTE_NEXT_ABI
for (j = LOOK_AHEAD - 1; j >= 0; --j)
pkt_info[j] = rxdp[j].wb.lower.lo_dword.
hs_rss.pkt_info;
-#endif /* RTE_NEXT_ABI */
/* Compute how many status bits were set */
nb_dd = 0;
/* Translate descriptor info to mbuf format */
for (j = 0; j < nb_dd; ++j) {
mb = rxep[j].mbuf;
- pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
+ pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
+ rxq->crc_len;
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
/* convert descriptor fields to rte mbuf flags */
-#ifdef RTE_NEXT_ABI
pkt_flags = rx_desc_status_to_pkt_flags(s[j]);
pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
pkt_flags |=
mb->ol_flags = pkt_flags;
mb->packet_type =
ixgbe_rxd_pkt_info_to_pkt_type(pkt_info[j]);
-#else /* RTE_NEXT_ABI */
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
- rxdp[j].wb.lower.lo_dword.data);
- /* reuse status field from scan list */
- pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
- pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
- mb->ol_flags = pkt_flags;
-#endif /* RTE_NEXT_ABI */
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+ mb->hash.rss = rte_le_to_cpu_32(
+ rxdp[j].wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
- mb->hash.fdir.hash =
- (uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
- mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+ mb->hash.fdir.hash = rte_le_to_cpu_16(
+ rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
+ IXGBE_ATR_HASH_MASK;
+ mb->hash.fdir.id = rte_le_to_cpu_16(
+ rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
}
}
/* populate the descriptors */
dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
- rxdp[i].read.hdr_addr = dma_addr;
+ rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
return nb_rx;
}
-#else
-
-/* Stub to avoid extra ifdefs */
-static uint16_t
-ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
- __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
-{
- return 0;
-}
-
-static inline int
-ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq,
- __rte_unused bool reset_mbuf)
-{
- return -ENOMEM;
-}
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
-
uint16_t
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
union ixgbe_adv_rx_desc rxd;
uint64_t dma_addr;
uint32_t staterr;
-#ifdef RTE_NEXT_ABI
uint32_t pkt_info;
-#else
- uint32_t hlen_type_rss;
-#endif
uint16_t pkt_len;
uint16_t rx_id;
uint16_t nb_rx;
*/
rxdp = &rx_ring[rx_id];
staterr = rxdp->wb.upper.status_error;
- if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+ if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
break;
rxd = *rxdp;
rxe->mbuf = nmb;
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
- rxdp->read.hdr_addr = dma_addr;
+ rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
/*
rxm->data_len = pkt_len;
rxm->port = rxq->port_id;
-#ifdef RTE_NEXT_ABI
pkt_info = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.hs_rss.
pkt_info);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
rxm->ol_flags = pkt_flags;
rxm->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
-#else /* RTE_NEXT_ABI */
- hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
- /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
-
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
- pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
- rxm->ol_flags = pkt_flags;
-#endif /* RTE_NEXT_ABI */
if (likely(pkt_flags & PKT_RX_RSS_HASH))
- rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+ rxm->hash.rss = rte_le_to_cpu_32(
+ rxd.wb.lower.hi_dword.rss);
else if (pkt_flags & PKT_RX_FDIR) {
- rxm->hash.fdir.hash =
- (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
- rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+ rxm->hash.fdir.hash = rte_le_to_cpu_16(
+ rxd.wb.lower.hi_dword.csum_ip.csum) &
+ IXGBE_ATR_HASH_MASK;
+ rxm->hash.fdir.id = rte_le_to_cpu_16(
+ rxd.wb.lower.hi_dword.csum_ip.ip_id);
}
/*
* Store the mbuf address into the next entry of the array
uint8_t port_id,
uint32_t staterr)
{
-#ifdef RTE_NEXT_ABI
uint16_t pkt_info;
uint64_t pkt_flags;
pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
head->ol_flags = pkt_flags;
head->packet_type = ixgbe_rxd_pkt_info_to_pkt_type(pkt_info);
-#else /* RTE_NEXT_ABI */
- uint32_t hlen_type_rss;
- uint64_t pkt_flags;
-
- head->port = port_id;
-
- /*
- * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
- * set in the pkt_flags field.
- */
- head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
- hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
- pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
- head->ol_flags = pkt_flags;
-#endif /* RTE_NEXT_ABI */
if (likely(pkt_flags & PKT_RX_RSS_HASH))
head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
break;
}
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
else if (nb_hold > rxq->rx_free_thresh) {
uint16_t next_rdt = rxq->rx_free_trigger;
break;
}
}
-#endif
nb_hold++;
rxe = &sw_ring[rx_id];
rxe->mbuf = nmb;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
- rxdp->read.hdr_addr = dma;
+ rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma;
} else
rxe->mbuf = NULL;
ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
staterr);
+ /*
+ * Deal with the case when HW CRC strip is disabled.
+ * That cannot happen when LRO is enabled, but it still can
+ * for scattered Rx mode.
+ */
+ first_seg->pkt_len -= rxq->crc_len;
+ if (unlikely(rxm->data_len <= rxq->crc_len)) {
+ struct rte_mbuf *lp;
+
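+ /*
+ * The last segment holds only (part of) the CRC: unlink and free it,
+ * then trim whatever remains of the CRC from the new last segment.
+ */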
+ for (lp = first_seg; lp->next != rxm; lp = lp->next)
+ ;
+
+ first_seg->nb_segs--;
+ lp->data_len -= rxq->crc_len - rxm->data_len;
+ lp->next = NULL;
+ rte_pktmbuf_free_seg(rxm);
+ } else
+ rxm->data_len -= rxq->crc_len;
+
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch((char *)first_seg->buf_addr +
first_seg->data_off);
*
**********************************************************************/
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
- * also optimize cache line size effect. H/W supports up to cache line size 128.
- */
-#define IXGBE_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
- */
-#define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
-
/*
* Create memzone for HW rings. malloc can't be used as the physical address is
* needed. If the memzone is already created, then this function returns a ptr
prev = (uint16_t) (txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
- txd->wb.status = IXGBE_TXD_STAT_DD;
+ txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
txe[i].mbuf = NULL;
txe[i].last_id = i;
txe[prev].next_id = i;
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
- PMD_INIT_LOG(INFO, "Using simple tx code path");
+ PMD_INIT_LOG(DEBUG, "Using simple tx code path");
#ifdef RTE_IXGBE_INC_VECTOR
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
ixgbe_txq_vec_setup(txq) == 0)) {
- PMD_INIT_LOG(INFO, "Vector tx enabled.");
+ PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
} else
#endif
dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
} else {
- PMD_INIT_LOG(INFO, "Using full-featured tx code path");
- PMD_INIT_LOG(INFO,
+ PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
+ PMD_INIT_LOG(DEBUG,
" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
(unsigned long)txq->txq_flags,
(unsigned long)IXGBE_SIMPLE_FLAGS);
- PMD_INIT_LOG(INFO,
+ PMD_INIT_LOG(DEBUG,
" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
(unsigned long)txq->tx_rs_thresh,
(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
* It must not exceed hardware maximum, and must be multiple
* of IXGBE_ALIGN.
*/
- if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
- (nb_desc > IXGBE_MAX_RING_DESC) ||
- (nb_desc < IXGBE_MIN_RING_DESC)) {
+ if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
+ (nb_desc > IXGBE_MAX_RING_DESC) ||
+ (nb_desc < IXGBE_MIN_RING_DESC)) {
return -EINVAL;
}
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
if (tx_rs_thresh >= (nb_desc - 2)) {
PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
- "of TX descriptors minus 2. (tx_rs_thresh=%u "
- "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
- (int)dev->data->port_id, (int)queue_idx);
+ "of TX descriptors minus 2. (tx_rs_thresh=%u "
+ "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
+ return -(EINVAL);
+ }
+ if (tx_rs_thresh > DEFAULT_TX_RS_THRESH) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh must be less or equal than %u. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ DEFAULT_TX_RS_THRESH, (unsigned int)tx_rs_thresh,
+ (int)dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
if (tx_free_thresh >= (nb_desc - 3)) {
rxq->sw_ring[i].mbuf = NULL;
}
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
if (rxq->rx_nb_avail) {
for (i = 0; i < rxq->rx_nb_avail; ++i) {
struct rte_mbuf *mb;
}
rxq->rx_nb_avail = 0;
}
-#endif
}
if (rxq->sw_sc_ring)
* function must be used.
*/
static inline int __attribute__((cold))
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
-#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
-#endif
{
int ret = 0;
* Scattered packets are not supported. This should be checked
* outside of this function.
*/
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
"rxq->rx_free_thresh=%d, "
RTE_PMD_IXGBE_RX_MAX_BURST);
ret = -EINVAL;
}
-#else
- ret = -EINVAL;
-#endif
return ret;
}
rxq->rx_ring[i] = zeroed_desc;
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
* initialize extra software ring entries. Space for these extra
* entries is always allocated
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
+
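+ /* The vector Rx path keeps its own re-arm bookkeeping; reset it too. */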
+#ifdef RTE_IXGBE_INC_VECTOR
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
+#endif
}
int __attribute__((cold))
* It must not exceed hardware maximum, and must be multiple
* of IXGBE_ALIGN.
*/
- if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
- (nb_desc > IXGBE_MAX_RING_DESC) ||
- (nb_desc < IXGBE_MIN_RING_DESC)) {
+ if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
+ (nb_desc > IXGBE_MAX_RING_DESC) ||
+ (nb_desc < IXGBE_MIN_RING_DESC)) {
return (-EINVAL);
}
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
- (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+ (rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) {
desc += IXGBE_RXQ_SCAN_INTERVAL;
rxdp += IXGBE_RXQ_SCAN_INTERVAL;
if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
desc -= rxq->nb_rx_desc;
rxdp = &rxq->rx_ring[desc];
- return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+ return !!(rxdp->wb.upper.status_error &
+ rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
}
void __attribute__((cold))
{
struct ixgbe_hw *hw;
uint32_t mrqc;
+ uint32_t mrqc_reg;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+ mrqc = IXGBE_READ_REG(hw, mrqc_reg);
mrqc &= ~IXGBE_MRQC_RSSEN;
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
}
static void
uint32_t rss_key;
uint64_t rss_hf;
uint16_t i;
+ uint32_t mrqc_reg;
+ uint32_t rssrk_reg;
+
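+ /*
+ * The register offsets are looked up per MAC type rather than
+ * hard-coded, since some devices place the RSS registers elsewhere.
+ */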
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+ rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
hash_key = rss_conf->rss_key;
if (hash_key != NULL) {
rss_key |= hash_key[(i * 4) + 1] << 8;
rss_key |= hash_key[(i * 4) + 2] << 16;
rss_key |= hash_key[(i * 4) + 3] << 24;
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
+ IXGBE_WRITE_REG_ARRAY(hw, rssrk_reg, i, rss_key);
}
}
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
if (rss_hf & ETH_RSS_IPV6_UDP_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ IXGBE_WRITE_REG(hw, mrqc_reg, mrqc);
}
int
struct ixgbe_hw *hw;
uint32_t mrqc;
uint64_t rss_hf;
+ uint32_t mrqc_reg;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!ixgbe_rss_update_sp(hw->mac.type)) {
+ PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
+ "NIC.");
+ return -ENOTSUP;
+ }
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+
/*
* Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
* "RSS enabling cannot be done dynamically while it must be
* disabled at initialization time."
*/
rss_hf = rss_conf->rss_hf & IXGBE_RSS_OFFLOAD_ALL;
- mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc = IXGBE_READ_REG(hw, mrqc_reg);
if (!(mrqc & IXGBE_MRQC_RSSEN)) { /* RSS disabled */
if (rss_hf != 0) /* Enable RSS */
return -(EINVAL);
uint32_t rss_key;
uint64_t rss_hf;
uint16_t i;
+ uint32_t mrqc_reg;
+ uint32_t rssrk_reg;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mrqc_reg = ixgbe_mrqc_reg_get(hw->mac.type);
+ rssrk_reg = ixgbe_rssrk_reg_get(hw->mac.type, 0);
hash_key = rss_conf->rss_key;
if (hash_key != NULL) {
/* Return RSS hash key */
for (i = 0; i < 10; i++) {
- rss_key = IXGBE_READ_REG_ARRAY(hw, IXGBE_RSSRK(0), i);
+ rss_key = IXGBE_READ_REG_ARRAY(hw, rssrk_reg, i);
hash_key[(i * 4)] = rss_key & 0x000000FF;
hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
}
/* Get RSS functions configured in MRQC register */
- mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc = IXGBE_READ_REG(hw, mrqc_reg);
if ((mrqc & IXGBE_MRQC_RSSEN) == 0) { /* RSS is disabled */
rss_conf->rss_hf = 0;
return 0;
uint32_t reta;
uint16_t i;
uint16_t j;
+ uint16_t sp_reta_size;
+ uint32_t reta_reg;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+
/*
* Fill in redirection table
* The byte-swap is needed because NIC registers are in
* little-endian order.
*/
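/*
 * Each 32-bit RETA register packs four 8-bit queue indices, so a
 * register is written once every fourth iteration.
 */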
reta = 0;
- for (i = 0, j = 0; i < 128; i++, j++) {
+ for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
+ reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+
if (j == dev->data->nb_rx_queues)
j = 0;
reta = (reta << 8) | j;
if ((i & 3) == 3)
- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),
+ IXGBE_WRITE_REG(hw, reta_reg,
rte_bswap32(reta));
}
#define NUM_VFTA_REGISTERS 128
#define NIC_RX_BUFFER_SIZE 0x200
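+/* X550 devices have a smaller Rx packet buffer than 82599/X540, hence the
+ * separate size used when dividing the buffer among traffic classes.
+ */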
+#define X550_RX_BUFFER_SIZE 0x180
static void
ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
* RXPBSIZE
* split rx buffer up into sections, each for 1 traffic class
*/
- pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ pbsize = (uint16_t)(X550_RX_BUFFER_SIZE / nb_tcs);
+ break;
+ default:
+ pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ break;
+ }
for (i = 0 ; i < nb_tcs; i++) {
uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
* mapping is done with 3 bits per priority,
* so shift by i*3 each time
*/
- queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
+ queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);
}
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = vmdq_rx_conf->dcb_queue[i];
+ j = vmdq_rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = vmdq_tx_conf->dcb_queue[i];
+ j = vmdq_tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = rx_conf->dcb_queue[i];
+ j = rx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
/* User Priority to Traffic Class mapping */
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
- j = tx_conf->dcb_queue[i];
+ j = tx_conf->dcb_tc[i];
tc = &dcb_config->tc_config[j];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
IXGBE_MRQC_VMDQRT4TCEN;
else {
+ /* whether the mode is DCB or DCB_RSS, just
+ * set the MRQE to RSSXTCEN; RSS itself is
+ * controlled by the RSS_FIELD bits
+ */
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
- IXGBE_MRQC_RT4TCEN;
+ IXGBE_MRQC_RTRSS4TCEN;
}
}
if (dcb_config->num_tcs.pg_tcs == 8) {
else {
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
- IXGBE_MRQC_RT8TCEN;
+ IXGBE_MRQC_RTRSS8TCEN;
}
}
{
int ret = 0;
uint8_t i,pfc_en,nb_tcs;
- uint16_t pbsize;
+ uint16_t pbsize, rx_buffer_size;
uint8_t config_dcb_rx = 0;
uint8_t config_dcb_tx = 0;
uint8_t tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
*get dcb and VT rx configuration parameters
*from rte_eth_conf
*/
- ixgbe_vmdq_dcb_rx_config(dev,dcb_config);
+ ixgbe_vmdq_dcb_rx_config(dev, dcb_config);
/*Configure general VMDQ and DCB RX parameters*/
ixgbe_vmdq_dcb_configure(dev);
}
break;
case ETH_MQ_RX_DCB:
+ case ETH_MQ_RX_DCB_RSS:
dcb_config->vt_mode = false;
config_dcb_rx = DCB_RX_CONFIG;
/* Get dcb TX configuration parameters from rte_eth_conf */
- ixgbe_dcb_rx_config(dev,dcb_config);
+ ixgbe_dcb_rx_config(dev, dcb_config);
/*Configure general DCB RX parameters*/
ixgbe_dcb_rx_hw_config(hw, dcb_config);
break;
dcb_config->vt_mode = false;
config_dcb_tx = DCB_TX_CONFIG;
/*get DCB TX configuration parameters from rte_eth_conf*/
- ixgbe_dcb_tx_config(dev,dcb_config);
+ ixgbe_dcb_tx_config(dev, dcb_config);
/*Configure general DCB TX parameters*/
ixgbe_dcb_tx_hw_config(hw, dcb_config);
break;
}
}
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ rx_buffer_size = X550_RX_BUFFER_SIZE;
+ break;
+ default:
+ rx_buffer_size = NIC_RX_BUFFER_SIZE;
+ break;
+ }
+
if(config_dcb_rx) {
/* Set RX buffer size */
- pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
+ pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
for (i = 0 ; i < nb_tcs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
/* Check if the PFC is supported */
if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
- pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
+ pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
for (i = 0; i < nb_tcs; i++) {
/*
* If the TC count is 8, and the default high_water is 48,
/* check support mq_mode for DCB */
if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
- (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
+ (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
+ (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
return;
if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
return;
/** Configure DCB hardware **/
- ixgbe_dcb_hw_configure(dev,dcb_cfg);
+ ixgbe_dcb_hw_configure(dev, dcb_cfg);
return;
}
dma_addr =
rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
rxd = &rxq->rx_ring[i];
- rxd->read.hdr_addr = dma_addr;
+ rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
rxe[i].mbuf = mbuf;
}
* any DCB/RSS w/o VMDq multi-queue setting
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
- case ETH_MQ_RX_RSS:
- ixgbe_rss_configure(dev);
- break;
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_DCB_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ ixgbe_rss_configure(dev);
+ break;
- case ETH_MQ_RX_VMDQ_DCB:
- ixgbe_vmdq_dcb_configure(dev);
- break;
+ case ETH_MQ_RX_VMDQ_DCB:
+ ixgbe_vmdq_dcb_configure(dev);
+ break;
- case ETH_MQ_RX_VMDQ_ONLY:
- ixgbe_vmdq_rx_hw_configure(dev);
- break;
+ case ETH_MQ_RX_VMDQ_ONLY:
+ ixgbe_vmdq_rx_hw_configure(dev);
+ break;
- case ETH_MQ_RX_NONE:
- /* if mq_mode is none, disable rss mode.*/
- default: ixgbe_rss_disable(dev);
+ case ETH_MQ_RX_NONE:
+ default:
+ /* if mq_mode is none, disable rss mode.*/
+ ixgbe_rss_disable(dev);
+ break;
}
} else {
/*
*/
if (dev->data->lro) {
if (adapter->rx_bulk_alloc_allowed) {
- PMD_INIT_LOG(INFO, "LRO is requested. Using a bulk "
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
"allocation version");
dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
} else {
- PMD_INIT_LOG(INFO, "LRO is requested. Using a single "
+ PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
"allocation version");
dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
}
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
} else if (adapter->rx_bulk_alloc_allowed) {
- PMD_INIT_LOG(INFO, "Using a Scattered with bulk "
+ PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
"allocation callback (port=%d).",
dev->data->port_id);
dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
* - Single buffer allocation (the simplest one)
*/
} else if (adapter->rx_vec_allowed) {
- PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure RX "
- "burst size no less than 32.");
+ PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+ "burst size no less than %d (port=%d).",
+ RTE_IXGBE_DESCS_PER_LOOP,
+ dev->data->port_id);
dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
} else if (adapter->rx_bulk_alloc_allowed) {
dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
- "satisfied, or Scattered Rx is requested, "
- "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
- "is not enabled (port=%d).",
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
dev->data->port_id);
dev->rx_pkt_burst = ixgbe_recv_pkts;
dev->data->lro = 1;
- PMD_INIT_LOG(INFO, "enabling LRO mode");
+ PMD_INIT_LOG(DEBUG, "enabling LRO mode");
return 0;
}
rte_wmb();
IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
} else
return -1;
ixgbe_rx_queue_release_mbufs(rxq);
ixgbe_reset_rx_queue(adapter, rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
} else
return -1;
rte_wmb();
IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
} else
return -1;
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
} else
return -1;
return 0;
}
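+/*
+ * Report an Rx queue's configuration back to the application; presumably
+ * wired up as the rxq_info_get dev op behind rte_eth_rx_queue_info_get().
+ */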
+void
+ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct ixgbe_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
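+/* Tx-side counterpart, backing rte_eth_tx_queue_info_get() in the same way. */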
+void
+ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct ixgbe_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
/*
* [VF] Initializes Receive Unit.
*/