* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
+#include <rte_ip.h>
#include "ixgbe_logs.h"
#include "ixgbe/ixgbe_api.h"
#include "ixgbe/ixgbe_common.h"
#include "ixgbe_rxtx.h"
-#define IXGBE_RSS_OFFLOAD_ALL ( \
- ETH_RSS_IPV4 | \
- ETH_RSS_IPV4_TCP | \
- ETH_RSS_IPV6 | \
- ETH_RSS_IPV6_EX | \
- ETH_RSS_IPV6_TCP | \
- ETH_RSS_IPV6_TCP_EX | \
- ETH_RSS_IPV4_UDP | \
- ETH_RSS_IPV6_UDP | \
- ETH_RSS_IPV6_UDP_EX)
-
/* Bit Mask to indicate what bits required for building TX context */
#define IXGBE_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK)
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
* Return the total number of buffers freed.
*/
static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct igb_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
{
- struct igb_tx_entry *txep;
+ struct ixgbe_tx_entry *txep;
uint32_t status;
int i;
* Copy mbuf pointers to the S/W ring.
*/
static inline void
-ixgbe_tx_fill_hw_ring(struct igb_tx_queue *txq, struct rte_mbuf **pkts,
+ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
uint16_t nb_pkts)
{
volatile union ixgbe_adv_tx_desc *txdp = &(txq->tx_ring[txq->tx_tail]);
- struct igb_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
+ struct ixgbe_tx_entry *txep = &(txq->sw_ring[txq->tx_tail]);
const int N_PER_LOOP = 4;
const int N_PER_LOOP_MASK = N_PER_LOOP-1;
int mainpart, leftover;
tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct igb_tx_queue *txq = (struct igb_tx_queue *)tx_queue;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
uint16_t n = 0;
}
static inline void
-ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
+ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
- uint64_t ol_flags, uint32_t vlan_macip_lens)
+ uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
{
uint32_t type_tucmd_mlhl;
- uint32_t mss_l4len_idx;
+ uint32_t mss_l4len_idx = 0;
uint32_t ctx_idx;
- uint32_t cmp_mask;
+ uint32_t vlan_macip_lens;
+ union ixgbe_tx_offload tx_offload_mask;
ctx_idx = txq->ctx_curr;
- cmp_mask = 0;
+ tx_offload_mask.data = 0;
type_tucmd_mlhl = 0;
+ /* Specify which HW CTX to upload. */
+ mss_l4len_idx |= (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
+
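+ /*
+ * tx_offload_mask records which tx_offload fields are meaningful for the
+ * requested offloads; only the masked fields take part in the
+ * context-descriptor cache comparison in what_advctx_update().
+ */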
if (ol_flags & PKT_TX_VLAN_PKT) {
- cmp_mask |= TX_VLAN_CMP_MASK;
+ tx_offload_mask.vlan_tci |= ~0;
}
- if (ol_flags & PKT_TX_IP_CKSUM) {
- type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
- cmp_mask |= TX_MACIP_LEN_CMP_MASK;
- }
+ /* check if TCP segmentation is required for this packet */
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* implies IP cksum and TCP cksum */
+ type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4 |
+ IXGBE_ADVTXD_TUCMD_L4T_TCP |
+ IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ tx_offload_mask.l4_len |= ~0;
+ tx_offload_mask.tso_segsz |= ~0;
+ mss_l4len_idx |= tx_offload.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= tx_offload.l4_len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ } else { /* no TSO, check if hardware checksum is needed */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ }
- /* Specify which HW CTX to upload. */
- mss_l4len_idx = (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
- cmp_mask |= TX_MACIP_LEN_CMP_MASK;
- break;
- case PKT_TX_TCP_CKSUM:
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
+ mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ case PKT_TX_TCP_CKSUM:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
- cmp_mask |= TX_MACIP_LEN_CMP_MASK;
- break;
- case PKT_TX_SCTP_CKSUM:
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
+ mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ tx_offload_mask.l4_len |= ~0;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
- cmp_mask |= TX_MACIP_LEN_CMP_MASK;
- break;
- default:
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
+ mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ default:
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
- break;
+ break;
+ }
}
txq->ctx_cache[ctx_idx].flags = ol_flags;
- txq->ctx_cache[ctx_idx].cmp_mask = cmp_mask;
- txq->ctx_cache[ctx_idx].vlan_macip_lens.data =
- vlan_macip_lens & cmp_mask;
+ txq->ctx_cache[ctx_idx].tx_offload.data =
+ tx_offload_mask.data & tx_offload.data;
+ txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
+ vlan_macip_lens = tx_offload.l3_len;
+ vlan_macip_lens |= (tx_offload.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT);
+ vlan_macip_lens |= ((uint32_t)tx_offload.vlan_tci << IXGBE_ADVTXD_VLAN_SHIFT);
ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
ctx_txd->seqnum_seed = 0;
* or create a new context descriptor.
*/
static inline uint32_t
-what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
- uint32_t vlan_macip_lens)
+what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
+ union ixgbe_tx_offload tx_offload)
{
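+ /* The Tx path caches two context descriptors (IXGBE_CTX_NUM slots):
+ * check the currently used slot first, then the other one, and only
+ * build a new context descriptor when neither matches. */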
/* If match with the current used context */
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
- (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
return txq->ctx_curr;
}
/* What if match with the next context */
txq->ctx_curr ^= 1;
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens.data ==
- (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
return txq->ctx_curr;
}
static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
{
- static const uint32_t l4_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_TXSM};
- static const uint32_t l3_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_IXSM};
- uint32_t tmp;
-
- tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
- tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
+ uint32_t tmp = 0;
+ if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
+ tmp |= IXGBE_ADVTXD_POPTS_TXSM;
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ tmp |= IXGBE_ADVTXD_POPTS_IXSM;
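+ /* TSO implies per-segment TCP checksum insertion by the HW,
+ * so request TXSM for it as well. */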
+ if (ol_flags & PKT_TX_TCP_SEG)
+ tmp |= IXGBE_ADVTXD_POPTS_TXSM;
return tmp;
}
static inline uint32_t
-tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
+tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
{
- static const uint32_t vlan_cmd[2] = {0, IXGBE_ADVTXD_DCMD_VLE};
- return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
+ uint32_t cmdtype = 0;
+ if (ol_flags & PKT_TX_VLAN_PKT)
+ cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
+ if (ol_flags & PKT_TX_TCP_SEG)
+ cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
+ return cmdtype;
}
/* Default RS bit threshold values */
/* Reset transmit descriptors after they have been used */
static inline int
-ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
+ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
{
- struct igb_tx_entry *sw_ring = txq->sw_ring;
+ struct ixgbe_tx_entry *sw_ring = txq->sw_ring;
volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
uint16_t last_desc_cleaned = txq->last_desc_cleaned;
uint16_t nb_tx_desc = txq->nb_tx_desc;
ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- struct igb_tx_queue *txq;
- struct igb_tx_entry *sw_ring;
- struct igb_tx_entry *txe, *txn;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_tx_entry *sw_ring;
+ struct ixgbe_tx_entry *txe, *txn;
volatile union ixgbe_adv_tx_desc *txr;
volatile union ixgbe_adv_tx_desc *txd;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
- union ixgbe_vlan_macip vlan_macip_lens;
uint64_t buf_dma_addr;
uint32_t olinfo_status;
uint32_t cmd_type_len;
uint64_t tx_ol_req;
uint32_t ctx = 0;
uint32_t new_ctx;
+ union ixgbe_tx_offload tx_offload = {0};
txq = tx_queue;
sw_ring = txq->sw_ring;
/* If hardware offload required */
tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
if (tx_ol_req) {
- vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
- vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.vlan_tci = tx_pkt->vlan_tci;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
/* If new context need be built or reuse the exist ctx. */
ctx = what_advctx_update(txq, tx_ol_req,
- vlan_macip_lens.data);
+ tx_offload);
/* Only allocate context descriptor if required*/
new_ctx = (ctx == IXGBE_CTX_NUM);
ctx = txq->ctx_curr;
*/
cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
- olinfo_status = (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
#ifdef RTE_LIBRTE_IEEE1588
if (ol_flags & PKT_TX_IEEE1588_TMST)
cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
#endif
+ olinfo_status = 0;
if (tx_ol_req) {
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* when TSO is on, paylen in the descriptor is not the
+ * packet len but the TCP payload len */
+ pkt_len -= (tx_offload.l2_len +
+ tx_offload.l3_len + tx_offload.l4_len);
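+ /* e.g. a 9014-byte TSO super-frame with 14 B L2, 20 B IPv4 and
+ * 20 B TCP headers programs paylen = 9014 - 54 = 8960, the TCP
+ * payload carried across all resulting segments. */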
+ }
+
/*
* Setup the TX Advanced Context Descriptor if required
*/
}
ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- vlan_macip_lens.data);
+ tx_offload);
txe->last_id = tx_last;
tx_id = txe->next_id;
* This path will go through
* whatever new/reuse the context descriptor
*/
- cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
+ cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
}
+ olinfo_status |= (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
m_seg = tx_pkt;
do {
txd = &txr[tx_id];
{
uint64_t pkt_flags;
- static uint64_t ip_pkt_types_map[16] = {
+ static const uint64_t ip_pkt_types_map[16] = {
0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
PKT_RX_IPV6_HDR, 0, 0, 0,
PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
};
- static uint64_t ip_rss_types_map[16] = {
+ static const uint64_t ip_rss_types_map[16] = {
0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, 0, 0,
#error "PMD IXGBE: LOOK_AHEAD must be 8\n"
#endif
static inline int
-ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
+ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *rxep;
+ struct ixgbe_rx_entry *rxep;
struct rte_mbuf *mb;
uint16_t pkt_len;
uint64_t pkt_flags;
}
static inline int
-ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
+ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *rxep;
+ struct ixgbe_rx_entry *rxep;
struct rte_mbuf *mb;
uint16_t alloc_idx;
- uint64_t dma_addr;
+ __le64 dma_addr;
int diag, i;
/* allocate buffers in bulk directly into the S/W ring */
- alloc_idx = (uint16_t)(rxq->rx_free_trigger -
- (rxq->rx_free_thresh - 1));
+ alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
rxep = &rxq->sw_ring[alloc_idx];
diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
rxq->rx_free_thresh);
for (i = 0; i < rxq->rx_free_thresh; ++i) {
/* populate the static rte mbuf fields */
mb = rxep[i].mbuf;
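+ /* Callers that fully initialize these fields themselves (e.g. the
+ * LRO receive path) pass reset_mbuf == false to skip the redundant
+ * stores on the hot path. */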
+ if (reset_mbuf) {
+ mb->next = NULL;
+ mb->nb_segs = 1;
+ mb->port = rxq->port_id;
+ }
+
rte_mbuf_refcnt_set(mb, 1);
- mb->next = NULL;
mb->data_off = RTE_PKTMBUF_HEADROOM;
- mb->nb_segs = 1;
- mb->port = rxq->port_id;
/* populate the descriptors */
- dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
rxdp[i].read.hdr_addr = dma_addr;
rxdp[i].read.pkt_addr = dma_addr;
}
- /* update tail pointer */
- rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rxq->rx_free_trigger);
-
/* update state of internal queue structure */
- rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_trigger +
- rxq->rx_free_thresh);
+ rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
- rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+ rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
/* no errors */
return 0;
}
static inline uint16_t
-ixgbe_rx_fill_from_stage(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq = (struct igb_rx_queue *)rx_queue;
+ struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
uint16_t nb_rx = 0;
/* Any previously recv'd pkts will be returned from the Rx stage */
/* if required, allocate new buffers to replenish descriptors */
if (rxq->rx_tail > rxq->rx_free_trigger) {
- if (ixgbe_rx_alloc_bufs(rxq) != 0) {
+ uint16_t cur_free_trigger = rxq->rx_free_trigger;
+
+ if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
int i, j;
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
return 0;
}
+
+ /* update tail pointer */
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
}
if (rxq->rx_tail >= rxq->nb_rx_desc)
}
/* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
-uint16_t
+static uint16_t
ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
return nb_rx;
}
+
+#else
+
+/* Stub to avoid extra ifdefs */
+static uint16_t
+ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static inline int
+ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq,
+ __rte_unused bool reset_mbuf)
+{
+ return -ENOMEM;
+}
#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
uint16_t
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
volatile union ixgbe_adv_rx_desc *rx_ring;
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *sw_ring;
- struct igb_rx_entry *rxe;
+ struct ixgbe_rx_entry *sw_ring;
+ struct ixgbe_rx_entry *rxe;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
union ixgbe_adv_rx_desc rxd;
return (nb_rx);
}
+/**
+ * Detect an RSC descriptor.
+ */
+static inline uint32_t
+ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
+{
+ return (rte_le_to_cpu_32(rx->wb.lower.lo_dword.data) &
+ IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
+}
+
+/**
+ * ixgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
+ *
+ * Fill the following info in the HEAD buffer of the Rx cluster:
+ * - RX port identifier
+ * - hardware offload data, if any:
+ * - RSS flag & hash
+ * - IP checksum flag
+ * - VLAN TCI, if any
+ * - error flags
+ * @head HEAD of the packet cluster
+ * @desc HW descriptor to get data from
+ * @port_id Port ID of the Rx queue
+ */
+static inline void
+ixgbe_fill_cluster_head_buf(
+ struct rte_mbuf *head,
+ union ixgbe_adv_rx_desc *desc,
+ uint8_t port_id,
+ uint32_t staterr)
+{
+ uint32_t hlen_type_rss;
+ uint64_t pkt_flags;
+
+ head->port = port_id;
+
+ /*
+ * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+ * set in the pkt_flags field.
+ */
+ head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
+ hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+ pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ head->ol_flags = pkt_flags;
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
+ else if (pkt_flags & PKT_RX_FDIR) {
+ head->hash.fdir.hash =
+ rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
+ & IXGBE_ATR_HASH_MASK;
+ head->hash.fdir.id =
+ rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
+ }
+}
+
+/**
+ * ixgbe_recv_pkts_lro - receive handler for the LRO case.
+ *
+ * @rx_queue Rx queue handle
+ * @rx_pkts table of received packets
+ * @nb_pkts size of rx_pkts table
+ * @bulk_alloc if TRUE bulk allocation is used for HW ring refilling
+ *
+ * Handles the Rx HW ring completions when the RSC feature is configured. Uses an
+ * additional ring of ixgbe_rsc_entry's that will hold the relevant RSC info.
+ *
+ * We use the same logic as in Linux and in FreeBSD ixgbe drivers:
+ * 1) When non-EOP RSC completion arrives:
+ * a) Update the HEAD of the current RSC aggregation cluster with the new
+ * segment's data length.
+ * b) Set the "next" pointer of the current segment to point to the segment
+ * at the NEXTP index.
+ * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
+ * in the sw_rsc_ring.
+ * 2) When EOP arrives we just update the cluster's total length and offload
+ * flags and deliver the cluster up to the upper layers. In our case - put it
+ * in the rx_pkts table.
+ *
+ * Returns the number of received packets/clusters (according to the "bulk
+ * receive" interface).
+ */
+static inline uint16_t
+ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+ bool bulk_alloc)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
+ struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
+ struct ixgbe_rsc_entry *sw_rsc_ring = rxq->sw_rsc_ring;
+ uint16_t rx_id = rxq->rx_tail;
+ uint16_t nb_rx = 0;
+ uint16_t nb_hold = rxq->nb_rx_hold;
+ uint16_t prev_id = rxq->rx_tail;
+
+ while (nb_rx < nb_pkts) {
+ bool eop;
+ struct ixgbe_rx_entry *rxe;
+ struct ixgbe_rsc_entry *rsc_entry;
+ struct ixgbe_rsc_entry *next_rsc_entry;
+ struct ixgbe_rx_entry *next_rxe;
+ struct rte_mbuf *first_seg;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *nmb;
+ union ixgbe_adv_rx_desc rxd;
+ uint16_t data_len;
+ uint16_t next_id;
+ volatile union ixgbe_adv_rx_desc *rxdp;
+ uint32_t staterr;
+
+next_desc:
+ /*
+ * The code in this whole file uses the volatile pointer to
+ * ensure the read ordering of the status and the rest of the
+ * descriptor fields (on the compiler level only!!!). This is so
+ * UGLY - why not just use the compiler barrier instead? DPDK
+ * even has the rte_compiler_barrier() for that.
+ *
+ * But most importantly this is just wrong because this doesn't
+ * ensure memory ordering in a general case at all. For
+ * instance, DPDK is supposed to work on Power CPUs where
+ * compiler barrier may just not be enough!
+ *
+ * I tried to write only this function properly to have a
+ * starting point (as a part of an LRO/RSC series) but the
+ * compiler cursed at me when I tried to cast away the
+ * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
+ * keeping it the way it is for now.
+ *
+ * The code in this file is broken in so many other places and
+ * will just not work on a big endian CPU anyway; therefore the
+ * lines below will have to be revisited together with the rest
+ * of the ixgbe PMD.
+ *
+ * TODO:
+ * - Get rid of "volatile" crap and let the compiler do its
+ * job.
+ * - Use the proper memory barrier (rte_rmb()) to ensure the
+ * memory ordering below.
+ */
+ rxdp = &rx_ring[rx_id];
+ staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
+
+ if (!(staterr & IXGBE_RXDADV_STAT_DD))
+ break;
+
+ rxd = *rxdp;
+
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "staterr=0x%x data_len=%u",
+ rxq->port_id, rxq->queue_id, rx_id, staterr,
+ rte_le_to_cpu_16(rxd.wb.upper.length));
+
+ if (!bulk_alloc) {
+ nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+ if (nmb == NULL) {
+ PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
+ "port_id=%u queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+
+ rte_eth_devices[rxq->port_id].data->
+ rx_mbuf_alloc_failed++;
+ break;
+ }
+ } else if (nb_hold > rxq->rx_free_thresh) {
+ uint16_t next_rdt = rxq->rx_free_trigger;
+
+ if (!ixgbe_rx_alloc_bufs(rxq, false)) {
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
+ next_rdt);
+ nb_hold -= rxq->rx_free_thresh;
+ } else {
+ PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
+ "port_id=%u queue_id=%u",
+ rxq->port_id, rxq->queue_id);
+
+ rte_eth_devices[rxq->port_id].data->
+ rx_mbuf_alloc_failed++;
+ break;
+ }
+ }
+
+ nb_hold++;
+ rxe = &sw_ring[rx_id];
+ eop = staterr & IXGBE_RXDADV_STAT_EOP;
+
+ next_id = rx_id + 1;
+ if (next_id == rxq->nb_rx_desc)
+ next_id = 0;
+
+ /* Prefetch next mbuf while processing current one. */
+ rte_ixgbe_prefetch(sw_ring[next_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 4 pointers
+ * to mbufs.
+ */
+ if ((next_id & 0x3) == 0) {
+ rte_ixgbe_prefetch(&rx_ring[next_id]);
+ rte_ixgbe_prefetch(&sw_ring[next_id]);
+ }
+
+ rxm = rxe->mbuf;
+
+ if (!bulk_alloc) {
+ __le64 dma =
+ rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ /*
+ * Update RX descriptor with the physical address of the
+ * new data buffer of the newly allocated mbuf.
+ */
+ rxe->mbuf = nmb;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxdp->read.hdr_addr = dma;
+ rxdp->read.pkt_addr = dma;
+ } else
+ rxe->mbuf = NULL;
+
+ /*
+ * Set data length & data buffer address of mbuf.
+ */
+ data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
+ rxm->data_len = data_len;
+
+ if (!eop) {
+ uint16_t nextp_id;
+ /*
+ * Get next descriptor index:
+ * - For RSC it's in the NEXTP field.
+ * - For a scattered packet - it's just a following
+ * descriptor.
+ */
+ if (ixgbe_rsc_count(&rxd))
+ nextp_id =
+ (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
+ IXGBE_RXDADV_NEXTP_SHIFT;
+ else
+ nextp_id = next_id;
+
+ next_rsc_entry = &sw_rsc_ring[nextp_id];
+ next_rxe = &sw_ring[nextp_id];
+ rte_ixgbe_prefetch(next_rxe);
+ }
+
+ rsc_entry = &sw_rsc_ring[rx_id];
+ first_seg = rsc_entry->fbuf;
+ rsc_entry->fbuf = NULL;
+
+ /*
+ * If this is the first buffer of the received packet,
+ * set the pointer to the first mbuf of the packet and
+ * initialize its context.
+ * Otherwise, update the total length and the number of segments
+ * of the current scattered packet, and update the pointer to
+ * the last mbuf of the current packet.
+ */
+ if (first_seg == NULL) {
+ first_seg = rxm;
+ first_seg->pkt_len = data_len;
+ first_seg->nb_segs = 1;
+ } else {
+ first_seg->pkt_len += data_len;
+ first_seg->nb_segs++;
+ }
+
+ prev_id = rx_id;
+ rx_id = next_id;
+
+ /*
+ * If this is not the last buffer of the received packet, update
+ * the pointer to the first mbuf at the NEXTP entry in the
+ * sw_rsc_ring and continue to parse the RX ring.
+ */
+ if (!eop) {
+ rxm->next = next_rxe->mbuf;
+ next_rsc_entry->fbuf = first_seg;
+ goto next_desc;
+ }
+
+ /*
+ * This is the last buffer of the received packet - return
+ * the current cluster to the user.
+ */
+ rxm->next = NULL;
+
+ /* Initialize the first mbuf of the returned packet */
+ ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
+ staterr);
+
+ /* Prefetch data of first segment, if configured to do so. */
+ rte_packet_prefetch((char *)first_seg->buf_addr +
+ first_seg->data_off);
+
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = first_seg;
+ }
+
+ /*
+ * Record index of the next RX descriptor to probe.
+ */
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u",
+ rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
+
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
+ nb_hold = 0;
+ }
+
+ rxq->nb_rx_hold = nb_hold;
+ return nb_rx;
+}
+
+uint16_t
+ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
+}
+
+uint16_t
+ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
+}
+
uint16_t
ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
volatile union ixgbe_adv_rx_desc *rx_ring;
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_entry *sw_ring;
- struct igb_rx_entry *rxe;
+ struct ixgbe_rx_entry *sw_ring;
+ struct ixgbe_rx_entry *rxe;
struct rte_mbuf *first_seg;
struct rte_mbuf *last_seg;
struct rte_mbuf *rxm;
union ixgbe_adv_rx_desc rxd;
uint64_t dma; /* Physical address of mbuf data buffer */
uint32_t staterr;
- uint32_t hlen_type_rss;
uint16_t rx_id;
uint16_t nb_rx;
uint16_t nb_hold;
uint16_t data_len;
- uint64_t pkt_flags;
nb_rx = 0;
nb_hold = 0;
(uint16_t) (data_len - ETHER_CRC_LEN);
}
- /*
- * Initialize the first mbuf of the returned packet:
- * - RX port identifier,
- * - hardware offload data, if any:
- * - RSS flag & hash,
- * - IP checksum flag,
- * - VLAN TCI, if any,
- * - error flags.
- */
- first_seg->port = rxq->port_id;
-
- /*
- * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
- * set in the pkt_flags field.
- */
- first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
- hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = (pkt_flags |
- rx_desc_status_to_pkt_flags(staterr));
- pkt_flags = (pkt_flags |
- rx_desc_error_to_pkt_flags(staterr));
- first_seg->ol_flags = pkt_flags;
-
- if (likely(pkt_flags & PKT_RX_RSS_HASH))
- first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
- else if (pkt_flags & PKT_RX_FDIR) {
- first_seg->hash.fdir.hash =
- (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK);
- first_seg->hash.fdir.id =
- rxd.wb.lower.hi_dword.csum_ip.ip_id;
- }
+ /* Initialize the first mbuf of the returned packet */
+ ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
+ staterr);
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch((char *)first_seg->buf_addr +
}
static void
-ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
unsigned i;
}
static void
-ixgbe_tx_free_swring(struct igb_tx_queue *txq)
+ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
if (txq != NULL &&
txq->sw_ring != NULL)
}
static void
-ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
{
if (txq != NULL && txq->ops != NULL) {
txq->ops->release_mbufs(txq);
ixgbe_tx_queue_release(txq);
}
-/* (Re)set dynamic igb_tx_queue fields to defaults */
+/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
static void
-ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
- static const union ixgbe_adv_tx_desc zeroed_desc = { .read = {
- .buffer_addr = 0}};
- struct igb_tx_entry *txe = txq->sw_ring;
+ static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
+ struct ixgbe_tx_entry *txe = txq->sw_ring;
uint16_t prev, i;
/* Zero out HW ring memory */
IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
-static struct ixgbe_txq_ops def_txq_ops = {
+static const struct ixgbe_txq_ops def_txq_ops = {
.release_mbufs = ixgbe_tx_queue_release_mbufs,
.free_swring = ixgbe_tx_free_swring,
.reset = ixgbe_reset_tx_queue,
};
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by the primary process and then
+ * in dev_init by a secondary process when attaching to an existing ethdev.
+ */
+void
+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
+{
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
+ && (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
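+ /* IXGBE_SIMPLE_FLAGS covers ETH_TXQ_FLAGS_NOMULTSEGS and
+ * ETH_TXQ_FLAGS_NOOFFLOADS, i.e. the application promised
+ * single-segment packets with no offload requests. */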
+ PMD_INIT_LOG(INFO, "Using simple tx code path");
+#ifdef RTE_IXGBE_INC_VECTOR
+ if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
+ (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+ ixgbe_txq_vec_setup(txq) == 0)) {
+ PMD_INIT_LOG(INFO, "Vector tx enabled.");
+ dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
+ } else
+#endif
+ dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
+ } else {
+ PMD_INIT_LOG(INFO, "Using full-featured tx code path");
+ PMD_INIT_LOG(INFO,
+ " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
+ (unsigned long)txq->txq_flags,
+ (unsigned long)IXGBE_SIMPLE_FLAGS);
+ PMD_INIT_LOG(INFO,
+ " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
+ (unsigned long)txq->tx_rs_thresh,
+ (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
+ dev->tx_pkt_burst = ixgbe_xmit_pkts;
+ }
+}
+
int
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
const struct rte_eth_txconf *tx_conf)
{
const struct rte_memzone *tz;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
struct ixgbe_hw *hw;
uint16_t tx_rs_thresh, tx_free_thresh;
}
/* First allocate the tx queue data structure */
- txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct igb_tx_queue),
- CACHE_LINE_SIZE, socket_id);
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL)
return (-ENOMEM);
/*
* Modification to set VFTDT for virtual function if vf is detected
*/
- if (hw->mac.type == ixgbe_mac_82599_vf)
+ if (hw->mac.type == ixgbe_mac_82599_vf ||
+ hw->mac.type == ixgbe_mac_X540_vf ||
+ hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_x_vf)
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
else
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
/* Allocate software ring */
txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
- sizeof(struct igb_tx_entry) * nb_desc,
- CACHE_LINE_SIZE, socket_id);
+ sizeof(struct ixgbe_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE, socket_id);
if (txq->sw_ring == NULL) {
ixgbe_tx_queue_release(txq);
return (-ENOMEM);
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
- /* Use a simple Tx queue (no offloads, no multi segs) if possible */
- if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
- (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
- PMD_INIT_LOG(INFO, "Using simple tx code path");
-#ifdef RTE_IXGBE_INC_VECTOR
- if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
- ixgbe_txq_vec_setup(txq) == 0) {
- PMD_INIT_LOG(INFO, "Vector tx enabled.");
- dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
- }
- else
-#endif
- dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
- } else {
- PMD_INIT_LOG(INFO, "Using full-featured tx code path");
- PMD_INIT_LOG(INFO, " - txq_flags = %lx "
- "[IXGBE_SIMPLE_FLAGS=%lx]",
- (long unsigned)txq->txq_flags,
- (long unsigned)IXGBE_SIMPLE_FLAGS);
- PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu "
- "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
- (long unsigned)txq->tx_rs_thresh,
- (long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
- dev->tx_pkt_burst = ixgbe_xmit_pkts;
- }
+ /* set up vector or scalar TX function as appropriate */
+ ixgbe_set_tx_function(dev, txq);
txq->ops->reset(txq);
return (0);
}
+/**
+ * ixgbe_free_rsc_cluster - free the not-yet-completed RSC cluster
+ *
+ * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
+ * in the sw_rsc_ring is not set to NULL but rather points to the next
+ * mbuf of this RSC aggregation (that has not been completed yet and still
+ * resides on the HW ring). So, instead of calling rte_pktmbuf_free() we
+ * will just free the first "nb_segs" segments of the cluster explicitly by calling
+ * an rte_pktmbuf_free_seg().
+ *
+ * @m RSC cluster head
+ */
static void
-ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+ixgbe_free_rsc_cluster(struct rte_mbuf *m)
+{
+ uint8_t i, nb_segs = m->nb_segs;
+ struct rte_mbuf *next_seg;
+
+ for (i = 0; i < nb_segs; i++) {
+ next_seg = m->next;
+ rte_pktmbuf_free_seg(m);
+ m = next_seg;
+ }
+}
+
+static void
+ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
{
unsigned i;
}
#endif
}
+
+ if (rxq->sw_rsc_ring)
+ for (i = 0; i < rxq->nb_rx_desc; i++)
+ if (rxq->sw_rsc_ring[i].fbuf) {
+ ixgbe_free_rsc_cluster(rxq->sw_rsc_ring[i].fbuf);
+ rxq->sw_rsc_ring[i].fbuf = NULL;
+ }
}
static void
-ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
+ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
{
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
rte_free(rxq->sw_ring);
+ rte_free(rxq->sw_rsc_ring);
rte_free(rxq);
}
}
*/
static inline int
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-check_rx_burst_bulk_alloc_preconditions(struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
+check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
#endif
{
int ret = 0;
return ret;
}
-/* Reset dynamic igb_rx_queue fields back to defaults */
+/* Reset dynamic ixgbe_rx_queue fields back to defaults */
static void
-ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
+ixgbe_reset_rx_queue(struct ixgbe_hw *hw, struct ixgbe_rx_queue *rxq)
{
- static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
- .pkt_addr = 0}};
+ static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
unsigned i;
- uint16_t len;
+ uint16_t len = rxq->nb_rx_desc;
/*
* By default, the Rx queue setup function allocates enough memory for
* constraints here to see if we need to zero out memory after the end
* of the H/W descriptor ring.
*/
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
- if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
+ if (hw->rx_bulk_alloc_allowed)
/* zero out extra memory */
- len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
- else
-#endif
- /* do not zero out extra memory */
- len = rxq->nb_rx_desc;
+ len += RTE_PMD_IXGBE_RX_MAX_BURST;
/*
* Zero out HW ring memory. Zero out extra memory at the end of
* entries is always allocated
*/
memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
- for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST; ++i) {
- rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+ for (i = rxq->nb_rx_desc; i < len; ++i) {
+ rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
}
rxq->rx_nb_avail = 0;
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
+ rxq->rsc_en = 0;
}
int
struct rte_mempool *mp)
{
const struct rte_memzone *rz;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
struct ixgbe_hw *hw;
- int use_def_burst_func = 1;
uint16_t len;
+ struct rte_eth_dev_info dev_info = { 0 };
+ struct rte_eth_rxmode *dev_rx_mode = &dev->data->dev_conf.rxmode;
+ bool rsc_requested = false;
+
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) &&
+ dev_rx_mode->enable_lro)
+ rsc_requested = true;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
}
/* First allocate the rx queue data structure */
- rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct igb_rx_queue),
- CACHE_LINE_SIZE, socket_id);
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
if (rxq == NULL)
return (-ENOMEM);
rxq->mb_pool = mp;
/*
* Modified to setup VFRDT for Virtual Function
*/
- if (hw->mac.type == ixgbe_mac_82599_vf) {
+ if (hw->mac.type == ixgbe_mac_82599_vf ||
+ hw->mac.type == ixgbe_mac_X540_vf ||
+ hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_X550EM_x_vf) {
rxq->rdt_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
rxq->rdh_reg_addr =
#endif
rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
+ /*
+ * Certain constraints must be met in order to use the bulk buffer
+ * allocation Rx burst function. If any of the Rx queues doesn't meet them,
+ * the feature should be disabled for the whole port.
+ */
+ if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
+ PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
+ "preconditions - canceling the feature for "
+ "the whole port[%d]",
+ rxq->queue_id, rxq->port_id);
+ hw->rx_bulk_alloc_allowed = false;
+ }
+
/*
* Allocate software ring. Allow for space at the end of the
* S/W ring to make sure look-ahead logic in bulk alloc Rx burst
* function does not access an invalid memory region.
*/
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
- len = (uint16_t)(nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
-#else
len = nb_desc;
-#endif
+ if (hw->rx_bulk_alloc_allowed)
+ len += RTE_PMD_IXGBE_RX_MAX_BURST;
+
rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
- sizeof(struct igb_rx_entry) * len,
- CACHE_LINE_SIZE, socket_id);
- if (rxq->sw_ring == NULL) {
+ sizeof(struct ixgbe_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->sw_ring) {
ixgbe_rx_queue_release(rxq);
return (-ENOMEM);
}
- PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
- rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
-
- /*
- * Certain constraints must be met in order to use the bulk buffer
- * allocation Rx burst function.
- */
- use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
-#ifdef RTE_IXGBE_INC_VECTOR
- ixgbe_rxq_vec_setup(rxq);
-#endif
- /* Check if pre-conditions are satisfied, and no Scattered Rx */
- if (!use_def_burst_func && !dev->data->scattered_rx) {
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "satisfied. Rx Burst Bulk Alloc function will be "
- "used on port=%d, queue=%d.",
- rxq->port_id, rxq->queue_id);
- dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
-#ifdef RTE_IXGBE_INC_VECTOR
- if (!ixgbe_rx_vec_condition_check(dev)) {
- PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
- "sure RX burst size no less than 32.");
- dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+ if (rsc_requested) {
+ rxq->sw_rsc_ring =
+ rte_zmalloc_socket("rxq->sw_rsc_ring",
+ sizeof(struct ixgbe_rsc_entry) * len,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->sw_rsc_ring) {
+ ixgbe_rx_queue_release(rxq);
+ return (-ENOMEM);
}
-#endif
-#endif
- } else {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
- "are not satisfied, Scattered Rx is requested, "
- "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
- "enabled (port=%d, queue=%d).",
- rxq->port_id, rxq->queue_id);
- }
+ } else
+ rxq->sw_rsc_ring = NULL;
+
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_rsc_ring=%p hw_ring=%p "
+ "dma_addr=0x%"PRIx64,
+ rxq->sw_ring, rxq->sw_rsc_ring, rxq->rx_ring,
+ rxq->rx_ring_phys_addr);
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
+ "preconditions - canceling the feature for "
+ "the whole port[%d]",
+ rxq->queue_id, rxq->port_id);
+ hw->rx_vec_allowed = false;
+ } else
+ ixgbe_rxq_vec_setup(rxq);
+
dev->data->rx_queues[queue_idx] = rxq;
- ixgbe_reset_rx_queue(rxq);
+ ixgbe_reset_rx_queue(hw, rxq);
return 0;
}
{
#define IXGBE_RXQ_SCAN_INTERVAL 4
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t desc = 0;
if (rx_queue_id >= dev->data->nb_rx_queues) {
ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
volatile union ixgbe_adv_rx_desc *rxdp;
- struct igb_rx_queue *rxq = rx_queue;
+ struct ixgbe_rx_queue *rxq = rx_queue;
uint32_t desc;
if (unlikely(offset >= rxq->nb_rx_desc))
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
unsigned i;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- struct igb_tx_queue *txq = dev->data->tx_queues[i];
+ struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
if (txq != NULL) {
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- struct igb_rx_queue *rxq = dev->data->rx_queues[i];
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(rxq);
+ ixgbe_reset_rx_queue(hw, rxq);
}
}
}
mrqc = IXGBE_MRQC_RSSEN; /* Enable RSS */
if (rss_hf & ETH_RSS_IPV4)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
- if (rss_hf & ETH_RSS_IPV4_TCP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
if (rss_hf & ETH_RSS_IPV6)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
if (rss_hf & ETH_RSS_IPV6_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
- if (rss_hf & ETH_RSS_IPV6_TCP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
if (rss_hf & ETH_RSS_IPV6_TCP_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
- if (rss_hf & ETH_RSS_IPV4_UDP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
- if (rss_hf & ETH_RSS_IPV6_UDP)
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
if (rss_hf & ETH_RSS_IPV6_UDP_EX)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4)
rss_hf |= ETH_RSS_IPV4;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_TCP)
- rss_hf |= ETH_RSS_IPV4_TCP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6)
rss_hf |= ETH_RSS_IPV6;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX)
rss_hf |= ETH_RSS_IPV6_EX;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_TCP)
- rss_hf |= ETH_RSS_IPV6_TCP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP)
rss_hf |= ETH_RSS_IPV6_TCP_EX;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV4_UDP)
- rss_hf |= ETH_RSS_IPV4_UDP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_UDP)
- rss_hf |= ETH_RSS_IPV6_UDP;
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
if (mrqc & IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP)
rss_hf |= ETH_RSS_IPV6_UDP_EX;
rss_conf->rss_hf = rss_hf;
}
static int
-ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
- struct igb_rx_entry *rxe = rxq->sw_ring;
+ struct ixgbe_rx_entry *rxe = rxq->sw_ring;
uint64_t dma_addr;
unsigned i;
return 0;
}
+static int
+ixgbe_config_vf_rss(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw;
+ uint32_t mrqc;
+
+ ixgbe_rss_configure(dev);
+
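+ /*
+ * ixgbe_rss_configure() programs the RSS redirection table, hash key and
+ * hash-type selection; what remains here is to select the VMDq+RSS
+ * multiqueue mode in MRQC according to the number of active pools.
+ */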
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* MRQC: enable VF RSS */
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc &= ~IXGBE_MRQC_MRQE_MASK;
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ case ETH_64_POOLS:
+ mrqc |= IXGBE_MRQC_VMDQRSS64EN;
+ break;
+
+ case ETH_32_POOLS:
+ mrqc |= IXGBE_MRQC_VMDQRSS32EN;
+ break;
+
+ default:
+ PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
+ return -EINVAL;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+ return 0;
+}
+
+static int
+ixgbe_config_vf_default(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (RTE_ETH_DEV_SRIOV(dev).active) {
+ case ETH_64_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+ IXGBE_MRQC_VMDQEN);
+ break;
+
+ case ETH_32_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+ IXGBE_MRQC_VMDQRT4TCEN);
+ break;
+
+ case ETH_16_POOLS:
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+ IXGBE_MRQC_VMDQRT8TCEN);
+ break;
+ default:
+ PMD_INIT_LOG(ERR,
+ "invalid pool number in IOV mode");
+ break;
+ }
+ return 0;
+}
+
static int
ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
default: ixgbe_rss_disable(dev);
}
} else {
- switch (RTE_ETH_DEV_SRIOV(dev).active) {
/*
* SRIOV active scheme
- * FIXME if support DCB/RSS together with VMDq & SRIOV
+ * Support RSS together with VMDq & SRIOV
*/
- case ETH_64_POOLS:
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQEN);
- break;
-
- case ETH_32_POOLS:
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT4TCEN);
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ case ETH_MQ_RX_VMDQ_RSS:
+ ixgbe_config_vf_rss(dev);
break;
- case ETH_16_POOLS:
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, IXGBE_MRQC_VMDQRT8TCEN);
- break;
+ /* FIXME if support DCB/RSS together with VMDq & SRIOV */
+ case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
+ PMD_INIT_LOG(ERR,
+ "Could not support DCB with VMDq & SRIOV");
+ return -1;
default:
- PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
+ ixgbe_config_vf_default(dev);
+ break;
}
}
return 0;
}
+/**
+ * ixgbe_get_rscctl_maxdesc - Calculate the RSCCTL[n].MAXDESC for PF
+ *
+ * Return the RSCCTL[n].MAXDESC for 82599 and x540 PF devices according to the
+ * spec rev. 3.0 chapter 8.2.3.8.13.
+ *
+ * @pool Memory pool of the Rx queue
+ */
+static inline uint32_t
+ixgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
+{
+ struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
+
+ /* MAXDESC * SRRCTL.BSIZEPKT must not exceed 64 KB minus one */
+ uint16_t maxdesc =
+ IPV4_MAX_PKT_LEN /
+ (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
+
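+ /* e.g. with a typical 2 KB mbuf data room this evaluates to
+ * 65535 / 2048 = 31, so IXGBE_RSCCTL_MAXDESC_16 is selected. */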
+ if (maxdesc >= 16)
+ return IXGBE_RSCCTL_MAXDESC_16;
+ else if (maxdesc >= 8)
+ return IXGBE_RSCCTL_MAXDESC_8;
+ else if (maxdesc >= 4)
+ return IXGBE_RSCCTL_MAXDESC_4;
+ else
+ return IXGBE_RSCCTL_MAXDESC_1;
+}
+
+/**
+ * ixgbe_set_ivar - Setup the correct IVAR register for a particular MSIX
+ * interrupt
+ *
+ * (Taken from FreeBSD tree)
+ * (yes this is all very magic and confusing :)
+ *
+ * @dev port handle
+ * @entry the register array entry
+ * @vector the MSIX vector for this queue
+ * @type RX/TX/MISC
+ */
+static void
+ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 ivar, index;
+
+ vector |= IXGBE_IVAR_ALLOC_VAL;
+
+ switch (hw->mac.type) {
+
+ case ixgbe_mac_82598EB:
+ if (type == -1)
+ entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
+ else
+ entry += (type * 64);
+ index = (entry >> 2) & 0x1F;
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
+ ivar &= ~(0xFF << (8 * (entry & 0x3)));
+ ivar |= (vector << (8 * (entry & 0x3)));
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
+ break;
+
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (type == -1) { /* MISC IVAR */
+ index = (entry & 1) * 8;
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+ ivar &= ~(0xFF << index);
+ ivar |= (vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
+ } else { /* RX/TX IVARS */
+ index = (16 * (entry & 1)) + (8 * type);
+ ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
+ }
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+void ixgbe_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /*
+ * In order to allow Vector Rx there are a few configuration
+ * conditions to be met and Rx Bulk Allocation should be allowed.
+ */
+ if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
+ !hw->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
+ "preconditions or RTE_IXGBE_INC_VECTOR is "
+ "not enabled",
+ dev->data->port_id);
+
+ hw->rx_vec_allowed = false;
+ }
+
+ /*
+ * Initialize the appropriate LRO callback.
+ *
+ * If all queues satisfy the bulk allocation preconditions
+ * (hw->rx_bulk_alloc_allowed is TRUE) then we may use bulk allocation.
+ * Otherwise use a single allocation version.
+ */
+ if (dev->data->lro) {
+ if (hw->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(INFO, "LRO is requested. Using a bulk "
+ "allocation version");
+ dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(INFO, "LRO is requested. Using a single "
+ "allocation version");
+ dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+ }
+ } else if (dev->data->scattered_rx) {
+ /*
+ * Set the non-LRO scattered callback: there are Vector and
+ * single allocation versions.
+ */
+ if (hw->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+ "callback (port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using Regular (non-vector) "
+ "Scattered Rx callback "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ }
+ /*
+ * Below we set "simple" callbacks according to port/queues parameters.
+ * If parameters allow we are going to choose between the following
+ * callbacks:
+ * - Vector
+ * - Bulk Allocation
+ * - Single buffer allocation (the simplest one)
+ */
+ } else if (hw->rx_vec_allowed) {
+ PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure RX "
+ "burst size is no less than 32.");
+
+ dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+ } else if (hw->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+ "satisfied, or Scattered Rx is requested, "
+ "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
+ "is not enabled (port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = ixgbe_recv_pkts;
+ }
+}
+
+/**
+ * ixgbe_set_rsc - configure RSC related port HW registers
+ *
+ * Configures the port's RSC related registers according to the 4.6.7.2 chapter
+ * of 82599 Spec (x540 configuration is virtually the same).
+ *
+ * @dev port handle
+ *
+ * Returns 0 in case of success or a non-zero error code
+ */
+static int
+ixgbe_set_rsc(struct rte_eth_dev *dev)
+{
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_dev_info dev_info = { 0 };
+ bool rsc_capable = false;
+ uint16_t i;
+ uint32_t rdrxctl;
+
+ /* Sanity check */
+ dev->dev_ops->dev_infos_get(dev, &dev_info);
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
+ rsc_capable = true;
+
+ if (!rsc_capable && rx_conf->enable_lro) {
+ PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
+ "support it");
+ return -EINVAL;
+ }
+
+ /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
+
+ if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+ /*
+ * According to chapter 4.6.7.2.1 of the Spec Rev.
+ * 3.0 RSC configuration requires HW CRC stripping being
+ * enabled. If user requested both HW CRC stripping off
+ * and RSC on - return an error.
+ */
+ PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
+ "is disabled");
+ return -EINVAL;
+ }
+
+ /* RFCTL configuration */
+ if (rsc_capable) {
+ uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+ if (rx_conf->enable_lro)
+ /*
+ * Since NFS packet coalescing is not supported - clear
+ * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
+ * enabled.
+ */
+ rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
+ IXGBE_RFCTL_NFSR_DIS);
+ else
+ rfctl |= IXGBE_RFCTL_RSC_DIS;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
+ }
+
+ /* If LRO hasn't been requested - we are done here. */
+ if (!rx_conf->enable_lro)
+ return 0;
+
+ /* Set RDRXCTL.RSCACKC bit */
+ rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+ rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
+ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+
+ /* Per-queue RSC configuration (chapter 4.6.7.2.2 of 82599 Spec) */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+ uint32_t srrctl =
+ IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxq->reg_idx));
+ uint32_t rscctl =
+ IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxq->reg_idx));
+ uint32_t psrtype =
+ IXGBE_READ_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx));
+ uint32_t eitr =
+ IXGBE_READ_REG(hw, IXGBE_EITR(rxq->reg_idx));
+
+ /*
+ * ixgbe PMD doesn't support header-split at the moment.
+ *
+ * Following the 4.6.7.2.1 chapter of the 82599/x540
+ * Spec if RSC is enabled the SRRCTL[n].BSIZEHEADER
+ * should be configured even if header split is not
+ * enabled. We will configure it to 128 bytes following the
+ * recommendation in the spec.
+ */
+ srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+ srrctl |= (128 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK;
+
+ /*
+ * TODO: Consider setting the Receive Descriptor Minimum
+ * Threshold Size for the RSC case. This is not an obviously
+ * beneficial option but one worth considering...
+ */
+
+ rscctl |= IXGBE_RSCCTL_RSCEN;
+ rscctl |= ixgbe_get_rscctl_maxdesc(rxq->mb_pool);
+ psrtype |= IXGBE_PSRTYPE_TCPHDR;
+
+ /*
+ * RSC: Set ITR interval corresponding to 2K ints/s.
+ *
+ * Full-sized RSC aggregations for a 10Gb/s link will
+ * arrive at about a 20K aggregations/s rate.
+ *
+ * A 2K ints/s rate will cause only 10% of the
+ * aggregations to be closed due to the interrupt timer
+ * expiring when streaming at wire speed.
+ *
+ * For a sparse streaming case this setting will yield
+ * at most 500us latency for a single RSC aggregation.
+ */
+ eitr &= ~IXGBE_EITR_ITR_INT_MASK;
+ eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(rxq->reg_idx), eitr);
+
+ /*
+ * RSC requires the mapping of the queue to the
+ * interrupt vector.
+ */
+ ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
+
+ rxq->rsc_en = 1;
+ }
+
+ dev->data->lro = 1;
+
+ PMD_INIT_LOG(INFO, "enabling LRO mode");
+
+ return 0;
+}
+
/*
* Initializes Receive Unit.
*/
ixgbe_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
- struct rte_pktmbuf_pool_private *mbp_priv;
+ struct ixgbe_rx_queue *rxq;
uint64_t bus_addr;
uint32_t rxctrl;
uint32_t fctrl;
uint32_t rxcsum;
uint16_t buf_size;
uint16_t i;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ int rc;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
* Configure CRC stripping, if any.
*/
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (dev->data->dev_conf.rxmode.hw_strip_crc)
+ if (rx_conf->hw_strip_crc)
hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
else
hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
/*
* Configure jumbo frame support, if any.
*/
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (rx_conf->jumbo_frame == 1) {
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
maxfrs &= 0x0000FFFF;
- maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+ maxfrs |= (rx_conf->max_rx_pkt_len << 16);
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
} else
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- rxq->crc_len = (uint8_t)
- ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
- ETHER_CRC_LEN);
+ rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
/*
* Configure Header Split
*/
- if (dev->data->dev_conf.rxmode.header_split) {
+ if (rx_conf->header_split) {
if (hw->mac.type == ixgbe_mac_82599EB) {
/* Must setup the PSRTYPE register */
uint32_t psrtype;
IXGBE_PSRTYPE_IPV6HDR;
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
}
- srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ srrctl = ((rx_conf->split_hdr_size <<
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
} else
#endif
srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
* The value is in 1 KB resolution. Valid values can be from
* 1 KB to 16 KB.
*/
- mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
- buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
IXGBE_SRRCTL_BSIZEPKT_MASK);
+
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
/* It adds dual VLAN length for supporting dual VLAN */
- if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
- 2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
- if (!dev->data->scattered_rx)
- PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
-#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
- }
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
- if (!dev->data->scattered_rx)
- PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
-#endif
+ if (rx_conf->enable_scatter)
dev->data->scattered_rx = 1;
- }
/*
* Device configured with multiple RX queues.
*/
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
rxcsum |= IXGBE_RXCSUM_PCSD;
- if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ if (rx_conf->hw_ip_checksum)
rxcsum |= IXGBE_RXCSUM_IPPCSE;
else
rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
- if (hw->mac.type == ixgbe_mac_82599EB) {
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (dev->data->dev_conf.rxmode.hw_strip_crc)
+ if (rx_conf->hw_strip_crc)
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
else
rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
+ rc = ixgbe_set_rsc(dev);
+ if (rc)
+ return rc;
+
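+ /*
+ * Select the Rx burst function for this port (scattered vs. single
+ * buffer, vector vs. scalar) based on the configuration collected
+ * above; the per-case assignments that used to live in this function
+ * are now centralized in ixgbe_set_rx_function().
+ */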
+ ixgbe_set_rx_function(dev);
+
return 0;
}
ixgbe_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint64_t bus_addr;
uint32_t hlreg0;
uint32_t txctrl;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- /* Enable TX CRC (checksum offload requirement) */
+ /* Enable TX CRC (checksum offload requirement) and hw padding
+ * (TSO requirement) */
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- hlreg0 |= IXGBE_HLREG0_TXCRCEN;
+ hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
/* Setup the Base and Length of the Tx Descriptor Rings */
/*
* Start Transmit and Receive Units.
*/
-void
+int
ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
- struct igb_rx_queue *rxq;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_rx_queue *rxq;
uint32_t txdctl;
uint32_t dmatxctl;
uint32_t rxctrl;
uint16_t i;
+ int ret = 0;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if (!txq->tx_deferred_start)
- ixgbe_dev_tx_queue_start(dev, i);
+ if (!txq->tx_deferred_start) {
+ ret = ixgbe_dev_tx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- if (!rxq->rx_deferred_start)
- ixgbe_dev_rx_queue_start(dev, i);
+ if (!rxq->rx_deferred_start) {
+ ret = ixgbe_dev_rx_queue_start(dev, i);
+ if (ret < 0)
+ return ret;
+ }
}
/* Enable Receive engine */
dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
ixgbe_setup_loopback_link_82599(hw);
+ return 0;
}
/*
ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t rxdctl;
int poll_ms;
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
+ struct ixgbe_rx_queue *rxq;
uint32_t rxdctl;
int poll_ms;
rte_delay_us(RTE_IXGBE_WAIT_100_US);
ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(rxq);
+ ixgbe_reset_rx_queue(hw, rxq);
} else
return -1;
ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint32_t txdctl;
int poll_ms;
ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint32_t txdctl;
uint32_t txtdh, txtdt;
int poll_ms;
ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_rx_queue *rxq;
- struct rte_pktmbuf_pool_private *mbp_priv;
+ struct ixgbe_rx_queue *rxq;
uint64_t bus_addr;
- uint32_t srrctl;
+ uint32_t srrctl, psrtype = 0;
uint16_t buf_size;
uint16_t i;
int ret;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
+ PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
+ "it should be power of 2");
+ return -1;
+ }
+
+ if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
+ PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
+ "it should be equal to or less than %d",
+ hw->mac.max_rx_queues);
+ return -1;
+ }
+
/*
* When the VF driver issues a IXGBE_VF_RESET request, the PF driver
* disables the VF receipt of packets if the PF MTU is > 1500.
* Configure Header Split
*/
if (dev->data->dev_conf.rxmode.header_split) {
-
- /* Must setup the PSRTYPE register */
- uint32_t psrtype;
- psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
-
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE(i), psrtype);
-
srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
} else
#endif
srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
* The value is in 1 KB resolution. Valid values can be from
* 1 KB to 16 KB.
*/
- mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
- buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM);
srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
IXGBE_SRRCTL_BSIZEPKT_MASK);
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- /* It adds dual VLAN length for supporting dual VLAN */
- if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ if (dev->data->dev_conf.rxmode.enable_scatter ||
+ /* It adds dual VLAN length for supporting dual VLAN */
+ (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
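+ /*
+ * The vector Rx path is only used when the ring size is a
+ * power of two (presumably so index wrap-around stays cheap);
+ * otherwise fall back to the scalar scattered receive function.
+ */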
#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ if (rte_is_power_of_2(rxq->nb_rx_desc))
+ dev->rx_pkt_burst =
+ ixgbe_recv_scattered_pkts_vec;
+ else
#endif
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
}
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
- if (!dev->data->scattered_rx)
- PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-#ifdef RTE_IXGBE_INC_VECTOR
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
-#else
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+#ifdef RTE_HEADER_SPLIT_ENABLE
+ if (dev->data->dev_conf.rxmode.header_split)
+ /* Must setup the PSRTYPE register */
+ psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
#endif
- dev->data->scattered_rx = 1;
- }
+
+ /* Set RQPL for VF RSS according to the max number of Rx queues */
+ psrtype |= (dev->data->nb_rx_queues >> 1) <<
+ IXGBE_PSRTYPE_RQPL_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
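+ /*
+ * Illustrative reading of the RQPL encoding above (check against the
+ * datasheet): nb_rx_queues >> 1 yields 0 for 1 queue, 1 for 2 queues
+ * and 2 for 4 queues, which selects the matching queues-per-pool value.
+ */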
return 0;
}
ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
+ struct ixgbe_tx_queue *txq;
uint64_t bus_addr;
uint32_t txctrl;
uint16_t i;
ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
- struct igb_tx_queue *txq;
- struct igb_rx_queue *rxq;
+ struct ixgbe_tx_queue *txq;
+ struct ixgbe_rx_queue *rxq;
uint32_t txdctl;
uint32_t rxdctl;
uint16_t i;
}
}
+
+/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
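+/*
+ * These definitions are weak symbols: when the vector implementation is
+ * built in, its strong definitions take precedence at link time, so the
+ * rest of the driver can reference the vector entry points unconditionally.
+ */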
+int __attribute__((weak))
+ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+ return -1;
+}
+
+uint16_t __attribute__((weak))
+ixgbe_recv_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+ixgbe_recv_scattered_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
+{
+ return -1;
+}