CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n
-CONFIG_RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC=y
CONFIG_RTE_IXGBE_INC_VECTOR=n
CONFIG_RTE_IXGBE_RX_OLFLAGS_ENABLE=n
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n
-CONFIG_RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC=y
CONFIG_RTE_IXGBE_INC_VECTOR=y
CONFIG_RTE_IXGBE_RX_OLFLAGS_ENABLE=y
* To enable vPMD to work for Rx, bulk allocation for Rx must be allowed.
-* The RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC=y configuration MACRO must be set before compiling the code.
-
Ensure that the following preconditions are satisfied (a setup sketch follows the list):
* rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
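As an illustration only (not part of the patch), a minimal sketch of an Rx
queue setup that satisfies this precondition; the port/mempool plumbing, the
ring size of 128, and the helper name are assumptions, and exact ethdev type
widths vary across DPDK versions:

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hypothetical helper: configure one Rx queue so that
 * rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST (32). */
static int
setup_bulk_alloc_rxq(uint16_t port_id, uint16_t queue_id,
		     struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	const uint16_t nb_rxd = 128;	/* a multiple of rx_free_thresh */

	rte_eth_dev_info_get(port_id, &dev_info);
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.rx_free_thresh = 32;	/* == RTE_PMD_IXGBE_RX_MAX_BURST */

	return rte_eth_rx_queue_setup(port_id, queue_id, nb_rxd,
				      rte_eth_dev_socket_id(port_id),
				      &rxq_conf, mb_pool);
}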
DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net
-ifeq ($(CONFIG_RTE_IXGBE_INC_VECTOR)$(CONFIG_RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC),yn)
-$(error The ixgbe vpmd depends on Rx bulk alloc)
-endif
-
include $(RTE_SDK)/mk/rte.lib.mk
IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
* LOOK_AHEAD defines how many desc statuses to check beyond the
* current descriptor.
return nb_rx;
}
-#else
-
-/* Stub to avoid extra ifdefs */
-static uint16_t
-ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
- __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
-{
- return 0;
-}
-
-static inline int
-ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq,
- __rte_unused bool reset_mbuf)
-{
- return -ENOMEM;
-}
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
-
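The stubs removed above existed only so that references to
ixgbe_recv_pkts_bulk_alloc() and ixgbe_rx_alloc_bufs() would still compile
when bulk allocation was configured out; with the option gone, the real
implementations are always built. The LOOK_AHEAD scan mentioned in the
comment above works roughly as follows; this is a simplified sketch, not the
driver's exact code, and it assumes the driver's internal descriptor
definitions (union ixgbe_adv_rx_desc, IXGBE_RXDADV_STAT_DD):

#define LOOK_AHEAD 8	/* the driver checks 8 statuses per iteration */

/* Count contiguous completed descriptors in one look-ahead window,
 * stopping at the first one hardware has not written back yet. */
static inline int
scan_ring_window(volatile union ixgbe_adv_rx_desc *rxdp)
{
	int nb_dd;

	for (nb_dd = 0; nb_dd < LOOK_AHEAD; nb_dd++) {
		uint32_t status = rxdp[nb_dd].wb.upper.status_error;

		if (!(status & IXGBE_RXDADV_STAT_DD))
			break;
	}
	return nb_dd;	/* packets that can be staged from this window */
}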
uint16_t
ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
break;
}
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
else if (nb_hold > rxq->rx_free_thresh) {
uint16_t next_rdt = rxq->rx_free_trigger;
break;
}
}
-#endif
nb_hold++;
rxe = &sw_ring[rx_id];
rxq->sw_ring[i].mbuf = NULL;
}
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
if (rxq->rx_nb_avail) {
for (i = 0; i < rxq->rx_nb_avail; ++i) {
struct rte_mbuf *mb;
}
rxq->rx_nb_avail = 0;
}
-#endif
}
if (rxq->sw_sc_ring)
* function must be used.
*/
static inline int __attribute__((cold))
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
-#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
-#endif
{
int ret = 0;
* Scattered packets are not supported. This should be checked
* outside of this function.
*/
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
"rxq->rx_free_thresh=%d, "
RTE_PMD_IXGBE_RX_MAX_BURST);
ret = -EINVAL;
}
-#else
- ret = -EINVAL;
-#endif
return ret;
}
rxq->rx_ring[i] = zeroed_desc;
}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/*
* initialize extra software ring entries. Space for these extra
* entries is always allocated
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
- "satisfied, or Scattered Rx is requested, "
- "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
- "is not enabled (port=%d).",
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
dev->data->port_id);
dev->rx_pkt_burst = ixgbe_recv_pkts;
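With the compile-time option gone, the choice between the two receive paths
shown above is made purely at runtime. A hedged sketch of the selection
pattern (rx_bulk_alloc_allowed stands for the adapter flag accumulated while
each queue's preconditions were checked; field names are assumptions):

	/* Sketch only: pick the Rx burst function once all queues are set up. */
	if (adapter->rx_bulk_alloc_allowed && !dev->data->scattered_rx)
		dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
	else
		dev->rx_pkt_burst = ixgbe_recv_pkts;	/* scalar fallback */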
#define RTE_PMD_IXGBE_TX_MAX_BURST 32
#define RTE_PMD_IXGBE_RX_MAX_BURST 32
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-#define RTE_IXGBE_DESCS_PER_LOOP 4
-#elif defined(RTE_IXGBE_INC_VECTOR)
-#define RTE_IXGBE_DESCS_PER_LOOP 4
-#else
-#define RTE_IXGBE_DESCS_PER_LOOP 1
-#endif
+#define RTE_IXGBE_DESCS_PER_LOOP 4
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
uint16_t nb_rx_desc; /**< number of RX descriptors. */
uint16_t rx_tail; /**< current value of RDT register. */
uint16_t nb_rx_hold; /**< number of held free RX desc. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
-#endif
uint16_t rx_using_sse;
/**< indicates that vector RX is in use */
#ifdef RTE_IXGBE_INC_VECTOR
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint8_t rx_deferred_start; /**< not in global dev start. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
struct rte_mbuf fake_mbuf;
/** hold packets to return to application */
struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2];
-#endif
};
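The fields made unconditional above implement the two-stage receive used by
bulk allocation: completed packets are staged into rx_stage[], and later
calls drain the stage before scanning the hardware ring again (fake_mbuf
backs the extra sw_ring entries so the look-ahead scan can run past the ring
end without bounds checks). A simplified drain sketch, modeled on the
driver's staging logic rather than copied from it:

/* Return up to nb_pkts mbufs already staged in rxq->rx_stage[]. */
static inline uint16_t
drain_stage_sketch(struct ixgbe_rx_queue *rxq,
		   struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
	uint16_t i;

	if (nb_pkts > rxq->rx_nb_avail)
		nb_pkts = rxq->rx_nb_avail;	/* never over-deliver */

	for (i = 0; i < nb_pkts; ++i)
		rx_pkts[i] = stage[i];

	/* Advance the staging cursor and shrink the staged count. */
	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);

	return nb_pkts;
}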
/**