From: Pablo de Lara
Date: Thu, 23 Jul 2015 14:29:41 +0000 (+0100)
Subject: ixgbe: remove Rx bulk allocation option
X-Git-Tag: spdx-start~8650
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=7c574623baddfb8fa76a7435c0809a9119b8d485;p=dpdk.git

ixgbe: remove Rx bulk allocation option

The RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC config option is not really
necessary: the bulk alloc Rx function can be used anyway, as long as the
required preconditions are satisfied, and those are already checked at
runtime in the library.

Signed-off-by: Pablo de Lara
Acked-by: Konstantin Ananyev
---

diff --git a/config/common_bsdapp b/config/common_bsdapp
index e96e4a5bca..6d49cc89de 100644
--- a/config/common_bsdapp
+++ b/config/common_bsdapp
@@ -173,7 +173,6 @@ CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n
 CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
 CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n
 CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n
-CONFIG_RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC=y
 CONFIG_RTE_IXGBE_INC_VECTOR=n
 CONFIG_RTE_IXGBE_RX_OLFLAGS_DISABLE=n
 
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 579a5d794a..49dd8db8e1 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -171,7 +171,6 @@ CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n
 CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
 CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n
 CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n
-CONFIG_RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC=y
 CONFIG_RTE_IXGBE_INC_VECTOR=y
 CONFIG_RTE_IXGBE_RX_OLFLAGS_ENABLE=y
 
diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index 70cd591531..62cc0aab55 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -57,8 +57,6 @@ The following prerequisites apply:
 
 * To enable vPMD to work for RX, bulk allocation for Rx must be allowed.
 
-* The RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC=y configuration MACRO must be set before compiling the code.
-
 Ensure that the following pre-conditions are satisfied:
 
 * rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 6095cc2730..c032775315 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -119,8 +119,4 @@ DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_eal lib/librte_ether
 DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
 DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net
 
-ifeq ($(CONFIG_RTE_IXGBE_INC_VECTOR)$(CONFIG_RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC),yn)
-$(error The ixgbe vpmd depends on Rx bulk alloc)
-endif
-
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index db2454ca89..cbb16b69e6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1034,7 +1034,6 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 		IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
 }
 
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 /*
  * LOOK_AHEAD defines how many desc statuses to check beyond the
  * current descriptor.
@@ -1305,24 +1304,6 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rx;
 }
 
-#else
-
-/* Stub to avoid extra ifdefs */
-static uint16_t
-ixgbe_recv_pkts_bulk_alloc(__rte_unused void *rx_queue,
-		__rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
-{
-	return 0;
-}
-
-static inline int
-ixgbe_rx_alloc_bufs(__rte_unused struct ixgbe_rx_queue *rxq,
-		__rte_unused bool reset_mbuf)
-{
-	return -ENOMEM;
-}
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
-
 uint16_t
 ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
@@ -1708,7 +1689,6 @@ next_desc:
 				break;
 			}
 		}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 		else if (nb_hold > rxq->rx_free_thresh) {
 			uint16_t next_rdt = rxq->rx_free_trigger;
 
@@ -1727,7 +1707,6 @@ next_desc:
 				break;
 			}
 		}
-#endif
 
 		nb_hold++;
 		rxe = &sw_ring[rx_id];
@@ -2285,7 +2264,6 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
 				rxq->sw_ring[i].mbuf = NULL;
 			}
 		}
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 		if (rxq->rx_nb_avail) {
 			for (i = 0; i < rxq->rx_nb_avail; ++i) {
 				struct rte_mbuf *mb;
@@ -2294,7 +2272,6 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
 			}
 			rxq->rx_nb_avail = 0;
 		}
-#endif
 	}
 
 	if (rxq->sw_sc_ring)
@@ -2331,11 +2308,7 @@ ixgbe_dev_rx_queue_release(void *rxq)
  * function must be used.
  */
 static inline int __attribute__((cold))
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
-#else
-check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
-#endif
 {
 	int ret = 0;
 
@@ -2348,7 +2321,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
 	 * Scattered packets are not supported. This should be checked
 	 * outside of this function.
 	 */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	if (!(rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST)) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
 			     "rxq->rx_free_thresh=%d, "
@@ -2377,9 +2349,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
 			     RTE_PMD_IXGBE_RX_MAX_BURST);
 		ret = -EINVAL;
 	}
-#else
-	ret = -EINVAL;
-#endif
 
 	return ret;
 }
@@ -2415,7 +2384,6 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 		rxq->rx_ring[i] = zeroed_desc;
 	}
 
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	/*
 	 * initialize extra software ring entries. Space for these extra
 	 * entries is always allocated
@@ -2428,7 +2396,6 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
 	rxq->rx_nb_avail = 0;
 	rxq->rx_next_avail = 0;
 	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
-#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
 	rxq->rx_tail = 0;
 	rxq->nb_rx_hold = 0;
 	rxq->pkt_first_seg = NULL;
@@ -4015,9 +3982,8 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
 		dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
 	} else {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
-				    "satisfied, or Scattered Rx is requested, "
-				    "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
-				    "is not enabled (port=%d).",
+				    "satisfied, or Scattered Rx is requested "
+				    "(port=%d).",
 				    dev->data->port_id);
 
 		dev->rx_pkt_burst = ixgbe_recv_pkts;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 1557438ed3..113682a85d 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -38,13 +38,7 @@
 #define RTE_PMD_IXGBE_TX_MAX_BURST 32
 #define RTE_PMD_IXGBE_RX_MAX_BURST 32
 
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
-#define RTE_IXGBE_DESCS_PER_LOOP 4
-#elif defined(RTE_IXGBE_INC_VECTOR)
-#define RTE_IXGBE_DESCS_PER_LOOP 4
-#else
-#define RTE_IXGBE_DESCS_PER_LOOP 1
-#endif
+#define RTE_IXGBE_DESCS_PER_LOOP 4
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
 	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
@@ -116,11 +110,9 @@ struct ixgbe_rx_queue {
 	uint16_t nb_rx_desc; /**< number of RX descriptors. */
 	uint16_t rx_tail; /**< current value of RDT register. */
 	uint16_t nb_rx_hold; /**< number of held free RX desc. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
 	uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
 	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
-#endif
 	uint16_t rx_using_sse;
 	/**< indicates that vector RX is in use */
 #ifdef RTE_IXGBE_INC_VECTOR
@@ -134,12 +126,10 @@ struct ixgbe_rx_queue {
 	uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
 	uint8_t rx_deferred_start; /**< not in global dev start. */
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
 	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
 	struct rte_mbuf fake_mbuf;
 	/** hold packets to return to application */
 	struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST*2];
-#endif
 };
 
 /**
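
Note (illustrative, not part of the patch itself): with the build-time option removed, whether ixgbe_recv_pkts_bulk_alloc() is used now depends only on the queue parameters passed to rte_eth_rx_queue_setup(), which the driver validates in check_rx_burst_bulk_alloc_preconditions(). Below is a minimal application-side sketch of settings that satisfy those preconditions; the helper name, ring size and threshold values are assumptions chosen for illustration.

#include <string.h>

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/*
 * Hypothetical helper: configure an ixgbe Rx queue so the bulk-alloc
 * preconditions hold and the driver can pick ixgbe_recv_pkts_bulk_alloc()
 * at runtime.
 */
static int
setup_bulk_alloc_rxq(uint8_t port_id, uint16_t queue_id,
		     struct rte_mempool *mb_pool)
{
	struct rte_eth_rxconf rx_conf;
	/* Ring size chosen as a multiple of rx_free_thresh. */
	const uint16_t nb_rx_desc = 128;

	memset(&rx_conf, 0, sizeof(rx_conf));
	/*
	 * Must be >= RTE_PMD_IXGBE_RX_MAX_BURST (32), smaller than the ring
	 * size, and divide the ring size evenly.
	 */
	rx_conf.rx_free_thresh = 32;

	return rte_eth_rx_queue_setup(port_id, queue_id, nb_rx_desc,
				      rte_socket_id(), &rx_conf, mb_pool);
}

If the chosen values do not satisfy the checks, the driver simply falls back to ixgbe_recv_pkts(), as the updated log message in ixgbe_set_rx_function() indicates.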