summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
66894d9)
Since we know in advance that we're going to fill in multiple
descriptors, it's convenient to allocate the buffers in batches.
Signed-off-by: Alex Marginean <alexandru.marginean@nxp.com>
Acked-by: Gagandeep Singh <g.singh@nxp.com>
#include "enetc.h"
#include "enetc_logs.h"
#include "enetc.h"
#include "enetc_logs.h"
+#define ENETC_RXBD_BUNDLE 16 /* Number of buffers to allocate at once */
+
static int
enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
{
static int
enetc_clean_tx_ring(struct enetc_bdr *tx_ring)
{
{
struct enetc_swbd *rx_swbd;
union enetc_rx_bd *rxbd;
{
struct enetc_swbd *rx_swbd;
union enetc_rx_bd *rxbd;
+ int i, j, k = ENETC_RXBD_BUNDLE;
+ struct rte_mbuf *m[ENETC_RXBD_BUNDLE];
+ struct rte_mempool *mb_pool;
i = rx_ring->next_to_use;
i = rx_ring->next_to_use;
+ mb_pool = rx_ring->mb_pool;
rx_swbd = &rx_ring->q_swbd[i];
rxbd = ENETC_RXBD(*rx_ring, i);
for (j = 0; j < buff_cnt; j++) {
rx_swbd = &rx_ring->q_swbd[i];
rxbd = ENETC_RXBD(*rx_ring, i);
for (j = 0; j < buff_cnt; j++) {
- rx_swbd->buffer_addr = (void *)(uintptr_t)
- rte_cpu_to_le_64((uint64_t)(uintptr_t)
- rte_pktmbuf_alloc(rx_ring->mb_pool));
+ /* bulk alloc for the next up to ENETC_RXBD_BUNDLE (16) BDs */
+ if (k == ENETC_RXBD_BUNDLE) {
+ k = 0;
+ int m_cnt = RTE_MIN(buff_cnt - j, ENETC_RXBD_BUNDLE);
+
+ if (rte_pktmbuf_alloc_bulk(mb_pool, m, m_cnt))
+ return -1;
+ }
+
+ rx_swbd->buffer_addr = m[k];
rxbd->w.addr = (uint64_t)(uintptr_t)
rx_swbd->buffer_addr->buf_iova +
rx_swbd->buffer_addr->data_off;
rxbd->w.addr = (uint64_t)(uintptr_t)
rx_swbd->buffer_addr->buf_iova +
rx_swbd->buffer_addr->data_off;
if (unlikely(i == rx_ring->bd_count)) {
i = 0;
rxbd = ENETC_RXBD(*rx_ring, 0);
if (unlikely(i == rx_ring->bd_count)) {
i = 0;
rxbd = ENETC_RXBD(*rx_ring, 0);