rc = -ENOMEM;
goto err_out;
}
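+ /* Mark the VNIC so the RX mask programmed into the firmware also accepts broadcast frames */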
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
bp->nr_vnics++;
}
/* For each pool, allocate MACVLAN CFA rule & VNIC */
if (!pools) {
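+ /* No pool count configured: default to the most pools the device resources (VNICs, L2 and RSS contexts) can back, capped at 64 */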
+ pools = RTE_MIN(bp->max_vnics,
+ RTE_MIN(bp->max_l2_ctx,
+ RTE_MIN(bp->max_rsscos_ctx, ETH_64_POOLS)));
RTE_LOG(ERR, PMD,
- "VMDq pool not set, defaulted to 64\n");
- pools = ETH_64_POOLS;
+ "VMDq pool not set, defaulted to %d\n", pools);
}
rc = -ENOMEM;
goto err_out;
}
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
bp->nr_vnics++;
rc = -ENOMEM;
goto err_out;
}
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
/* Partition the rx queues for the single pool */
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
rxq = bp->eth_dev->data->rx_queues[i];
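/* Return every mbuf the RX queue still holds to its mempool */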
static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
struct bnxt_sw_rx_bd *sw_ring;
+ struct bnxt_tpa_info *tpa_info;
uint16_t i;
if (rxq) {
sw_ring = rxq->rx_ring->rx_buf_ring;
if (sw_ring) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
if (sw_ring[i].mbuf) {
rte_pktmbuf_free_seg(sw_ring[i].mbuf);
sw_ring[i].mbuf = NULL;
}
}
}
+
+ /* Free up mbufs in TPA */
+ tpa_info = rxq->rx_ring->tpa_info;
+ if (tpa_info) {
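+ /* Slots of in-flight aggregations may each still hold an mbuf */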
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ if (tpa_info[i].mbuf) {
+ rte_pktmbuf_free_seg(tpa_info[i].mbuf);
+ tpa_info[i].mbuf = NULL;
+ }
+ }
+ }
}
}
int rc = 0;
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+ RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
rc = -EINVAL;
goto out;
}
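/* Allocate the queue structure from memory local to the requested NUMA socket */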
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
- RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!");
+ RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
rc = -ENOMEM;
goto out;
}
/* Allocate RX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
"rxr")) {
- RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!");
+ RTE_LOG(ERR, PMD,
+ "ring_dma_zone_reserve for rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rc = -ENOMEM;
goto out;