void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
- struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
-
- if (cpr->hw_stats)
- cpr->hw_stats = NULL;
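+ /*
+  * hw_stats points into the ring memzone; that memory is freed
+  * when the memzone is released, so only the reference is
+  * dropped here.
+  */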
+ if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
+ rxq->cp_ring->hw_stats = NULL;
}
int bnxt_mq_rx_configure(struct bnxt *bp)
/* Single queue mode */
if (bp->rx_cp_nr_rings < 2) {
- vnic = bnxt_alloc_vnic(bp);
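+ /*
+  * VNICs are pre-allocated in the bp->vnic_info[] array, so the
+  * first entry is used directly instead of pulling one off the
+  * former ff_pool free list.
+  */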
+ vnic = &bp->vnic_info[0];
if (!vnic) {
PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
vnic->flags |= BNXT_VNIC_INFO_BCAST;
- STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
bp->nr_vnics++;
rxq = bp->eth_dev->data->rx_queues[0];
rxq->vnic = vnic;
vnic->func_default = true;
- vnic->ff_pool_idx = 0;
vnic->start_grp_id = 0;
vnic->end_grp_id = vnic->start_grp_id;
filter = bnxt_alloc_filter(bp);
switch (dev_conf->rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_RSS:
+ /* FALLTHROUGH */
case ETH_MQ_RX_VMDQ_ONLY:
/* ETH_8/64_POOLs */
pools = conf->nb_queue_pools;
/* For each pool, allocate MACVLAN CFA rule & VNIC */
RTE_MIN(bp->max_l2_ctx,
RTE_MIN(bp->max_rsscos_ctx,
ETH_64_POOLS)));
+ PMD_DRV_LOG(DEBUG,
+ "pools = %u max_pools = %u\n",
+ pools, max_pools);
if (pools > max_pools)
pools = max_pools;
break;
goto err_out;
}
}
-
nb_q_per_grp = bp->rx_cp_nr_rings / pools;
+ PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
+ pools, nb_q_per_grp);
start_grp_id = 0;
end_grp_id = nb_q_per_grp;
for (i = 0; i < pools; i++) {
- vnic = bnxt_alloc_vnic(bp);
+ vnic = &bp->vnic_info[i];
if (!vnic) {
PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
vnic->flags |= BNXT_VNIC_INFO_BCAST;
- STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
bp->nr_vnics++;
for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
rxq = bp->eth_dev->data->rx_queues[ring_idx];
rxq->vnic = vnic;
+ PMD_DRV_LOG(DEBUG,
+ "rxq[%d] = %p vnic[%d] = %p\n",
+ ring_idx, rxq, i, vnic);
}
if (i == 0) {
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
}
vnic->func_default = true;
}
- vnic->ff_pool_idx = i;
vnic->start_grp_id = start_grp_id;
vnic->end_grp_id = end_grp_id;
hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
for (i = 0; i < bp->nr_vnics; i++) {
- STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ vnic = &bp->vnic_info[i];
vnic->hash_type = hash_type;
/*
rss->rss_key_len <= HW_HASH_KEY_SIZE)
memcpy(vnic->rss_hash_key,
rss->rss_key, rss->rss_key_len);
- }
}
}
return rc;
}
-static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
+void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
struct bnxt_sw_rx_bd *sw_ring;
struct bnxt_tpa_info *tpa_info;
uint16_t i;
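+ /*
+  * Serialize against the Rx burst path so ring processing cannot
+  * race with the mbufs being freed underneath it.
+  */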
+ if (!rxq)
+ return;
+
+ rte_spinlock_lock(&rxq->lock);
+
if (rxq) {
sw_ring = rxq->rx_ring->rx_buf_ring;
if (sw_ring) {
- for (i = 0; i < rxq->nb_rx_desc; i++) {
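+ /*
+  * Walk the full ring size rather than nb_rx_desc: the Rx and
+  * aggregation rings may hold more slots than the requested
+  * descriptor count.
+  */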
+ for (i = 0;
+ i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
if (sw_ring[i].mbuf) {
rte_pktmbuf_free_seg(sw_ring[i].mbuf);
sw_ring[i].mbuf = NULL;
/* Free up mbufs in Agg ring */
sw_ring = rxq->rx_ring->ag_buf_ring;
if (sw_ring) {
- for (i = 0; i < rxq->nb_rx_desc; i++) {
+ for (i = 0;
+ i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
if (sw_ring[i].mbuf) {
rte_pktmbuf_free_seg(sw_ring[i].mbuf);
sw_ring[i].mbuf = NULL;
}
}
}
+
+ rte_spinlock_unlock(&rxq->lock);
}
void bnxt_free_rx_mbufs(struct bnxt *bp)
bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
bnxt_free_rxq_stats(rxq);
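+ /* Free the memzone backing the ring and stats memory. */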
+ rte_memzone_free(rxq->mz);
+ rxq->mz = NULL;
rte_free(rxq);
}
uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
struct bnxt_rx_queue *rxq;
int rc = 0;
+ uint8_t queue_state;
if (queue_idx >= bp->max_rx_rings) {
PMD_DRV_LOG(ERR,
"Cannot create Rx ring %d. Only %d rings available\n",
queue_idx, bp->max_rx_rings);
- return -ENOSPC;
+ return -EINVAL;
}
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
- rxq->crc_len = rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP ?
- 0 : ETHER_CRC_LEN;
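+ /*
+  * CRC stripping is now the default; DEV_RX_OFFLOAD_KEEP_CRC
+  * means the 4-byte FCS stays on the packet and must be
+  * accounted for in crc_len.
+  */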
+ if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
eth_dev->data->rx_queues[queue_idx] = rxq;
/* Allocate RX ring hardware descriptors */
- if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
+ if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
"rxr")) {
PMD_DRV_LOG(ERR,
"ring_dma_zone_reserve for rx_ring failed!\n");
}
rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
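+ /*
+  * A deferred-start queue is reported as stopped and only begins
+  * receiving when rx_queue_start() is called explicitly.
+  */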
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ queue_state = rxq->rx_deferred_start ? RTE_ETH_QUEUE_STATE_STOPPED :
+ RTE_ETH_QUEUE_STATE_STARTED;
+ eth_dev->data->rx_queue_state[queue_idx] = queue_state;
+ rte_spinlock_init(&rxq->lock);
+
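+ /* Initialize per-queue state used by the vector Rx path. */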
+#ifdef RTE_ARCH_X86
+ bnxt_rxq_vec_setup(rxq);
+#endif
+
out:
return rc;
}
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
struct bnxt_vnic_info *vnic = NULL;
+ int rc = 0;
if (rxq == NULL) {
PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
return -EINVAL;
}
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- rxq->rx_deferred_start = false;
+
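+ /*
+  * Recreate the Rx ring in firmware so a restarted queue begins
+  * with clean ring state.
+  */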
+ bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
+ rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
+ if (rc)
+ return rc;
PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
vnic = rxq->vnic;
+
if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
return 0;
- PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
- vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
+
+ PMD_DRV_LOG(DEBUG,
+ "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id].fw_grp_id);
+
vnic->fw_grp_ids[rx_queue_id] =
- bp->grp_info[rx_queue_id + 1].fw_grp_id;
- return bnxt_vnic_rss_configure(bp, vnic);
+ bp->grp_info[rx_queue_id].fw_grp_id;
+ rc = bnxt_vnic_rss_configure(bp, vnic);
}
- return 0;
+ if (rc == 0)
+ rxq->rx_deferred_start = false;
+
+ return rc;
}
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
- struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
struct bnxt_vnic_info *vnic = NULL;
+ struct bnxt_rx_queue *rxq = NULL;
+ int rc = 0;
+
+ /* Rx CQ 0 also works as Default CQ for async notifications */
+ if (!rx_queue_id) {
+ PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ rxq = bp->rx_queues[rx_queue_id];
if (rxq == NULL) {
PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
return -EINVAL;
}

if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
vnic = rxq->vnic;
vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
- return bnxt_vnic_rss_configure(bp, vnic);
+ rc = bnxt_vnic_rss_configure(bp, vnic);
}
- return 0;
+
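+ /* The ring is no longer polled; drop any mbufs still held in it. */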
+ if (rc == 0)
+ bnxt_rx_queue_release_mbufs(rxq);
+
+ return rc;
}