rc = -ENOMEM;
goto err_out;
}
+ filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
goto out;
}
}
}
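Context for the two OUTERMOST additions (this one and its twin at the second -ENOMEM block below): at this point the flag is only recorded on the software filter; it reaches firmware when the filter is later programmed. A hedged sketch of that later step, assuming the usual HWRM request pattern in this driver:

	/* Sketch only -- the programming step is outside this diff. The
	 * software filter's flags, OUTERMOST included, are copied into the
	 * HWRM l2_filter_alloc request that is sent to firmware.
	 */
	struct hwrm_cfa_l2_filter_alloc_input req = { 0 };

	req.flags = rte_cpu_to_le_32(filter->flags);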
nb_q_per_grp = bp->rx_cp_nr_rings / pools;
+ bp->rx_num_qs_per_vnic = nb_q_per_grp;
PMD_DRV_LOG(DEBUG, "pools = %u nb_q_per_grp = %u\n",
pools, nb_q_per_grp);
start_grp_id = 0;
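A worked example of the split (illustrative numbers, not from the patch):

	/* e.g. bp->rx_cp_nr_rings = 8, pools = 4
	 *   => nb_q_per_grp = 8 / 4 = 2 Rx queues backing each VNIC,
	 *      and the new bp->rx_num_qs_per_vnic caches that count so
	 *      later code does not have to rederive it.
	 */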
rc = -ENOMEM;
goto err_out;
}
+ filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
/*
* TODO: Configure & associate CFA rule for
* each VNIC for each VMDq with MACVLAN, MACVLAN+TC
out:
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
- uint16_t hash_type = 0;
if (bp->flags & BNXT_FLAG_UPDATE_HASH) {
rss = &bp->rss_conf;
bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
}
- if (rss->rss_hf & ETH_RSS_IPV4)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
- if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
- if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
- if (rss->rss_hf & ETH_RSS_IPV6)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
- if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
- if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
- hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
-
for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
- vnic->hash_type = hash_type;
+ vnic->hash_type =
+ bnxt_rte_to_hwrm_hash_types(rss->rss_hf);
/*
* Use the supplied key if the key length is
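The new bnxt_rte_to_hwrm_hash_types() helper centralizes the mapping the deleted lines did inline. A minimal sketch, reconstructed from the removed flag translation (only the uint64_t/uint16_t signature is an assumption):

	static uint16_t bnxt_rte_to_hwrm_hash_types(uint64_t rte_type)
	{
		uint16_t hwrm_type = 0;

		if (rte_type & ETH_RSS_IPV4)
			hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		if (rte_type & ETH_RSS_NONFRAG_IPV4_TCP)
			hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		if (rte_type & ETH_RSS_NONFRAG_IPV4_UDP)
			hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		if (rte_type & ETH_RSS_IPV6)
			hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		if (rte_type & ETH_RSS_NONFRAG_IPV6_TCP)
			hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		if (rte_type & ETH_RSS_NONFRAG_IPV6_UDP)
			hwrm_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

		return hwrm_type;
	}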
struct bnxt_tpa_info *tpa_info;
uint16_t i;
+ if (!rxq)
+ return;
+
rte_spinlock_lock(&rxq->lock);
- if (rxq) {
- sw_ring = rxq->rx_ring->rx_buf_ring;
- if (sw_ring) {
- for (i = 0;
- i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
- if (sw_ring[i].mbuf) {
- rte_pktmbuf_free_seg(sw_ring[i].mbuf);
- sw_ring[i].mbuf = NULL;
- }
+ sw_ring = rxq->rx_ring->rx_buf_ring;
+ if (sw_ring) {
+ for (i = 0;
+ i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
+ if (sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(sw_ring[i].mbuf);
+ sw_ring[i].mbuf = NULL;
}
}
- /* Free up mbufs in Agg ring */
- sw_ring = rxq->rx_ring->ag_buf_ring;
- if (sw_ring) {
- for (i = 0;
- i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
- if (sw_ring[i].mbuf) {
- rte_pktmbuf_free_seg(sw_ring[i].mbuf);
- sw_ring[i].mbuf = NULL;
- }
+ }
+ /* Free up mbufs in Agg ring */
+ sw_ring = rxq->rx_ring->ag_buf_ring;
+ if (sw_ring) {
+ for (i = 0;
+ i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
+ if (sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(sw_ring[i].mbuf);
+ sw_ring[i].mbuf = NULL;
}
}
+ }
- /* Free up mbufs in TPA */
- tpa_info = rxq->rx_ring->tpa_info;
- if (tpa_info) {
- for (i = 0; i < BNXT_TPA_MAX; i++) {
- if (tpa_info[i].mbuf) {
- rte_pktmbuf_free_seg(tpa_info[i].mbuf);
- tpa_info[i].mbuf = NULL;
- }
+ /* Free up mbufs in TPA */
+ tpa_info = rxq->rx_ring->tpa_info;
+ if (tpa_info) {
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ if (tpa_info[i].mbuf) {
+ rte_pktmbuf_free_seg(tpa_info[i].mbuf);
+ tpa_info[i].mbuf = NULL;
}
}
}
}
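The three free loops the hunk re-indents are structurally identical; a hypothetical helper (name, signature, and the struct bnxt_sw_rx_bd element type are assumptions, not part of the patch) could collapse the two buffer-ring loops:

	/* Hypothetical refactor: free every mbuf held by a software ring. */
	static void bnxt_sw_ring_free_mbufs(struct bnxt_sw_rx_bd *sw_ring,
					    uint32_t count)
	{
		uint32_t i;

		if (!sw_ring)
			return;
		for (i = 0; i < count; i++) {
			if (sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(sw_ring[i].mbuf);
				sw_ring[i].mbuf = NULL;
			}
		}
	}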
rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
- rxq->rx_deferred_start = rx_conf->rx_deferred_start;
- queue_state = rxq->rx_deferred_start ? RTE_ETH_QUEUE_STATE_STOPPED :
- RTE_ETH_QUEUE_STATE_STARTED;
+ /* rxq 0 must not be stopped when used as async CPR */
+ if (!BNXT_NUM_ASYNC_CPR(bp) && queue_idx == 0)
+ rxq->rx_deferred_start = false;
+ else
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+
+ if (rxq->rx_deferred_start) {
+ queue_state = RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_started = false;
+ } else {
+ queue_state = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq->rx_started = true;
+ }
eth_dev->data->rx_queue_state[queue_idx] = queue_state;
rte_spinlock_init(&rxq->lock);
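The rx_started flag introduced above (together with the rxq 0 special case: without a dedicated async completion ring, queue 0's CPR doubles as the async CPR, so it must never come up stopped) is what the buffer-posting path consults, per the comment in the queue-start hunk below. A hedged illustration of the consumer side, since the actual check lives in the Rx ring code outside this diff:

	/* Illustration only: skip buffer replenishment for a queue that is
	 * not started; rxq->rx_started is maintained by the setup and
	 * start/stop paths in this patch.
	 */
	if (unlikely(!rxq->rx_started))
		return 0;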
return -EINVAL;
}
+ /* Set the queue state to started here.
+ * We check the status of the queue while posting buffers.
+ * If the queue is not started, we do not post buffers for Rx.
+ */
+ rxq->rx_started = true;
bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
rc = bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
if (rc)
vnic->fw_grp_ids[rx_queue_id] =
bp->grp_info[rx_queue_id].fw_grp_id;
+ PMD_DRV_LOG(DEBUG,
+ "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id].fw_grp_id);
}
- PMD_DRV_LOG(DEBUG,
- "vnic = %p fw_grp_id = %d\n",
- vnic, bp->grp_info[rx_queue_id].fw_grp_id);
-
- rc = bnxt_vnic_rss_configure(bp, vnic);
+ PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
+ if (vnic->rx_queue_cnt > 1)
+ rc = bnxt_vnic_rss_configure(bp, vnic);
}
- if (rc == 0) {
+ if (rc == 0)
dev->data->rx_queue_state[rx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
- rxq->rx_deferred_start = false;
- }
+ else
+ rxq->rx_started = false;
PMD_DRV_LOG(INFO,
"queue %d, rx_deferred_start %d, state %d!\n",
}
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- rxq->rx_deferred_start = true;
+ rxq->rx_started = false;
PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
vnic = rxq->vnic;
if (BNXT_HAS_RING_GRPS(bp))
vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
- rc = bnxt_vnic_rss_configure(bp, vnic);
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue Count %d\n", vnic->rx_queue_cnt);
+ if (vnic->rx_queue_cnt > 1)
+ rc = bnxt_vnic_rss_configure(bp, vnic);
}
if (rc == 0)