-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
- struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
-
- if (cpr->hw_stats)
- cpr->hw_stats = NULL;
+ if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
+ rxq->cp_ring->hw_stats = NULL;
}
int bnxt_mq_rx_configure(struct bnxt *bp)
{
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
- unsigned int i, j, nb_q_per_grp, ring_idx;
- int start_grp_id, end_grp_id, rc = 0;
+ const struct rte_eth_vmdq_rx_conf *conf =
+ &dev_conf->rx_adv_conf.vmdq_rx_conf;
+ unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
+ int start_grp_id, end_grp_id = 1, rc = 0;
struct bnxt_vnic_info *vnic;
struct bnxt_filter_info *filter;
+ enum rte_eth_nb_pools pools = bp->rx_cp_nr_rings, max_pools = 0;
struct bnxt_rx_queue *rxq;
bp->nr_vnics = 0;
if (bp->rx_cp_nr_rings < 2) {
vnic = bnxt_alloc_vnic(bp);
if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
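+ /* Accept broadcast traffic on the default VNIC. */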
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
bp->nr_vnics++;
vnic->func_default = true;
vnic->ff_pool_idx = 0;
- vnic->start_grp_id = 1;
- vnic->end_grp_id = vnic->start_grp_id +
- bp->rx_cp_nr_rings - 1;
+ vnic->start_grp_id = 0;
+ vnic->end_grp_id = vnic->start_grp_id;
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
}
/* Multi-queue mode */
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
- enum rte_eth_nb_pools pools;
switch (dev_conf->rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_RSS:
case ETH_MQ_RX_VMDQ_ONLY:
- {
- const struct rte_eth_vmdq_rx_conf *conf =
- &dev_conf->rx_adv_conf.vmdq_rx_conf;
-
- /* ETH_8/64_POOLs */
- pools = conf->nb_queue_pools;
- break;
- }
+ /* FALLTHROUGH */
+ /* ETH_8/64_POOLs */
+ pools = conf->nb_queue_pools;
+ /* For each pool, allocate MACVLAN CFA rule & VNIC */
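+ /* Cap the pool count at the device limits (VNICs, L2/RSS contexts, 64 pools). */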
+ max_pools = RTE_MIN(bp->max_vnics,
+ RTE_MIN(bp->max_l2_ctx,
+ RTE_MIN(bp->max_rsscos_ctx,
+ ETH_64_POOLS)));
+ if (pools > max_pools)
+ pools = max_pools;
+ break;
+ case ETH_MQ_RX_RSS:
+ pools = 1;
+ break;
default:
- RTE_LOG(ERR, PMD, "Unsupported mq_mod %d\n",
+ PMD_DRV_LOG(ERR, "Unsupported mq_mode %d\n",
dev_conf->rxmode.mq_mode);
rc = -EINVAL;
goto err_out;
}
- /* For each pool, allocate MACVLAN CFA rule & VNIC */
- if (!pools) {
- RTE_LOG(ERR, PMD,
- "VMDq pool not set, defaulted to 64\n");
- pools = ETH_64_POOLS;
+ }
+
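+ /* Split the Rx rings evenly across the pools; each pool takes a contiguous range of ring groups. */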
+ nb_q_per_grp = bp->rx_cp_nr_rings / pools;
+ start_grp_id = 0;
+ end_grp_id = nb_q_per_grp;
+
+ for (i = 0; i < pools; i++) {
+ vnic = bnxt_alloc_vnic(bp);
+ if (!vnic) {
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
}
- nb_q_per_grp = bp->rx_cp_nr_rings / pools;
- start_grp_id = 1;
- end_grp_id = start_grp_id + nb_q_per_grp - 1;
-
- ring_idx = 0;
- for (i = 0; i < pools; i++) {
- vnic = bnxt_alloc_vnic(bp);
- if (!vnic) {
- RTE_LOG(ERR, PMD,
- "VNIC alloc failed\n");
- rc = -ENOMEM;
- goto err_out;
- }
- STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
- bp->nr_vnics++;
+ vnic->flags |= BNXT_VNIC_INFO_BCAST;
+ STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
+ bp->nr_vnics++;
- for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
- rxq = bp->eth_dev->data->rx_queues[ring_idx];
- rxq->vnic = vnic;
- }
- if (i == 0)
- vnic->func_default = true;
- vnic->ff_pool_idx = i;
- vnic->start_grp_id = start_grp_id;
- vnic->end_grp_id = end_grp_id;
-
- filter = bnxt_alloc_filter(bp);
- if (!filter) {
- RTE_LOG(ERR, PMD,
- "L2 filter alloc failed\n");
- rc = -ENOMEM;
- goto err_out;
+ for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
+ rxq = bp->eth_dev->data->rx_queues[ring_idx];
+ rxq->vnic = vnic;
+ }
+ if (i == 0) {
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
+ bp->eth_dev->data->promiscuous = 1;
+ vnic->flags |= BNXT_VNIC_INFO_PROMISC;
}
- /*
- * TODO: Configure & associate CFA rule for
- * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
- */
- STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
-
- start_grp_id = end_grp_id + 1;
- end_grp_id += nb_q_per_grp;
+ vnic->func_default = true;
}
- goto out;
- }
+ vnic->ff_pool_idx = i;
+ vnic->start_grp_id = start_grp_id;
+ vnic->end_grp_id = end_grp_id;
+
+ if (i) {
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
+ !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
+ vnic->rss_dflt_cr = true;
+ goto skip_filter_allocation;
+ }
+ filter = bnxt_alloc_filter(bp);
+ if (!filter) {
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ /*
+ * TODO: Configure & associate CFA rule for
+ * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
+ */
+ STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
- /* Non-VMDq mode - RSS, DCB, RSS+DCB */
- /* Init default VNIC for RSS or DCB only */
- vnic = bnxt_alloc_vnic(bp);
- if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
- rc = -ENOMEM;
- goto err_out;
- }
- /* Partition the rx queues for the single pool */
- for (i = 0; i < bp->rx_cp_nr_rings; i++) {
- rxq = bp->eth_dev->data->rx_queues[i];
- rxq->vnic = vnic;
- }
- STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
- bp->nr_vnics++;
-
- vnic->func_default = true;
- vnic->ff_pool_idx = 0;
- vnic->start_grp_id = 1;
- vnic->end_grp_id = vnic->start_grp_id +
- bp->rx_cp_nr_rings - 1;
- filter = bnxt_alloc_filter(bp);
- if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
- rc = -ENOMEM;
- goto err_out;
+skip_filter_allocation:
+ start_grp_id = end_grp_id;
+ end_grp_id += nb_q_per_grp;
}
- STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
-
- if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- vnic->hash_type =
- HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
- HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
out:
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
+ uint16_t hash_type = 0;
+
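+ /* Prefer a previously saved RSS configuration (deferred hash update) over dev_conf. */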
+ if (bp->flags & BNXT_FLAG_UPDATE_HASH) {
+ rss = &bp->rss_conf;
+ bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
+ }
+
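+ /* Translate ethdev rss_hf bits into HWRM hash types. */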
+ if (rss->rss_hf & ETH_RSS_IPV4)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
+ if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
+ if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
+ if (rss->rss_hf & ETH_RSS_IPV6)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
+ if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
+ if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
+ vnic->hash_type = hash_type;
+
+ /*
+ * Use the supplied key if the key length is
+ * acceptable and the rss_key is not NULL
+ */
+ if (rss->rss_key &&
+ rss->rss_key_len <= HW_HASH_KEY_SIZE)
+ memcpy(vnic->rss_hash_key,
+ rss->rss_key, rss->rss_key_len);
+ }
+ }
+ }
+
return rc;
err_out:
static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
struct bnxt_sw_rx_bd *sw_ring;
+ struct bnxt_tpa_info *tpa_info;
uint16_t i;
if (rxq) {
sw_ring = rxq->rx_ring->rx_buf_ring;
if (sw_ring) {
- for (i = 0; i < rxq->nb_rx_desc; i++) {
+ for (i = 0;
+ i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
+ if (sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(sw_ring[i].mbuf);
+ sw_ring[i].mbuf = NULL;
+ }
+ }
+ }
+ /* Free up mbufs in Agg ring */
+ sw_ring = rxq->rx_ring->ag_buf_ring;
+ if (sw_ring) {
+ for (i = 0;
+ i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
if (sw_ring[i].mbuf) {
rte_pktmbuf_free_seg(sw_ring[i].mbuf);
sw_ring[i].mbuf = NULL;
}
}
}
+
+ /* Free up mbufs in TPA */
+ tpa_info = rxq->rx_ring->tpa_info;
+ if (tpa_info) {
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ if (tpa_info[i].mbuf) {
+ rte_pktmbuf_free_seg(tpa_info[i].mbuf);
+ tpa_info[i].mbuf = NULL;
+ }
+ }
+ }
}
}
/* Free RX ring hardware descriptors */
bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+ /* Free RX Agg ring hardware descriptors */
+ bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
/* Free RX completion ring hardware descriptors */
bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
bnxt_free_rxq_stats(rxq);
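+ /* Release the memzone backing this queue's rings. */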
+ rte_memzone_free(rxq->mz);
+ rxq->mz = NULL;
rte_free(rxq);
}
struct rte_mempool *mp)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
struct bnxt_rx_queue *rxq;
int rc = 0;
+ if (queue_idx >= bp->max_rx_rings) {
+ PMD_DRV_LOG(ERR,
+ "Cannot create Rx ring %d. Only %d rings available\n",
+ queue_idx, bp->max_rx_rings);
+ return -EINVAL;
+ }
+
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
rc = -EINVAL;
goto out;
}
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
- RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!");
+ PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
rc = -ENOMEM;
goto out;
}
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_use_size);
+ PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
+
rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc)
goto out;
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
- rxq->crc_len = (uint8_t)((eth_dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = rte_eth_dev_must_keep_crc(rx_offloads) ?
+ ETHER_CRC_LEN : 0;
eth_dev->data->rx_queues[queue_idx] = rxq;
/* Allocate RX ring hardware descriptors */
- if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
+ if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
"rxr")) {
- RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!");
+ PMD_DRV_LOG(ERR,
+ "ring_dma_zone_reserve for rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rc = -ENOMEM;
goto out;
}
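+ /* Per-queue atomic counter for mbuf allocation failures. */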
+ rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
out:
return rc;
}
+
+int
+bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+ int rc = 0;
+
+ if (eth_dev->data->rx_queues) {
+ rxq = eth_dev->data->rx_queues[queue_id];
+ if (!rxq) {
+ rc = -EINVAL;
+ return rc;
+ }
+ cpr = rxq->cp_ring;
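+ /* Arm the completion ring doorbell to re-enable the queue interrupt. */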
+ B_CP_DB_ARM(cpr);
+ }
+ return rc;
+}
+
+int
+bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
+{
+ struct bnxt_rx_queue *rxq;
+ struct bnxt_cp_ring_info *cpr;
+ int rc = 0;
+
+ if (eth_dev->data->rx_queues) {
+ rxq = eth_dev->data->rx_queues[queue_id];
+ if (!rxq) {
+ rc = -EINVAL;
+ return rc;
+ }
+ cpr = rxq->cp_ring;
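+ /* Disarm the completion ring doorbell to mask the queue interrupt. */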
+ B_CP_DB_DISARM(cpr);
+ }
+ return rc;
+}
+
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq->rx_deferred_start = false;
+ PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
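+ /* Under RSS, restore this ring's group in the VNIC and reprogram the RSS table. */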
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
+ return 0;
+ PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
+ vnic->fw_grp_ids[rx_queue_id] =
+ bp->grp_info[rx_queue_id + 1].fw_grp_id;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+
+ return 0;
+}
+
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
+
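+ /* Under RSS, drop this ring's group from the VNIC so packets are no longer steered to it. */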
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+ return 0;
+}