/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;

	/*
	 * The stats block lives in the completion ring's DMA zone and is
	 * freed along with it; only the reference is dropped here.
	 */
	if (cpr->hw_stats)
		cpr->hw_stats = NULL;
}
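
/*
 * Map the port's Rx multi-queue configuration (single queue, RSS, VMDq,
 * or a combination) onto hardware resources: one VNIC per pool, a
 * contiguous range of ring groups per VNIC, and an L2 filter per VNIC
 * where one is required.
 */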
int bnxt_mq_rx_configure(struct bnxt *bp)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
	int start_grp_id, end_grp_id = 1, rc = 0;
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter;
	enum rte_eth_nb_pools pools = bp->rx_cp_nr_rings, max_pools = 0;
	struct bnxt_rx_queue *rxq;
	bool rss_dflt_cr = false;

	bp->nr_vnics = 0;
	/* Single queue mode */
	if (bp->rx_cp_nr_rings < 2) {
		vnic = bnxt_alloc_vnic(bp);
		if (!vnic) {
			RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
		bp->nr_vnics++;

		rxq = bp->eth_dev->data->rx_queues[0];
		rxq->vnic = vnic;

		vnic->func_default = true;
		vnic->ff_pool_idx = 0;
		vnic->start_grp_id = 0;
		vnic->end_grp_id = vnic->start_grp_id;
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		goto out;
	}
	/* Multi-queue mode */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_ONLY:
			pools = conf->nb_queue_pools;
			/* For each pool, allocate MACVLAN CFA rule & VNIC */
			max_pools = RTE_MIN(bp->max_vnics,
					    RTE_MIN(bp->max_l2_ctx,
					    RTE_MIN(bp->max_rsscos_ctx,
						    ETH_64_POOLS)));
			if (pools > max_pools)
				pools = max_pools;
			break;
		case ETH_MQ_RX_RSS:
			pools = 1;
			break;
		default:
			RTE_LOG(ERR, PMD, "Unsupported mq_mode %d\n",
				dev_conf->rxmode.mq_mode);
			rc = -EINVAL;
			goto err_out;
		}
	}
	/*
	 * If doing MQ RX without RSS, a per-VNIC filter is not needed;
	 * VNICs other than the first are marked rss_dflt_cr instead.
	 */
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) ||
	    (bp->rx_cp_nr_rings &&
	     !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS)))
		rss_dflt_cr = true;

	nb_q_per_grp = bp->rx_cp_nr_rings / pools;
	start_grp_id = 0;
	end_grp_id = nb_q_per_grp;
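
	/*
	 * Each pool is backed by nb_q_per_grp contiguous Rx rings. For
	 * example, 8 Rx rings across 4 pools gives nb_q_per_grp = 2: pool 0
	 * owns ring groups [0, 2), pool 1 owns [2, 4), and so on, with
	 * start_grp_id/end_grp_id advancing by nb_q_per_grp per pool.
	 */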
	for (i = 0; i < pools; i++) {
		vnic = bnxt_alloc_vnic(bp);
		if (!vnic) {
			RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		vnic->flags |= BNXT_VNIC_INFO_BCAST;
		STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
		bp->nr_vnics++;

		for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
			rxq = bp->eth_dev->data->rx_queues[ring_idx];
			rxq->vnic = vnic;
		}
		if (i == 0) {
			if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
				bp->eth_dev->data->promiscuous = 1;
				vnic->flags |= BNXT_VNIC_INFO_PROMISC;
			}
			vnic->func_default = true;
		}
		vnic->ff_pool_idx = i;
		vnic->start_grp_id = start_grp_id;
		vnic->end_grp_id = end_grp_id;

		if (rss_dflt_cr && i) {
			vnic->rss_dflt_cr = true;
			goto skip_filter_allocation;
		}
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		for (j = 0; j < conf->nb_pool_maps; j++) {
			if (conf->pool_map[j].pools & (1ULL << i)) {
				RTE_LOG(INFO, PMD,
					"Add vlan %u to vmdq pool %u\n",
					conf->pool_map[j].vlan_id, i);

				filter->l2_ivlan = conf->pool_map[j].vlan_id;
				filter->enables |=
				HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
			}
		}
		/*
		 * TODO: Configure & associate CFA rule for
		 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
		 */
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
		start_grp_id = end_grp_id;
		end_grp_id += nb_q_per_grp;
	}

out:
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
		struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
		uint16_t hash_type = 0;

		/*
		 * Honor a hash configuration that arrived through
		 * rte_eth_dev_rss_hash_update() over the device config.
		 */
		if (bp->flags & BNXT_FLAG_UPDATE_HASH) {
			rss = &bp->rss_conf;
			bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
		}
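
		/*
		 * Translate the ethdev ETH_RSS_* hash flags into the HWRM
		 * hash-type bits understood by the firmware; flags with no
		 * HWRM equivalent are ignored.
		 */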
		if (rss->rss_hf & ETH_RSS_IPV4)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
		if (rss->rss_hf & ETH_RSS_IPV6)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
		if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
			hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
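
		/*
		 * Record the hash type (and key, if one was supplied) on
		 * every VNIC; the values are programmed into the NIC later
		 * over HWRM when the VNICs are configured.
		 */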
		for (i = 0; i < bp->nr_vnics; i++) {
			STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
				vnic->hash_type |= hash_type;

				/*
				 * Use the supplied key if the key length is
				 * acceptable and the rss_key is not NULL
				 */
				if (rss->rss_key &&
				    rss->rss_key_len <= HW_HASH_KEY_SIZE)
					memcpy(vnic->rss_hash_key,
					       rss->rss_key, rss->rss_key_len);
			}
		}
	}

	return rc;

err_out:
	/* Free allocated vnic/filters */

	return rc;
}
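
/*
 * Free every mbuf still attached to a queue's software rings: the Rx
 * ring, the aggregation ring and the TPA (LRO) reassembly slots.
 */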
static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
	struct bnxt_sw_rx_bd *sw_ring;
	struct bnxt_tpa_info *tpa_info;
	uint16_t i;

	if (!rxq)
		return;

	sw_ring = rxq->rx_ring->rx_buf_ring;
	if (sw_ring) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(sw_ring[i].mbuf);
				sw_ring[i].mbuf = NULL;
			}
		}
	}

	/* Free up mbufs in Agg ring */
	sw_ring = rxq->rx_ring->ag_buf_ring;
	if (sw_ring) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (sw_ring[i].mbuf) {
				rte_pktmbuf_free_seg(sw_ring[i].mbuf);
				sw_ring[i].mbuf = NULL;
			}
		}
	}

	/* Free up mbufs in TPA */
	tpa_info = rxq->rx_ring->tpa_info;
	if (tpa_info) {
		for (i = 0; i < BNXT_TPA_MAX; i++) {
			if (tpa_info[i].mbuf) {
				rte_pktmbuf_free_seg(tpa_info[i].mbuf);
				tpa_info[i].mbuf = NULL;
			}
		}
	}
}
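
/* Release the mbufs of every Rx queue on the port. */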
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	int i;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}
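
/*
 * Tear down a queue in the reverse order of setup: mbufs first, then
 * the Rx, aggregation and completion ring descriptors, then the stats
 * reference, and finally the queue structure itself.
 */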
void bnxt_rx_queue_release_op(void *rx_queue)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

	if (!rxq)
		return;

	bnxt_rx_queue_release_mbufs(rxq);

	/* Free RX ring hardware descriptors */
	bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
	/* Free RX Agg ring hardware descriptors */
	bnxt_free_ring(rxq->rx_ring->ag_ring_struct);

	/* Free RX completion ring hardware descriptors */
	bnxt_free_ring(rxq->cp_ring->cp_ring_struct);

	bnxt_free_rxq_stats(rxq);

	rte_free(rxq);
}
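
/*
 * rte_eth_rx_queue_setup() handler: validate the descriptor count,
 * release any queue previously set up at this index, allocate and
 * initialize the queue structure, and reserve DMA memory for the Rx,
 * aggregation and completion rings.
 */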
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
			       uint16_t queue_idx,
			       uint16_t nb_desc,
			       unsigned int socket_id,
			       const struct rte_eth_rxconf *rx_conf,
			       struct rte_mempool *mp)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	int rc = 0;

	if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
		RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
		rc = -EINVAL;
		goto out;
	}

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(rxq);
	}
	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
		rc = -ENOMEM;
		goto out;
	}
	rxq->bp = bp;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;

	RTE_LOG(DEBUG, PMD, "RX Buf size is %d\n", rxq->rx_buf_use_size);
	RTE_LOG(DEBUG, PMD, "RX Buf MTU %d\n", eth_dev->data->mtu);

	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
	if (rc)
		goto out;

	rxq->queue_id = queue_idx;
	rxq->port_id = eth_dev->data->port_id;
	rxq->crc_len = (uint8_t)((eth_dev->data->dev_conf.rxmode.hw_strip_crc) ?
				 0 : ETHER_CRC_LEN);

	eth_dev->data->rx_queues[queue_idx] = rxq;
	/* Allocate RX ring hardware descriptors */
	if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
			     "rxr")) {
		RTE_LOG(ERR, PMD,
			"ring_dma_zone_reserve for rx_ring failed!\n");
		bnxt_rx_queue_release_op(rxq);
		rc = -ENOMEM;
		goto out;
	}

out:
	return rc;
}
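
/*
 * Rx interrupt control: B_CP_DB_ARM() re-arms the completion ring
 * doorbell so new completions raise an interrupt, and B_CP_DB_DISARM()
 * suppresses it. These ops back rte_eth_dev_rx_intr_enable() and
 * rte_eth_dev_rx_intr_disable().
 */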
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = -EINVAL;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return rc;

		cpr = rxq->cp_ring;
		B_CP_DB_ARM(cpr);
		rc = 0;
	}
	return rc;
}
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	struct bnxt_rx_queue *rxq;
	struct bnxt_cp_ring_info *cpr;
	int rc = -EINVAL;

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_id];
		if (!rxq)
			return rc;

		cpr = rxq->cp_ring;
		B_CP_DB_DISARM(cpr);
		rc = 0;
	}
	return rc;
}