ethdev: add new offload flag to keep CRC
[dpdk.git] drivers/net/bnxt/bnxt_rxq.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Queues
 */

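/*
 * Clear the Rx queue's reference to the HW stats context. The context
 * itself lives in completion-ring memory and is reclaimed when that
 * ring is freed; only the pointer is dropped here.
 */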
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
        if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
                rxq->cp_ring->hw_stats = NULL;
}

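/*
 * Distribute the port's Rx queues across VNICs according to the
 * configured multi-queue mode (single queue, RSS, or VMDq pools),
 * allocate an L2 filter for the default pool (per-pool CFA rules are
 * still a TODO below), and program the RSS hash type and hash key on
 * every VNIC when RSS is enabled.
 */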
int bnxt_mq_rx_configure(struct bnxt *bp)
{
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_eth_vmdq_rx_conf *conf =
                    &dev_conf->rx_adv_conf.vmdq_rx_conf;
        unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0;
        int start_grp_id, end_grp_id = 1, rc = 0;
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter;
        enum rte_eth_nb_pools pools = bp->rx_cp_nr_rings, max_pools = 0;
        struct bnxt_rx_queue *rxq;

        bp->nr_vnics = 0;

        /* Single queue mode */
        if (bp->rx_cp_nr_rings < 2) {
                vnic = bnxt_alloc_vnic(bp);
                if (!vnic) {
                        PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
                        rc = -ENOMEM;
                        goto err_out;
                }
                vnic->flags |= BNXT_VNIC_INFO_BCAST;
                STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
                bp->nr_vnics++;

                rxq = bp->eth_dev->data->rx_queues[0];
                rxq->vnic = vnic;

                vnic->func_default = true;
                vnic->ff_pool_idx = 0;
                vnic->start_grp_id = 0;
                vnic->end_grp_id = vnic->start_grp_id;
                filter = bnxt_alloc_filter(bp);
                if (!filter) {
                        PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
                        rc = -ENOMEM;
                        goto err_out;
                }
                STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
                goto out;
        }

        /* Multi-queue mode */
        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) {
                /* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */

                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_RSS:
                case ETH_MQ_RX_VMDQ_ONLY:
                        /* FALLTHROUGH */
                        /* ETH_8/64_POOLs */
                        pools = conf->nb_queue_pools;
                        /* For each pool, allocate MACVLAN CFA rule & VNIC */
                        max_pools = RTE_MIN(bp->max_vnics,
                                            RTE_MIN(bp->max_l2_ctx,
                                            RTE_MIN(bp->max_rsscos_ctx,
                                                    ETH_64_POOLS)));
                        if (pools > max_pools)
                                pools = max_pools;
                        break;
                case ETH_MQ_RX_RSS:
                        pools = 1;
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Unsupported mq_mode %d\n",
                                dev_conf->rxmode.mq_mode);
                        rc = -EINVAL;
                        goto err_out;
                }
        }

        nb_q_per_grp = bp->rx_cp_nr_rings / pools;
        start_grp_id = 0;
        end_grp_id = nb_q_per_grp;

        for (i = 0; i < pools; i++) {
                vnic = bnxt_alloc_vnic(bp);
                if (!vnic) {
                        PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
                        rc = -ENOMEM;
                        goto err_out;
                }
                vnic->flags |= BNXT_VNIC_INFO_BCAST;
                STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
                bp->nr_vnics++;

                for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
                        rxq = bp->eth_dev->data->rx_queues[ring_idx];
                        rxq->vnic = vnic;
                }
                if (i == 0) {
                        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) {
                                bp->eth_dev->data->promiscuous = 1;
                                vnic->flags |= BNXT_VNIC_INFO_PROMISC;
                        }
                        vnic->func_default = true;
                }
                vnic->ff_pool_idx = i;
                vnic->start_grp_id = start_grp_id;
                vnic->end_grp_id = end_grp_id;

                if (i) {
                        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB ||
                            !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS))
                                vnic->rss_dflt_cr = true;
                        goto skip_filter_allocation;
                }
                filter = bnxt_alloc_filter(bp);
                if (!filter) {
                        PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
                        rc = -ENOMEM;
                        goto err_out;
                }
                /*
                 * TODO: Configure & associate CFA rule for
                 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
                 */
                STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

skip_filter_allocation:
                start_grp_id = end_grp_id;
                end_grp_id += nb_q_per_grp;
        }

out:
        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
                struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf;
                uint16_t hash_type = 0;

                if (bp->flags & BNXT_FLAG_UPDATE_HASH) {
                        rss = &bp->rss_conf;
                        bp->flags &= ~BNXT_FLAG_UPDATE_HASH;
                }

                if (rss->rss_hf & ETH_RSS_IPV4)
                        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4;
                if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
                        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4;
                if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
                        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4;
                if (rss->rss_hf & ETH_RSS_IPV6)
                        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;
                if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
                        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6;
                if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
                        hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;

                for (i = 0; i < bp->nr_vnics; i++) {
                        STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
                                vnic->hash_type = hash_type;

                                /*
                                 * Use the supplied key if the key length is
                                 * acceptable and the rss_key is not NULL
                                 */
                                if (rss->rss_key &&
                                    rss->rss_key_len <= HW_HASH_KEY_SIZE)
                                        memcpy(vnic->rss_hash_key,
                                               rss->rss_key,
                                               rss->rss_key_len);
                        }
                }
        }

        return rc;

err_out:
        /* Free allocated vnic/filters */

        return rc;
}

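/*
 * Release all mbufs still held by the queue: buffers posted to the Rx
 * ring, buffers posted to the aggregation ring, and any segments held
 * in the TPA (hardware LRO) reassembly slots.
 */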
static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
        struct bnxt_sw_rx_bd *sw_ring;
        struct bnxt_tpa_info *tpa_info;
        uint16_t i;

        if (rxq) {
                sw_ring = rxq->rx_ring->rx_buf_ring;
                if (sw_ring) {
                        for (i = 0;
                             i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
                                if (sw_ring[i].mbuf) {
                                        rte_pktmbuf_free_seg(sw_ring[i].mbuf);
                                        sw_ring[i].mbuf = NULL;
                                }
                        }
                }
                /* Free up mbufs in Agg ring */
                sw_ring = rxq->rx_ring->ag_buf_ring;
                if (sw_ring) {
                        for (i = 0;
                             i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
                                if (sw_ring[i].mbuf) {
                                        rte_pktmbuf_free_seg(sw_ring[i].mbuf);
                                        sw_ring[i].mbuf = NULL;
                                }
                        }
                }

                /* Free up mbufs in TPA */
                tpa_info = rxq->rx_ring->tpa_info;
                if (tpa_info) {
                        for (i = 0; i < BNXT_TPA_MAX; i++) {
                                if (tpa_info[i].mbuf) {
                                        rte_pktmbuf_free_seg(tpa_info[i].mbuf);
                                        tpa_info[i].mbuf = NULL;
                                }
                        }
                }
        }
}

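/* Release the mbufs of every Rx queue owned by the port. */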
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
        struct bnxt_rx_queue *rxq;
        int i;

        for (i = 0; i < (int)bp->rx_nr_rings; i++) {
                rxq = bp->rx_queues[i];
                bnxt_rx_queue_release_mbufs(rxq);
        }
}

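/*
 * ethdev rx_queue_release callback: free the queue's mbufs, its Rx,
 * aggregation, and completion ring descriptors, its stats context and
 * backing memzone, and finally the queue structure itself.
 */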
void bnxt_rx_queue_release_op(void *rx_queue)
{
        struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

        if (rxq) {
                bnxt_rx_queue_release_mbufs(rxq);

                /* Free RX ring hardware descriptors */
                bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
                /* Free RX Agg ring hardware descriptors */
                bnxt_free_ring(rxq->rx_ring->ag_ring_struct);

                /* Free RX completion ring hardware descriptors */
                bnxt_free_ring(rxq->cp_ring->cp_ring_struct);

                bnxt_free_rxq_stats(rxq);
                rte_memzone_free(rxq->mz);
                rxq->mz = NULL;

                rte_free(rxq);
        }
}

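/*
 * ethdev rx_queue_setup callback: validate the queue index and
 * descriptor count, release any queue previously configured at that
 * index, then allocate and initialize the queue structure and its
 * rings. rxq->crc_len records ETHER_CRC_LEN whenever
 * rte_eth_dev_must_keep_crc() reports that the CRC stays on received
 * frames, per the new keep-CRC offload semantics.
 */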
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                               uint16_t queue_idx,
                               uint16_t nb_desc,
                               unsigned int socket_id,
                               const struct rte_eth_rxconf *rx_conf,
                               struct rte_mempool *mp)
{
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        struct bnxt_rx_queue *rxq;
        int rc = 0;

        if (queue_idx >= bp->max_rx_rings) {
                PMD_DRV_LOG(ERR,
                        "Cannot create Rx ring %d. Only %d rings available\n",
                        queue_idx, bp->max_rx_rings);
                return -EINVAL;
        }

        if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
                PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
                rc = -EINVAL;
                goto out;
        }

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_idx];
                if (rxq)
                        bnxt_rx_queue_release_op(rxq);
        }
        rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq) {
                PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
                rc = -ENOMEM;
                goto out;
        }
        rxq->bp = bp;
        rxq->mb_pool = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;

        PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_use_size);
        PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);

        rc = bnxt_init_rx_ring_struct(rxq, socket_id);
        if (rc)
                goto out;

        rxq->queue_id = queue_idx;
        rxq->port_id = eth_dev->data->port_id;
        rxq->crc_len = rte_eth_dev_must_keep_crc(rx_offloads) ?
                ETHER_CRC_LEN : 0;

        eth_dev->data->rx_queues[queue_idx] = rxq;
        /* Allocate RX ring hardware descriptors */
        if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
                        "rxr")) {
                PMD_DRV_LOG(ERR,
                        "ring_dma_zone_reserve for rx_ring failed!\n");
                bnxt_rx_queue_release_op(rxq);
                rc = -ENOMEM;
                goto out;
        }
        rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);

out:
        return rc;
}

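/* Re-arm the completion ring doorbell so the queue raises interrupts. */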
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
        struct bnxt_rx_queue *rxq;
        struct bnxt_cp_ring_info *cpr;
        int rc = 0;

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_id];
                if (!rxq) {
                        rc = -EINVAL;
                        return rc;
                }
                cpr = rxq->cp_ring;
                B_CP_DB_ARM(cpr);
        }
        return rc;
}

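/* Disarm the completion ring doorbell to mask the queue's interrupts. */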
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
        struct bnxt_rx_queue *rxq;
        struct bnxt_cp_ring_info *cpr;
        int rc = 0;

        if (eth_dev->data->rx_queues) {
                rxq = eth_dev->data->rx_queues[queue_id];
                if (!rxq) {
                        rc = -EINVAL;
                        return rc;
                }
                cpr = rxq->cp_ring;
                B_CP_DB_DISARM(cpr);
        }
        return rc;
}

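/*
 * ethdev rx_queue_start callback: mark the queue started and, when RSS
 * is active, restore its ring group in the VNIC and reprogram the RSS
 * table via bnxt_vnic_rss_configure().
 */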
int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
        struct bnxt_vnic_info *vnic = NULL;

        if (rxq == NULL) {
                PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
                return -EINVAL;
        }

        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        rxq->rx_deferred_start = false;
        PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
                vnic = rxq->vnic;
                if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
                        return 0;
                PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
                        vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
                vnic->fw_grp_ids[rx_queue_id] =
                                        bp->grp_info[rx_queue_id + 1].fw_grp_id;
                return bnxt_vnic_rss_configure(bp, vnic);
        }

        return 0;
}

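/*
 * ethdev rx_queue_stop callback: mark the queue stopped and, when RSS
 * is active, invalidate its ring group in the VNIC so the reprogrammed
 * RSS table no longer steers packets to it.
 */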
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
        struct bnxt_vnic_info *vnic = NULL;

        if (rxq == NULL) {
                PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
                return -EINVAL;
        }

        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        rxq->rx_deferred_start = true;
        PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");

        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
                vnic = rxq->vnic;
                vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
                return bnxt_vnic_rss_configure(bp, vnic);
        }
        return 0;
}