net/bnxt: support LRO
dpdk.git: drivers/net/bnxt/bnxt_rxr.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_rxr.h"
#include "bnxt_rxq.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Ring handling
 */

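/*
 * Thin helper around rte_mbuf_raw_alloc(): returns a raw mbuf from the
 * queue's mempool, or NULL if the pool is exhausted.
 */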
static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
{
        struct rte_mbuf *data;

        data = rte_mbuf_raw_alloc(mb);

        return data;
}

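/*
 * Allocate a fresh mbuf for RX ring slot 'prod' and point the matching
 * buffer descriptor at its data area.
 */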
static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
                                     struct bnxt_rx_ring_info *rxr,
                                     uint16_t prod)
{
        struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        struct rte_mbuf *data;

        data = __bnxt_alloc_rx_data(rxq->mb_pool);
        if (!data)
                return -ENOMEM;

        rx_buf->mbuf = data;

        rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));

        return 0;
}

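/*
 * Same as bnxt_alloc_rx_data(), but for the aggregation ring that supplies
 * the extra buffers used for scattered jumbo frames and TPA (LRO).
 */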
static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
                                     struct bnxt_rx_ring_info *rxr,
                                     uint16_t prod)
{
        struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
        struct rte_mbuf *data;

        data = __bnxt_alloc_rx_data(rxq->mb_pool);
        if (!data)
                return -ENOMEM;

        if (rxbd == NULL)
                RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n");
        if (rx_buf == NULL)
                RTE_LOG(ERR, PMD, "Jumbo Frame. rx_buf is NULL\n");

        rx_buf->mbuf = data;

        rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));

        return 0;
}

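/*
 * Re-post an existing mbuf on the next RX producer slot instead of
 * allocating a new one from the mempool.
 */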
static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
                               struct rte_mbuf *mbuf)
{
        uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
        struct bnxt_sw_rx_bd *prod_rx_buf;
        struct rx_prod_pkt_bd *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];

        RTE_ASSERT(prod_rx_buf->mbuf == NULL);
        RTE_ASSERT(mbuf != NULL);

        prod_rx_buf->mbuf = mbuf;

        prod_bd = &rxr->rx_desc_ring[prod];

        prod_bd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(mbuf));

        rxr->rx_prod = prod;
}

#ifdef BNXT_DEBUG
static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
                               struct rte_mbuf *mbuf)
{
        uint16_t prod = rxr->ag_prod;
        struct bnxt_sw_rx_bd *prod_rx_buf;
        struct rx_prod_pkt_bd *prod_bd, *cons_bd;

        prod_rx_buf = &rxr->ag_buf_ring[prod];

        prod_rx_buf->mbuf = mbuf;

        prod_bd = &rxr->ag_desc_ring[prod];
        cons_bd = &rxr->ag_desc_ring[cons];

        prod_bd->addr = cons_bd->addr;
}
#endif

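/* Detach and return the mbuf parked at RX ring slot 'cons'. */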
static inline
struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
                                     uint16_t cons)
{
        struct bnxt_sw_rx_bd *cons_rx_buf;
        struct rte_mbuf *mbuf;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        RTE_ASSERT(cons_rx_buf->mbuf != NULL);
        mbuf = cons_rx_buf->mbuf;
        cons_rx_buf->mbuf = NULL;
        return mbuf;
}

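/*
 * Handle a TPA_START completion: the hardware has opened an LRO aggregation
 * identified by agg_id.  The buffer referenced by 'opaque' becomes the head
 * mbuf of the aggregation, the buffer previously cached in tpa_info is
 * recycled back onto the RX ring, and the head mbuf's metadata (length,
 * RSS/FDIR, VLAN and checksum flags) is seeded from the completion.
 */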
static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
                           struct rx_tpa_start_cmpl *tpa_start,
                           struct rx_tpa_start_cmpl_hi *tpa_start1)
{
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id &
                RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT;
        uint16_t data_cons;
        struct bnxt_tpa_info *tpa_info;
        struct rte_mbuf *mbuf;

        data_cons = tpa_start->opaque;
        tpa_info = &rxr->tpa_info[agg_id];

        mbuf = bnxt_consume_rx_buf(rxr, data_cons);

        bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);

        tpa_info->mbuf = mbuf;
        tpa_info->len = rte_le_to_cpu_32(tpa_start->len);

        mbuf->nb_segs = 1;
        mbuf->next = NULL;
        mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
        mbuf->data_len = mbuf->pkt_len;
        mbuf->port = rxq->port_id;
        mbuf->ol_flags = PKT_RX_LRO;
        if (likely(tpa_start->flags_type &
                   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
                mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
                mbuf->ol_flags |= PKT_RX_RSS_HASH;
        } else {
                mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
                mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
        }
        if (tpa_start1->flags2 &
            rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
                mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
                mbuf->ol_flags |= PKT_RX_VLAN_PKT;
        }
        if (likely(tpa_start1->flags2 &
                   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;

        /* recycle next mbuf */
        data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
        bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
}

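/*
 * Check that all 'agg_bufs' aggregation completions following the current
 * completion have already been written by hardware, by testing the valid
 * bit of the last expected entry.
 */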
static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
                uint8_t agg_bufs, uint32_t raw_cp_cons)
{
        uint16_t last_cp_cons;
        struct rx_pkt_cmpl *agg_cmpl;

        raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
        last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
        agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
        return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
}

/*
 * TPA consumes aggregation buffers out of order; only replenish the freed
 * slots that are contiguous with the current producer index.
 */
static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
{
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);

        /* TODO batch allocation for better performance */
        while (rte_bitmap_get(rxr->ag_bitmap, next)) {
                if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
                        RTE_LOG(ERR, PMD,
                                "agg mbuf alloc failed: prod=0x%x\n", next);
                        break;
                }
                rte_bitmap_clear(rxr->ag_bitmap, next);
                rxr->ag_prod = next;
                next = RING_NEXT(rxr->ag_ring_struct, next);
        }

        return 0;
}

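/*
 * Consume the 'agg_buf' aggregation completions that follow the main RX
 * completion, chain each aggregation mbuf onto 'mbuf', and mark the freed
 * aggregation ring slots in the bitmap so they can be replenished.
 */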
static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
                         struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
                         uint8_t agg_buf)
{
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        int i;
        uint16_t cp_cons, ag_cons;
        struct rx_pkt_cmpl *rxcmp;
        struct rte_mbuf *last = mbuf;

        for (i = 0; i < agg_buf; i++) {
                struct bnxt_sw_rx_bd *ag_buf;
                struct rte_mbuf *ag_mbuf;
                *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
                cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
                rxcmp = (struct rx_pkt_cmpl *)
                                        &cpr->cp_desc_ring[cp_cons];

#ifdef BNXT_DEBUG
                bnxt_dump_cmpl(cp_cons, rxcmp);
#endif

                ag_cons = rxcmp->opaque;
                RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
                ag_buf = &rxr->ag_buf_ring[ag_cons];
                ag_mbuf = ag_buf->mbuf;
                RTE_ASSERT(ag_mbuf != NULL);

                ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);

                mbuf->nb_segs++;
                mbuf->pkt_len += ag_mbuf->data_len;

                last->next = ag_mbuf;
                last = ag_mbuf;

                ag_buf->mbuf = NULL;

                /*
                 * Aggregation buffers are consumed out of order by TPA, so
                 * use a bitmap to track the freed slots that still need to
                 * be re-allocated and advertised to the NIC.
                 */
                rte_bitmap_set(rxr->ag_bitmap, ag_cons);
        }
        bnxt_prod_ag_mbuf(rxq);
        return 0;
}

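/*
 * Handle a TPA_END completion: the aggregation identified by agg_id is
 * complete.  Chain any remaining aggregation buffers onto the head mbuf,
 * record the payload offset in l4_len, and allocate a replacement head
 * buffer for the next aggregation that reuses this agg_id.
 */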
static inline struct rte_mbuf *bnxt_tpa_end(
                struct bnxt_rx_queue *rxq,
                uint32_t *raw_cp_cons,
                struct rx_tpa_end_cmpl *tpa_end,
                struct rx_tpa_end_cmpl_hi *tpa_end1 __rte_unused)
{
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint8_t agg_id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK)
                        >> RX_TPA_END_CMPL_AGG_ID_SFT;
        struct rte_mbuf *mbuf;
        uint8_t agg_bufs;
        struct bnxt_tpa_info *tpa_info;

        tpa_info = &rxr->tpa_info[agg_id];
        mbuf = tpa_info->mbuf;
        RTE_ASSERT(mbuf != NULL);

        rte_prefetch0(mbuf);
        agg_bufs = (rte_le_to_cpu_32(tpa_end->agg_bufs_v1) &
                RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT;
        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
                        return NULL;
                bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs);
        }
        mbuf->l4_len = tpa_end->payload_offset;

        struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
        RTE_ASSERT(new_data != NULL);
        if (!new_data)
                return NULL;
        tpa_info->mbuf = new_data;

        return mbuf;
}

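/*
 * Process a single RX completion.  Returns 0 when a packet is handed back
 * in *rx_pkt, -EBUSY when the completion (or its aggregation entries) has
 * not been fully written by hardware yet, and another negative value when
 * the completion does not produce a packet (e.g. TPA start, unsupported
 * completion type).
 */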
static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
                            struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        struct rx_pkt_cmpl *rxcmp;
        struct rx_pkt_cmpl_hi *rxcmp1;
        uint32_t tmp_raw_cons = *raw_cons;
        uint16_t cons, prod, cp_cons =
            RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
#ifdef BNXT_DEBUG
        uint16_t ag_cons;
#endif
        struct rte_mbuf *mbuf;
        int rc = 0;
        uint8_t agg_buf = 0;
        uint16_t cmp_type;

        rxcmp = (struct rx_pkt_cmpl *)
            &cpr->cp_desc_ring[cp_cons];

        tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
        cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
        rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];

        if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
                return -EBUSY;

        cmp_type = CMP_TYPE(rxcmp);
        if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_START) {
                bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
                               (struct rx_tpa_start_cmpl_hi *)rxcmp1);
                rc = -EINVAL; /* Continue w/o new mbuf */
                goto next_rx;
        } else if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) {
                mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
                                   (struct rx_tpa_end_cmpl *)rxcmp,
                                   (struct rx_tpa_end_cmpl_hi *)rxcmp1);
                if (unlikely(!mbuf))
                        return -EBUSY;
                *rx_pkt = mbuf;
                goto next_rx;
        } else if (cmp_type != 0x11) { /* 0x11 is the plain RX L2 completion */
                rc = -EINVAL;
                goto next_rx;
        }

        agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
                        >> RX_PKT_CMPL_AGG_BUFS_SFT;
        if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
                return -EBUSY;

        prod = rxr->rx_prod;

        cons = rxcmp->opaque;
        mbuf = bnxt_consume_rx_buf(rxr, cons);
        rte_prefetch0(mbuf);

        if (mbuf == NULL)
                return -ENOMEM;

        mbuf->nb_segs = 1;
        mbuf->next = NULL;
        mbuf->pkt_len = rxcmp->len;
        mbuf->data_len = mbuf->pkt_len;
        mbuf->port = rxq->port_id;
        mbuf->ol_flags = 0;
        if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
                mbuf->hash.rss = rxcmp->rss_hash;
                mbuf->ol_flags |= PKT_RX_RSS_HASH;
        } else {
                mbuf->hash.fdir.id = rxcmp1->cfa_code;
                mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
        }

        if (agg_buf)
                bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);

        if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
                mbuf->vlan_tci = rxcmp1->metadata &
                        (RX_PKT_CMPL_METADATA_VID_MASK |
                        RX_PKT_CMPL_METADATA_DE |
                        RX_PKT_CMPL_METADATA_PRI_MASK);
                mbuf->ol_flags |= PKT_RX_VLAN_PKT;
        }

#ifdef BNXT_DEBUG
        if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
                /* Re-install the mbuf back to the rx ring */
                bnxt_reuse_rx_mbuf(rxr, mbuf);
                if (agg_buf)
                        bnxt_reuse_ag_mbuf(rxr, ag_cons, mbuf);

                rc = -EIO;
                goto next_rx;
        }
#endif
        /*
         * TODO: Redesign this....
         * If the allocation fails, the packet does not get received.
         * Simply returning this will result in slowly falling behind
         * on the producer ring buffers.
         * Instead, "filling up" the producer just before ringing the
         * doorbell could be a better solution since it will let the
         * producer ring starve until memory is available again, pushing
         * the drops into hardware and out of the driver, and allowing
         * recovery to a full producer ring.
         *
         * This could also help with cache usage by preventing per-packet
         * calls in favour of a tight loop with the same function being
         * called in it.
         */
        prod = RING_NEXT(rxr->rx_ring_struct, prod);
        if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
                RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod);
                rc = -ENOMEM;
        }
        rxr->rx_prod = prod;
        /*
         * All mbufs are allocated with the same size under DPDK, so there
         * is no optimization for rx_copy_thresh.
         */

        *rx_pkt = mbuf;

next_rx:

        *raw_cons = tmp_raw_cons;

        return rc;
}

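/*
 * Burst receive handler.  Drains up to nb_pkts completed packets from the
 * completion ring, then re-arms the completion ring doorbell and advertises
 * the new RX and aggregation ring producer indexes.  It is installed as the
 * device's rx_pkt_burst callback elsewhere in the driver, roughly along the
 * lines of the usual ethdev wiring (sketch, not verbatim from this patch):
 *
 *     eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
 */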
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                               uint16_t nb_pkts)
{
        struct bnxt_rx_queue *rxq = rx_queue;
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint32_t raw_cons = cpr->cp_raw_cons;
        uint32_t cons;
        int nb_rx_pkts = 0;
        struct rx_pkt_cmpl *rxcmp;
        uint16_t prod = rxr->rx_prod;
        uint16_t ag_prod = rxr->ag_prod;

        /* Handle RX burst request */
        while (1) {
                int rc;

                cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
                rte_prefetch0(&cpr->cp_desc_ring[cons]);
                rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

                if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
                        break;

                /* TODO: Avoid magic numbers... */
                if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
                        rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
                        if (likely(!rc))
                                nb_rx_pkts++;
                        if (rc == -EBUSY)       /* partial completion */
                                break;
                }
                raw_cons = NEXT_RAW_CMP(raw_cons);
                if (nb_rx_pkts == nb_pkts)
                        break;
        }

        cpr->cp_raw_cons = raw_cons;
        if (prod == rxr->rx_prod && ag_prod == rxr->ag_prod) {
                /*
                 * No new buffers were posted, so there is no need to keep
                 * pushing the doorbells to re-arm when nothing has changed.
                 */
                return nb_rx_pkts;
        }

        B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
        B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
        /* Ring the AGG ring DB */
        B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
        return nb_rx_pkts;
}

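/*
 * Free the RX, aggregation and completion rings, and the queue structures
 * themselves, for every configured RX queue.
 */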
void bnxt_free_rx_rings(struct bnxt *bp)
{
        int i;

        for (i = 0; i < (int)bp->rx_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];

                if (!rxq)
                        continue;

                bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
                rte_free(rxq->rx_ring->rx_ring_struct);

                /* Free the Aggregation ring */
                bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
                rte_free(rxq->rx_ring->ag_ring_struct);
                rxq->rx_ring->ag_ring_struct = NULL;

                rte_free(rxq->rx_ring);

                bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
                rte_free(rxq->cp_ring->cp_ring_struct);
                rte_free(rxq->cp_ring);

                rte_free(rxq);
                bp->rx_queues[i] = NULL;
        }
}

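/*
 * Allocate the per-queue ring bookkeeping: the RX descriptor ring, a
 * completion ring large enough for the two-entry RX completions plus the
 * aggregation completions (hence the 2 + AGG_RING_SIZE_FACTOR multiplier),
 * and the aggregation ring itself.
 */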
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
        struct bnxt_cp_ring_info *cpr;
        struct bnxt_rx_ring_info *rxr;
        struct bnxt_ring *ring;

        rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN +
                               (2 * VLAN_TAG_SIZE);
        rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf);

        rxr = rte_zmalloc_socket("bnxt_rx_ring",
                                 sizeof(struct bnxt_rx_ring_info),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxr == NULL)
                return -ENOMEM;
        rxq->rx_ring = rxr;

        ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
                                   sizeof(struct bnxt_ring),
                                   RTE_CACHE_LINE_SIZE, socket_id);
        if (ring == NULL)
                return -ENOMEM;
        rxr->rx_ring_struct = ring;
        ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
        ring->ring_mask = ring->ring_size - 1;
        ring->bd = (void *)rxr->rx_desc_ring;
        ring->bd_dma = rxr->rx_desc_mapping;
        ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
        ring->vmem = (void **)&rxr->rx_buf_ring;

        cpr = rte_zmalloc_socket("bnxt_rx_ring",
                                 sizeof(struct bnxt_cp_ring_info),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (cpr == NULL)
                return -ENOMEM;
        rxq->cp_ring = cpr;

        ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
                                   sizeof(struct bnxt_ring),
                                   RTE_CACHE_LINE_SIZE, socket_id);
        if (ring == NULL)
                return -ENOMEM;
        cpr->cp_ring_struct = ring;
        ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size *
                                          (2 + AGG_RING_SIZE_FACTOR));
        ring->ring_mask = ring->ring_size - 1;
        ring->bd = (void *)cpr->cp_desc_ring;
        ring->bd_dma = cpr->cp_desc_mapping;
        ring->vmem_size = 0;
        ring->vmem = NULL;

        /* Allocate the aggregation ring */
        ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
                                   sizeof(struct bnxt_ring),
                                   RTE_CACHE_LINE_SIZE, socket_id);
        if (ring == NULL)
                return -ENOMEM;
        rxr->ag_ring_struct = ring;
        ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
                                          AGG_RING_SIZE_FACTOR);
        ring->ring_mask = ring->ring_size - 1;
        ring->bd = (void *)rxr->ag_desc_ring;
        ring->bd_dma = rxr->ag_desc_mapping;
        ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
        ring->vmem = (void **)&rxr->ag_buf_ring;

        return 0;
}

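/*
 * Pre-program every buffer descriptor in a ring with the given BD type and
 * buffer length; 'opaque' is set to the slot index so a completion can be
 * mapped back to its software ring entry.
 */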
static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
                            uint16_t len)
{
        uint32_t j;
        struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;

        if (!rx_bd_ring)
                return;
        for (j = 0; j < ring->ring_size; j++) {
                rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
                rx_bd_ring[j].len = rte_cpu_to_le_16(len);
                rx_bd_ring[j].opaque = j;
        }
}

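/*
 * Fill the RX and aggregation rings with mbufs and, when TPA (LRO) is in
 * use and tpa_info has been allocated, pre-allocate one head buffer per
 * aggregation context.
 */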
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
{
        struct bnxt_rx_ring_info *rxr;
        struct bnxt_ring *ring;
        uint32_t prod, type;
        unsigned int i;
        uint16_t size;

        size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
        if (rxq->rx_buf_use_size <= size)
                size = rxq->rx_buf_use_size;

        type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;

        rxr = rxq->rx_ring;
        ring = rxr->rx_ring_struct;
        bnxt_init_rxbds(ring, type, size);

        prod = rxr->rx_prod;
        for (i = 0; i < ring->ring_size; i++) {
                if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
                        RTE_LOG(WARNING, PMD,
                                "init'ed rx ring %d with %d/%d mbufs only\n",
                                rxq->queue_id, i, ring->ring_size);
                        break;
                }
                rxr->rx_prod = prod;
                prod = RING_NEXT(rxr->rx_ring_struct, prod);
        }
        RTE_LOG(DEBUG, PMD, "%s\n", __func__);

        ring = rxr->ag_ring_struct;
        type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
        bnxt_init_rxbds(ring, type, size);
        prod = rxr->ag_prod;

        for (i = 0; i < ring->ring_size; i++) {
                if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
                        RTE_LOG(WARNING, PMD,
                                "init'ed AG ring %d with %d/%d mbufs only\n",
                                rxq->queue_id, i, ring->ring_size);
                        break;
                }
                rxr->ag_prod = prod;
                prod = RING_NEXT(rxr->ag_ring_struct, prod);
        }
        RTE_LOG(DEBUG, PMD, "%s AGG Done!\n", __func__);

        if (rxr->tpa_info) {
                for (i = 0; i < BNXT_TPA_MAX; i++) {
                        rxr->tpa_info[i].mbuf =
                                __bnxt_alloc_rx_data(rxq->mb_pool);
                        if (!rxr->tpa_info[i].mbuf)
                                return -ENOMEM;
                }
        }
        RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__);

        return 0;
}