net/bnxt: support port representor data path
drivers/net/bnxt/bnxt_rxr.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>

#include "bnxt.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxr.h"
#include "bnxt_rxq.h"
#include "hsi_struct_def_dpdk.h"
#ifdef RTE_LIBRTE_IEEE1588
#include "bnxt_hwrm.h"
#endif

#include <bnxt_tf_common.h>
#include <ulp_mark_mgr.h>

/*
 * RX Ring handling
 */

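/*
 * Overview (inferred from the code below): each Rx queue uses three
 * rings. The Rx descriptor ring holds buffers the driver produces for
 * the NIC, the aggregation ring holds extra buffers for jumbo/LRO
 * segments, and the completion ring holds records the NIC produces as
 * packets arrive. The rx_prod/ag_prod and *_cons indices track the
 * driver's producer and consumer roles respectively.
 */
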
static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb)
{
        struct rte_mbuf *data;

        data = rte_mbuf_raw_alloc(mb);

        return data;
}

static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
                                     struct bnxt_rx_ring_info *rxr,
                                     uint16_t prod)
{
        struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        struct rte_mbuf *mbuf;

        mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
        if (!mbuf) {
                rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
                return -ENOMEM;
        }

        rx_buf->mbuf = mbuf;
        mbuf->data_off = RTE_PKTMBUF_HEADROOM;

        rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

        return 0;
}

static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
                                     struct bnxt_rx_ring_info *rxr,
                                     uint16_t prod)
{
        struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
        struct rte_mbuf *mbuf;

        if (rxbd == NULL) {
                PMD_DRV_LOG(ERR, "Jumbo frame: rxbd is NULL\n");
                return -EINVAL;
        }

        if (rx_buf == NULL) {
                PMD_DRV_LOG(ERR, "Jumbo frame: rx_buf is NULL\n");
                return -EINVAL;
        }

        mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
        if (!mbuf) {
                rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
                return -ENOMEM;
        }

        rx_buf->mbuf = mbuf;
        mbuf->data_off = RTE_PKTMBUF_HEADROOM;

        rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

        return 0;
}

static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
                                      struct rte_mbuf *mbuf)
{
        uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
        struct bnxt_sw_rx_bd *prod_rx_buf;
        struct rx_prod_pkt_bd *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];

        RTE_ASSERT(prod_rx_buf->mbuf == NULL);
        RTE_ASSERT(mbuf != NULL);

        prod_rx_buf->mbuf = mbuf;

        prod_bd = &rxr->rx_desc_ring[prod];

        prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

        rxr->rx_prod = prod;
}

static inline
struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
                                     uint16_t cons)
{
        struct bnxt_sw_rx_bd *cons_rx_buf;
        struct rte_mbuf *mbuf;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        RTE_ASSERT(cons_rx_buf->mbuf != NULL);
        mbuf = cons_rx_buf->mbuf;
        cons_rx_buf->mbuf = NULL;
        return mbuf;
}

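/*
 * bnxt_consume_rx_buf() and bnxt_reuse_rx_mbuf() form a recycle pair:
 * the TPA start path takes ownership of the mbuf parked at the consumer
 * index and immediately re-posts the TPA context's previous mbuf at the
 * producer index, so the hardware never sees an empty slot.
 */
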
static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
                           struct rx_tpa_start_cmpl *tpa_start,
                           struct rx_tpa_start_cmpl_hi *tpa_start1)
{
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint16_t agg_id;
        uint16_t data_cons;
        struct bnxt_tpa_info *tpa_info;
        struct rte_mbuf *mbuf;

        agg_id = bnxt_tpa_start_agg_id(rxq->bp, tpa_start);

        data_cons = tpa_start->opaque;
        tpa_info = &rxr->tpa_info[agg_id];

        mbuf = bnxt_consume_rx_buf(rxr, data_cons);

        bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);

        tpa_info->agg_count = 0;
        tpa_info->mbuf = mbuf;
        tpa_info->len = rte_le_to_cpu_32(tpa_start->len);

        mbuf->nb_segs = 1;
        mbuf->next = NULL;
        mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
        mbuf->data_len = mbuf->pkt_len;
        mbuf->port = rxq->port_id;
        mbuf->ol_flags = PKT_RX_LRO;
        if (likely(tpa_start->flags_type &
                   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
                mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
                mbuf->ol_flags |= PKT_RX_RSS_HASH;
        } else {
                mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
                mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
        }
        if (tpa_start1->flags2 &
            rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
                mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
                mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
        }
        if (likely(tpa_start1->flags2 &
                   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;

        /* recycle next mbuf */
        data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
        bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
}

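/*
 * TPA (LRO) lifecycle as handled in this file: a TPA start completion
 * (bnxt_tpa_start) claims an mbuf for the aggregation context, any
 * aggregation completions append segments (bnxt_rx_pages), and a TPA
 * end completion (bnxt_tpa_end) hands the assembled packet back to the
 * Rx burst path.
 */
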
static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
                               uint8_t agg_bufs, uint32_t raw_cp_cons)
{
        uint16_t last_cp_cons;
        struct rx_pkt_cmpl *agg_cmpl;

        raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
        last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
        agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
        cpr->valid = FLIP_VALID(raw_cp_cons,
                                cpr->cp_ring_struct->ring_mask,
                                cpr->valid);
        return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
}

/*
 * TPA consumes aggregation buffers out of order, so refill only the
 * contiguous run of freed slots (tracked via ag_bitmap) starting at the
 * current aggregation producer index.
 */
static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
{
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);

        /* TODO: batch allocation for better performance */
        while (rte_bitmap_get(rxr->ag_bitmap, next)) {
                if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
                        PMD_DRV_LOG(ERR,
                                    "agg mbuf alloc failed: prod=0x%x\n", next);
                        break;
                }
                rte_bitmap_clear(rxr->ag_bitmap, next);
                rxr->ag_prod = next;
                next = RING_NEXT(rxr->ag_ring_struct, next);
        }

        return 0;
}

static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
                         struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
                         uint8_t agg_buf, struct bnxt_tpa_info *tpa_info)
{
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        int i;
        uint16_t cp_cons, ag_cons;
        struct rx_pkt_cmpl *rxcmp;
        struct rte_mbuf *last = mbuf;
        bool is_thor_tpa = tpa_info && BNXT_CHIP_THOR(rxq->bp);

        for (i = 0; i < agg_buf; i++) {
                struct bnxt_sw_rx_bd *ag_buf;
                struct rte_mbuf *ag_mbuf;

                if (is_thor_tpa) {
                        rxcmp = (void *)&tpa_info->agg_arr[i];
                } else {
                        *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
                        cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
                        rxcmp = (struct rx_pkt_cmpl *)
                                        &cpr->cp_desc_ring[cp_cons];
                }

#ifdef BNXT_DEBUG
                bnxt_dump_cmpl(cp_cons, rxcmp);
#endif

                ag_cons = rxcmp->opaque;
                RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
                ag_buf = &rxr->ag_buf_ring[ag_cons];
                ag_mbuf = ag_buf->mbuf;
                RTE_ASSERT(ag_mbuf != NULL);

                ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);

                mbuf->nb_segs++;
                mbuf->pkt_len += ag_mbuf->data_len;

                last->next = ag_mbuf;
                last = ag_mbuf;

                ag_buf->mbuf = NULL;

                /*
                 * Aggregation buffers are consumed out of order by the
                 * TPA module, so use a bitmap to track freed slots that
                 * still need to be re-allocated and posted to the NIC.
                 */
                rte_bitmap_set(rxr->ag_bitmap, ag_cons);
        }
        bnxt_prod_ag_mbuf(rxq);
        return 0;
}

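/*
 * Complete a TPA aggregation: attach any outstanding aggregation
 * buffers to the context's mbuf, record the payload offset in l4_len,
 * and replace the context mbuf so the next aggregation can start
 * immediately.
 */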
static inline struct rte_mbuf *bnxt_tpa_end(
                struct bnxt_rx_queue *rxq,
                uint32_t *raw_cp_cons,
                struct rx_tpa_end_cmpl *tpa_end,
                struct rx_tpa_end_cmpl_hi *tpa_end1)
{
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint16_t agg_id;
        struct rte_mbuf *mbuf;
        uint8_t agg_bufs;
        uint8_t payload_offset;
        struct bnxt_tpa_info *tpa_info;

        if (BNXT_CHIP_THOR(rxq->bp)) {
                struct rx_tpa_v2_end_cmpl *th_tpa_end;
                struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;

                th_tpa_end = (void *)tpa_end;
                th_tpa_end1 = (void *)tpa_end1;
                agg_id = BNXT_TPA_END_AGG_ID_TH(th_tpa_end);
                agg_bufs = BNXT_TPA_END_AGG_BUFS_TH(th_tpa_end1);
                payload_offset = th_tpa_end1->payload_offset;
        } else {
                agg_id = BNXT_TPA_END_AGG_ID(tpa_end);
                agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
                if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
                        return NULL;
                payload_offset = tpa_end->payload_offset;
        }

        tpa_info = &rxr->tpa_info[agg_id];
        mbuf = tpa_info->mbuf;
        RTE_ASSERT(mbuf != NULL);

        rte_prefetch0(mbuf);
        if (agg_bufs)
                bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs, tpa_info);
        mbuf->l4_len = payload_offset;

        struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
        RTE_ASSERT(new_data != NULL);
        if (!new_data) {
                rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
                return NULL;
        }
        tpa_info->mbuf = new_data;

        return mbuf;
}

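/*
 * On Thor (P5) chips the TPA end completion uses the rx_tpa_v2_*
 * layout: the aggregation count and payload offset come from those
 * records, and the aggregation entries were already buffered in
 * tpa_info->agg_arr by bnxt_rx_pkt(), so no completion-ring validity
 * check is needed in that branch.
 */
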
static uint32_t
bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
{
        uint32_t l3, pkt_type = 0;
        uint32_t t_ipcs = 0, ip6 = 0, vlan = 0;
        uint32_t flags_type;

        vlan = !!(rxcmp1->flags2 &
                  rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
        pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;

        t_ipcs = !!(rxcmp1->flags2 &
                    rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));
        ip6 = !!(rxcmp1->flags2 &
                 rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));

        flags_type = rxcmp->flags_type &
                     rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);

        if (!t_ipcs && !ip6)
                l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
        else if (!t_ipcs && ip6)
                l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
        else if (t_ipcs && !ip6)
                l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
        else
                l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;

        switch (flags_type) {
        case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP):
                if (!t_ipcs)
                        pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
                else
                        pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
                break;

        case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP):
                if (!t_ipcs)
                        pkt_type |= l3 | RTE_PTYPE_L4_TCP;
                else
                        pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
                break;

        case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP):
                if (!t_ipcs)
                        pkt_type |= l3 | RTE_PTYPE_L4_UDP;
                else
                        pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
                break;

        case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP):
                pkt_type |= l3;
                break;
        }

        return pkt_type;
}

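/*
 * Example: with vlan = 1, t_ipcs = 1, ip6 = 1 and itype = TCP above,
 * the parsed type is RTE_PTYPE_L2_ETHER_VLAN |
 * RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP.
 */
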
#ifdef RTE_LIBRTE_IEEE1588
static void
bnxt_get_rx_ts_thor(struct bnxt *bp, uint32_t rx_ts_cmpl)
{
        uint64_t systime_cycles = 0;

        if (!BNXT_CHIP_THOR(bp))
                return;

        /* On Thor, Rx timestamps are provided directly in the Rx
         * completion records. Only the low 32 bits of the timestamp are
         * present in the completion, so the driver reads the current
         * 48-bit free-running timer using the HWRM_PORT_TS_QUERY command
         * and combines the upper 16 bits from the HWRM response with the
         * lower 32 bits from the Rx completion to produce the 48-bit
         * timestamp for the Rx packet.
         */
        bnxt_hwrm_port_ts_query(bp, BNXT_PTP_FLAGS_CURRENT_TIME,
                                &systime_cycles);
        bp->ptp_cfg->rx_timestamp = (systime_cycles & 0xFFFF00000000);
        bp->ptp_cfg->rx_timestamp |= rx_ts_cmpl;
}
#endif

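/*
 * Worked example of the 48-bit combine in bnxt_get_rx_ts_thor(): if the
 * free-running timer reads 0x123456789ABC and the completion carries
 * 0x9ABCDEF0, the reconstructed timestamp is
 * (0x123456789ABC & 0xFFFF00000000) | 0x9ABCDEF0 = 0x12349ABCDEF0.
 */
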
static void
bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
                          struct rte_mbuf *mbuf)
{
        uint32_t cfa_code;
        uint32_t meta_fmt;
        uint32_t meta;
        bool gfid = false;
        uint32_t mark_id;
        uint32_t flags2;
        uint32_t gfid_support = 0;
        int rc;
        uint32_t vfr_flag;

        if (BNXT_GFID_ENABLED(bp))
                gfid_support = 1;

        cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
        flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
        meta = rte_le_to_cpu_32(rxcmp1->metadata);

        /*
         * Bits [6:4] of flags2 indicate whether the flow was matched in
         * TCAM, EM, or EEM.
         */
        meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
                   BNXT_CFA_META_FMT_SHFT;

        switch (meta_fmt) {
        case 0:
                if (gfid_support) {
                        /* Not an LFID or GFID, a flush cmd. */
                        goto skip_mark;
                } else {
                        /* LFID mode, no VLAN scenario */
                        gfid = false;
                }
                break;
        case 4:
        case 5:
                /*
                 * EM/TCAM case.
                 * Assume that EM doesn't support Mark due to GFID
                 * collisions with EEM. Simply return without setting the
                 * mark in the mbuf.
                 */
                if (BNXT_CFA_META_EM_TEST(meta)) {
                        /* This is an EM hit: {EM(1), GFID[27:16], 19'd0 or vtag} */
                        gfid = true;
                        meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
                        cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
                } else {
                        /*
                         * It is a TCAM entry, so it is an LFID.
                         * The TCAM IDX and Mode can also be determined
                         * by decoding the meta_data. We are not using
                         * these for now.
                         */
                }
                break;
        case 6:
        case 7:
                /* EEM case; only the GFID is used for EEM for now. */
                gfid = true;

                /*
                 * For EEM flows, the low 16 bits of cfa_code come from
                 * the completion. The rest is embedded in the metadata
                 * field from bit 19 onwards: skip the first 19 bits of
                 * metadata and use the next 12 bits as the upper 12 bits
                 * of cfa_code.
                 */
                meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
                cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
                break;
        default:
                /* For other values, the cfa_code is assumed to be an LFID. */
                break;
        }

        rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
                                  cfa_code, &vfr_flag, &mark_id);
        if (!rc) {
                /* Got the mark, write it to the mbuf and return. */
                mbuf->hash.fdir.hi = mark_id;
                mbuf->udata64 = (cfa_code & 0xffffffffull) << 32;
                mbuf->hash.fdir.id = rxcmp1->cfa_code;
                mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
                return;
        }

skip_mark:
        mbuf->hash.fdir.hi = 0;
        mbuf->hash.fdir.id = 0;
}

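/*
 * Both mark helpers deliver the flow mark through the standard DPDK
 * channel: hash.fdir.hi plus the PKT_RX_FDIR/PKT_RX_FDIR_ID ol_flags,
 * which applications read from the mbuf after rte_eth_rx_burst().
 */
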
void bnxt_set_mark_in_mbuf(struct bnxt *bp,
                           struct rx_pkt_cmpl_hi *rxcmp1,
                           struct rte_mbuf *mbuf)
{
        uint32_t cfa_code = 0;
        uint8_t meta_fmt = 0;
        uint16_t flags2 = 0;
        uint32_t meta = 0;

        cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
        if (!cfa_code)
                return;

        if (!bp->mark_table[cfa_code].valid)
                return;

        flags2 = rte_le_to_cpu_16(rxcmp1->flags2);
        meta = rte_le_to_cpu_32(rxcmp1->metadata);
        if (meta) {
                meta >>= BNXT_RX_META_CFA_CODE_SHIFT;

                /*
                 * Bits [6:4] of flags2 indicate whether the flow was
                 * matched in TCAM, EM, or EEM.
                 */
                meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
                           BNXT_CFA_META_FMT_SHFT;

                /*
                 * meta_fmt == 4 => 'b100 => 'b10x => EM.
                 * meta_fmt == 5 => 'b101 => 'b10x => EM + VLAN.
                 * meta_fmt == 6 => 'b110 => 'b11x => EEM.
                 * meta_fmt == 7 => 'b111 => 'b11x => EEM + VLAN.
                 */
                meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT;
        }

        mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
        mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}

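/*
 * Process one Rx completion. Return values, per the paths below:
 *   0        a packet was placed in *rx_pkt
 *   -EBUSY   the completion is not fully written yet; retry later
 *   -EINVAL  a TPA start/agg or unknown record was consumed, no packet
 *   -ENOMEM  packet delivered but the replacement mbuf allocation failed
 *   -ENODEV  packet was diverted to a port representor (bnxt_vfr_recv)
 */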
static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
                       struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        struct rx_pkt_cmpl *rxcmp;
        struct rx_pkt_cmpl_hi *rxcmp1;
        uint32_t tmp_raw_cons = *raw_cons;
        uint16_t cons, prod, cp_cons =
            RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
        struct rte_mbuf *mbuf;
        int rc = 0;
        uint8_t agg_buf = 0;
        uint16_t cmp_type;
        uint32_t flags2_f = 0;
        uint16_t flags_type;
        struct bnxt *bp = rxq->bp;

        rxcmp = (struct rx_pkt_cmpl *)
            &cpr->cp_desc_ring[cp_cons];

        cmp_type = CMP_TYPE(rxcmp);

        if (cmp_type == RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG) {
                struct rx_tpa_v2_abuf_cmpl *rx_agg = (void *)rxcmp;
                uint16_t agg_id = rte_cpu_to_le_16(rx_agg->agg_id);
                struct bnxt_tpa_info *tpa_info;

                tpa_info = &rxr->tpa_info[agg_id];
                RTE_ASSERT(tpa_info->agg_count < 16);
                tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
                rc = -EINVAL; /* Continue w/o new mbuf */
                goto next_rx;
        }

        tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
        cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
        rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];

        if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
                return -EBUSY;

        cpr->valid = FLIP_VALID(cp_cons,
                                cpr->cp_ring_struct->ring_mask,
                                cpr->valid);

        if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) {
                bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
                               (struct rx_tpa_start_cmpl_hi *)rxcmp1);
                rc = -EINVAL; /* Continue w/o new mbuf */
                goto next_rx;
        } else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
                mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
                                    (struct rx_tpa_end_cmpl *)rxcmp,
                                    (struct rx_tpa_end_cmpl_hi *)rxcmp1);
                if (unlikely(!mbuf))
                        return -EBUSY;
                *rx_pkt = mbuf;
                goto next_rx;
        } else if (cmp_type != 0x11) {
                rc = -EINVAL;
                goto next_rx;
        }

        agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
                        >> RX_PKT_CMPL_AGG_BUFS_SFT;
        if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
                return -EBUSY;

        prod = rxr->rx_prod;

        cons = rxcmp->opaque;
        mbuf = bnxt_consume_rx_buf(rxr, cons);
        if (mbuf == NULL)
                return -EBUSY;

        rte_prefetch0(mbuf);

        mbuf->data_off = RTE_PKTMBUF_HEADROOM;
        mbuf->nb_segs = 1;
        mbuf->next = NULL;
        mbuf->pkt_len = rxcmp->len;
        mbuf->data_len = mbuf->pkt_len;
        mbuf->port = rxq->port_id;
        mbuf->ol_flags = 0;

        flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
        if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
                mbuf->hash.rss = rxcmp->rss_hash;
                mbuf->ol_flags |= PKT_RX_RSS_HASH;
        }

        if (BNXT_TRUFLOW_EN(bp))
                bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);
        else
                bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);

#ifdef RTE_LIBRTE_IEEE1588
        if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
                     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)) {
                mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
                bnxt_get_rx_ts_thor(rxq->bp, rxcmp1->reorder);
        }
#endif
        if (agg_buf)
                bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf, NULL);

        if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
                mbuf->vlan_tci = rxcmp1->metadata &
                        (RX_PKT_CMPL_METADATA_VID_MASK |
                         RX_PKT_CMPL_METADATA_DE |
                         RX_PKT_CMPL_METADATA_PRI_MASK);
                mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
        }

        flags2_f = flags2_0xf(rxcmp1);
        /* IP Checksum */
        if (likely(IS_IP_NONTUNNEL_PKT(flags2_f))) {
                if (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1)))
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
                else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
                else
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
        } else if (IS_IP_TUNNEL_PKT(flags2_f)) {
                if (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) ||
                             RX_CMP_IP_CS_ERROR(rxcmp1)))
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
                else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
                else
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
        }

        /* L4 Checksum */
        if (likely(IS_L4_NONTUNNEL_PKT(flags2_f))) {
                if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
                        mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
                else
                        mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
        } else if (IS_L4_TUNNEL_PKT(flags2_f)) {
                if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
                        mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
                else
                        mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
                if (unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) {
                        mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
                } else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS
                                    (flags2_f))) {
                        mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
                } else {
                        mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
                }
        } else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) {
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
        }

        mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);

#ifdef BNXT_DEBUG
        if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
                /* Re-install the mbuf back to the rx ring */
                bnxt_reuse_rx_mbuf(rxr, mbuf);

                rc = -EIO;
                goto next_rx;
        }
#endif
        /*
         * TODO: Redesign this...
         * If the allocation fails, the packet does not get received.
         * Simply returning here slowly starves the producer ring.
         * Instead, "filling up" the producer just before ringing the
         * doorbell could be a better solution since it will let the
         * producer ring starve until memory is available again, pushing
         * the drops into hardware and getting them out of the driver,
         * allowing recovery to a full producer ring.
         *
         * This could also help with cache usage by preventing per-packet
         * calls in favour of a tight loop with the same function being
         * called in it.
         */
        prod = RING_NEXT(rxr->rx_ring_struct, prod);
        if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
                PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
                rc = -ENOMEM;
                goto rx;
        }
        rxr->rx_prod = prod;
        /*
         * All mbufs are allocated with the same size under DPDK,
         * no optimization for rx_copy_thresh.
         */
rx:
        *rx_pkt = mbuf;

        if ((BNXT_VF_IS_TRUSTED(rxq->bp) || BNXT_PF(rxq->bp)) &&
            rxq->bp->cfa_code_map && rxcmp1->cfa_code) {
                if (!bnxt_vfr_recv(rxq->bp, rxcmp1->cfa_code, rxq->queue_id,
                                   mbuf)) {
                        /*
                         * Return an error so that nb_rx_pkts is not
                         * incremented: this packet was meant for the
                         * representor, so it must not be counted or
                         * handed to the parent's Rx burst caller.
                         */
                        rc = -ENODEV;
                }
        }

next_rx:

        *raw_cons = tmp_raw_cons;

        return rc;
}

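/*
 * Burst Rx entry point, installed as the device's rx_pkt_burst hook;
 * applications reach it via rte_eth_rx_burst() (illustrative only):
 *
 *     nb = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SZ);
 *
 * Besides packet completions, the loop below also handles async (HWRM
 * event) completions when no dedicated async completion ring exists,
 * and counts representor-bound packets separately so they are not
 * returned to the caller.
 */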
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts)
{
        struct bnxt_rx_queue *rxq = rx_queue;
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint32_t raw_cons = cpr->cp_raw_cons;
        uint32_t cons;
        int nb_rx_pkts = 0;
        int nb_rep_rx_pkts = 0;
        struct rx_pkt_cmpl *rxcmp;
        uint16_t prod = rxr->rx_prod;
        uint16_t ag_prod = rxr->ag_prod;
        int rc = 0;
        bool evt = false;

        if (unlikely(is_bnxt_in_error(rxq->bp)))
                return 0;

        /* If the Rx queue was stopped, return. */
        if (unlikely(!rxq->rx_started ||
                     !rte_spinlock_trylock(&rxq->lock)))
                return 0;

        /* Handle the Rx burst request */
        while (1) {
                cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
                rte_prefetch0(&cpr->cp_desc_ring[cons]);
                rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

                if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
                        break;
                cpr->valid = FLIP_VALID(cons,
                                        cpr->cp_ring_struct->ring_mask,
                                        cpr->valid);

                /* TODO: Avoid magic numbers... */
                if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
                        rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
                        if (likely(!rc) || rc == -ENOMEM)
                                nb_rx_pkts++;
                        if (rc == -EBUSY)       /* partial completion */
                                break;
                        if (rc == -ENODEV)      /* completion for representor */
                                nb_rep_rx_pkts++;
                } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
                        evt =
                        bnxt_event_hwrm_resp_handler(rxq->bp,
                                                     (struct cmpl_base *)rxcmp);
                        /* If the async event is a fatal error, return. */
                        if (unlikely(is_bnxt_in_error(rxq->bp)))
                                goto done;
                }

                raw_cons = NEXT_RAW_CMP(raw_cons);
                if (nb_rx_pkts == nb_pkts || evt)
                        break;
                /* Post some Rx bufs early in case of larger burst processing */
                if (nb_rx_pkts == BNXT_RX_POST_THRESH)
                        bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
        }

        cpr->cp_raw_cons = raw_cons;
        if (!nb_rx_pkts && !nb_rep_rx_pkts && !evt) {
                /*
                 * No new completions were processed, so there is no need
                 * to ring the completion doorbell again.
                 */
                goto done;
        }

        if (prod != rxr->rx_prod)
                bnxt_db_write(&rxr->rx_db, rxr->rx_prod);

        /* Ring the AGG ring DB */
        if (ag_prod != rxr->ag_prod)
                bnxt_db_write(&rxr->ag_db, rxr->ag_prod);

        bnxt_db_cq(cpr);

        /* Attempt to alloc Rx bufs in case of a previous allocation failure. */
        if (rc == -ENOMEM) {
                int i = RING_NEXT(rxr->rx_ring_struct, prod);
                int cnt = nb_rx_pkts;

                for (; cnt;
                     i = RING_NEXT(rxr->rx_ring_struct, i), cnt--) {
                        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];

                        /* Buffer already allocated for this index. */
                        if (rx_buf->mbuf != NULL)
                                continue;

                        /* This slot is empty. Alloc a buffer for Rx. */
                        if (!bnxt_alloc_rx_data(rxq, rxr, i)) {
                                rxr->rx_prod = i;
                                bnxt_db_write(&rxr->rx_db, rxr->rx_prod);
                        } else {
                                PMD_DRV_LOG(ERR, "mbuf alloc failed\n");
                                break;
                        }
                }
        }

done:
        rte_spinlock_unlock(&rxq->lock);

        return nb_rx_pkts;
}

/*
 * Dummy DPDK callback for RX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 */
uint16_t
bnxt_dummy_recv_pkts(void *rx_queue __rte_unused,
                     struct rte_mbuf **rx_pkts __rte_unused,
                     uint16_t nb_pkts __rte_unused)
{
        return 0;
}

void bnxt_free_rx_rings(struct bnxt *bp)
{
        int i;
        struct bnxt_rx_queue *rxq;

        if (!bp->rx_queues)
                return;

        for (i = 0; i < (int)bp->rx_nr_rings; i++) {
                rxq = bp->rx_queues[i];
                if (!rxq)
                        continue;

                bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
                rte_free(rxq->rx_ring->rx_ring_struct);

                /* Free the aggregation ring */
                bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
                rte_free(rxq->rx_ring->ag_ring_struct);
                rxq->rx_ring->ag_ring_struct = NULL;

                rte_free(rxq->rx_ring);

                bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
                rte_free(rxq->cp_ring->cp_ring_struct);
                rte_free(rxq->cp_ring);

                rte_free(rxq);
                bp->rx_queues[i] = NULL;
        }
}

int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
        struct bnxt_cp_ring_info *cpr;
        struct bnxt_rx_ring_info *rxr;
        struct bnxt_ring *ring;

        rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);

        rxr = rte_zmalloc_socket("bnxt_rx_ring",
                                 sizeof(struct bnxt_rx_ring_info),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxr == NULL)
                return -ENOMEM;
        rxq->rx_ring = rxr;

        ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
                                  sizeof(struct bnxt_ring),
                                  RTE_CACHE_LINE_SIZE, socket_id);
        if (ring == NULL)
                return -ENOMEM;
        rxr->rx_ring_struct = ring;
        ring->ring_size = rte_align32pow2(rxq->nb_rx_desc);
        ring->ring_mask = ring->ring_size - 1;
        ring->bd = (void *)rxr->rx_desc_ring;
        ring->bd_dma = rxr->rx_desc_mapping;
        ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
        ring->vmem = (void **)&rxr->rx_buf_ring;

        cpr = rte_zmalloc_socket("bnxt_rx_ring",
                                 sizeof(struct bnxt_cp_ring_info),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (cpr == NULL)
                return -ENOMEM;
        rxq->cp_ring = cpr;

        ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
                                  sizeof(struct bnxt_ring),
                                  RTE_CACHE_LINE_SIZE, socket_id);
        if (ring == NULL)
                return -ENOMEM;
        cpr->cp_ring_struct = ring;
        ring->ring_size = rte_align32pow2(rxr->rx_ring_struct->ring_size *
                                          (2 + AGG_RING_SIZE_FACTOR));
        ring->ring_mask = ring->ring_size - 1;
        ring->bd = (void *)cpr->cp_desc_ring;
        ring->bd_dma = cpr->cp_desc_mapping;
        ring->vmem_size = 0;
        ring->vmem = NULL;

        /* Allocate the aggregation ring */
        ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
                                  sizeof(struct bnxt_ring),
                                  RTE_CACHE_LINE_SIZE, socket_id);
        if (ring == NULL)
                return -ENOMEM;
        rxr->ag_ring_struct = ring;
        ring->ring_size = rte_align32pow2(rxq->nb_rx_desc *
                                          AGG_RING_SIZE_FACTOR);
        ring->ring_mask = ring->ring_size - 1;
        ring->bd = (void *)rxr->ag_desc_ring;
        ring->bd_dma = rxr->ag_desc_mapping;
        ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
        ring->vmem = (void **)&rxr->ag_buf_ring;

        return 0;
}

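/*
 * Sizing note: the completion ring above holds
 * rx_ring_size * (2 + AGG_RING_SIZE_FACTOR) entries, enough for the
 * worst case of two completion records per received packet plus one per
 * aggregation buffer.
 */
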
static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type,
                            uint16_t len)
{
        uint32_t j;
        struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd;

        if (!rx_bd_ring)
                return;
        for (j = 0; j < ring->ring_size; j++) {
                rx_bd_ring[j].flags_type = rte_cpu_to_le_16(type);
                rx_bd_ring[j].len = rte_cpu_to_le_16(len);
                rx_bd_ring[j].opaque = j;
        }
}

int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
{
        struct bnxt_rx_ring_info *rxr;
        struct bnxt_ring *ring;
        uint32_t prod, type;
        unsigned int i;
        uint16_t size;

        size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
        size = RTE_MIN(BNXT_MAX_PKT_LEN, size);

        type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;

        rxr = rxq->rx_ring;
        ring = rxr->rx_ring_struct;
        bnxt_init_rxbds(ring, type, size);

        prod = rxr->rx_prod;
        for (i = 0; i < ring->ring_size; i++) {
                if (unlikely(!rxr->rx_buf_ring[i].mbuf)) {
                        if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
                                PMD_DRV_LOG(WARNING,
                                            "init'ed rx ring %d with %d/%d mbufs only\n",
                                            rxq->queue_id, i, ring->ring_size);
                                break;
                        }
                }
                rxr->rx_prod = prod;
                prod = RING_NEXT(rxr->rx_ring_struct, prod);
        }

        ring = rxr->ag_ring_struct;
        type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
        bnxt_init_rxbds(ring, type, size);
        prod = rxr->ag_prod;

        for (i = 0; i < ring->ring_size; i++) {
                if (unlikely(!rxr->ag_buf_ring[i].mbuf)) {
                        if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
                                PMD_DRV_LOG(WARNING,
                                            "init'ed AG ring %d with %d/%d mbufs only\n",
                                            rxq->queue_id, i, ring->ring_size);
                                break;
                        }
                }
                rxr->ag_prod = prod;
                prod = RING_NEXT(rxr->ag_ring_struct, prod);
        }
        PMD_DRV_LOG(DEBUG, "AGG Done!\n");

        if (rxr->tpa_info) {
                unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);

                for (i = 0; i < max_aggs; i++) {
                        if (unlikely(!rxr->tpa_info[i].mbuf)) {
                                rxr->tpa_info[i].mbuf =
                                        __bnxt_alloc_rx_data(rxq->mb_pool);
                                if (!rxr->tpa_info[i].mbuf) {
                                        rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
                                        return -ENOMEM;
                                }
                        }
                }
        }
        PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");

        return 0;
}