net/qede: fix maximum Rx packet length
dpdk.git: drivers/net/qede/qede_rxtx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_net.h>
#include "qede_rxtx.h"

static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
{
        struct rte_mbuf *new_mb = NULL;
        struct eth_rx_bd *rx_bd;
        dma_addr_t mapping;
        uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);

        new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
        if (unlikely(!new_mb)) {
                PMD_RX_LOG(ERR, rxq,
                           "Failed to allocate rx buffer "
                           "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
                           idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
                           rte_mempool_avail_count(rxq->mb_pool),
                           rte_mempool_in_use_count(rxq->mb_pool));
                return -ENOMEM;
        }
        rxq->sw_rx_ring[idx] = new_mb;
        mapping = rte_mbuf_data_iova_default(new_mb);
        /* Advance PROD and get BD pointer */
        rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
        rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
        rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
        rxq->sw_rx_prod++;
        return 0;
}
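
/* Illustration: the software ring index comes from masking the free-running
 * producer counter. Ring sizes are validated as powers of two in
 * qede_rx_queue_setup(), so, assuming NUM_RX_BDS() evaluates to ring size - 1,
 * the mask behaves as a modulo and wraps cleanly:
 *
 *     nb_rx_desc = 512, mask = 511 (0x1ff)
 *     sw_rx_prod = 513  ->  idx = 513 & 0x1ff = 1
 *
 * Overflow of the 16-bit counter is harmless for the same reason.
 */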

#define QEDE_MAX_BULK_ALLOC_COUNT 512

static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
{
        void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
        struct rte_mbuf *mbuf = NULL;
        struct eth_rx_bd *rx_bd;
        dma_addr_t mapping;
        int i, ret = 0;
        uint16_t idx;

        idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);

        ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
        if (unlikely(ret)) {
                PMD_RX_LOG(ERR, rxq,
                           "Failed to allocate %d rx buffers "
                           "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
                           count, idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
                           rte_mempool_avail_count(rxq->mb_pool),
                           rte_mempool_in_use_count(rxq->mb_pool));
                return -ENOMEM;
        }

        for (i = 0; i < count; i++) {
                mbuf = obj_p[i];
                if (likely(i < count - 1))
                        rte_prefetch0(obj_p[i + 1]);

                idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
                rxq->sw_rx_ring[idx] = mbuf;
                mapping = rte_mbuf_data_iova_default(mbuf);
                rx_bd = (struct eth_rx_bd *)
                        ecore_chain_produce(&rxq->rx_bd_ring);
                rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
                rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
                rxq->sw_rx_prod++;
        }

        return 0;
}
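
/* Note: obj_p[] is a fixed stack array of QEDE_MAX_BULK_ALLOC_COUNT entries
 * and count is not range-checked here, so callers must cap their request at
 * QEDE_MAX_BULK_ALLOC_COUNT before calling qede_alloc_rx_bulk_mbufs().
 */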

/* Criteria for calculating Rx buffer size -
 * 1) rx_buf_size should not exceed the size of mbuf
 * 2) In scattered_rx mode - minimum rx_buf_size should be
 *    (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
 * 3) In regular mode - minimum rx_buf_size should be
 *    (MTU + Maximum L2 Header Size + 2)
 *    In the above cases +2 corresponds to 2 bytes of padding in front of the
 *    L2 header.
 * 4) rx_buf_size should be cacheline-size aligned. So considering
 *    criterion 1, we need to adjust the size to floor instead of ceil,
 *    so that we don't exceed the mbuf size while rounding rx_buf_size.
 */
int
qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
                      uint16_t max_frame_size)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        int rx_buf_size;

        if (dev->data->scattered_rx) {
                /* Per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT buffers
                 * can be used for a single packet, so the mbuf size must be
                 * sufficient for this.
                 */
                if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
                     (max_frame_size + QEDE_ETH_OVERHEAD)) {
                        DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
                               mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
                        return -EINVAL;
                }

                rx_buf_size = RTE_MAX(mbufsz,
                                      (max_frame_size + QEDE_ETH_OVERHEAD) /
                                       ETH_RX_MAX_BUFF_PER_PKT);
        } else {
                rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
        }

        /* Align to cache-line size if needed */
        return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
}
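
/* Worked example, with assumed values (QEDE_ETH_OVERHEAD = 24,
 * ETH_RX_MAX_BUFF_PER_PKT = 5, 64-byte cache lines): for a 2048-byte mbuf
 * data room and a 9018-byte max frame in scattered mode,
 *
 *     (9018 + 24) / 5 = 1808  ->  RTE_MAX(2048, 1808) = 2048
 *     QEDE_FLOOR_TO_CACHE_LINE_SIZE(2048) = 2048
 *
 * while in regular mode with a 1518-byte max frame,
 *
 *     1518 + 24 = 1542  ->  floored to 1536
 *
 * Flooring rather than ceiling keeps the result within the mbuf size
 * (criterion 1 above).
 */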

static struct qede_rx_queue *
qede_alloc_rx_queue_mem(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
                        uint16_t nb_desc,
                        unsigned int socket_id,
                        struct rte_mempool *mp,
                        uint16_t bufsz)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_rx_queue *rxq;
        size_t size;
        int rc;

        /* First allocate the rx queue data structure */
        rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);

        if (!rxq) {
                DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
                       socket_id);
                return NULL;
        }

        rxq->qdev = qdev;
        rxq->mb_pool = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->queue_id = queue_idx;
        rxq->port_id = dev->data->port_id;

        rxq->rx_buf_size = bufsz;

        DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
                qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);

        /* Allocate the parallel driver ring for Rx buffers */
        size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
        rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
                                             RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq->sw_rx_ring) {
                DP_ERR(edev, "Memory allocation failed for sw_rx_ring on"
                       " socket %u\n", socket_id);
                rte_free(rxq);
                return NULL;
        }

        /* Allocate FW Rx ring */
        rc = qdev->ops->common->chain_alloc(edev,
                                            ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
                                            ECORE_CHAIN_MODE_NEXT_PTR,
                                            ECORE_CHAIN_CNT_TYPE_U16,
                                            rxq->nb_rx_desc,
                                            sizeof(struct eth_rx_bd),
                                            &rxq->rx_bd_ring,
                                            NULL);

        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev, "Memory allocation failed for RX BD ring"
                       " on socket %u\n", socket_id);
                rte_free(rxq->sw_rx_ring);
                rte_free(rxq);
                return NULL;
        }

        /* Allocate FW completion ring */
        rc = qdev->ops->common->chain_alloc(edev,
                                            ECORE_CHAIN_USE_TO_CONSUME,
                                            ECORE_CHAIN_MODE_PBL,
                                            ECORE_CHAIN_CNT_TYPE_U16,
                                            rxq->nb_rx_desc,
                                            sizeof(union eth_rx_cqe),
                                            &rxq->rx_comp_ring,
                                            NULL);

        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev, "Memory allocation failed for RX CQE ring"
                       " on socket %u\n", socket_id);
                qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
                rte_free(rxq->sw_rx_ring);
                rte_free(rxq);
                return NULL;
        }

        return rxq;
}

int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
                    uint16_t nb_desc, unsigned int socket_id,
                    __rte_unused const struct rte_eth_rxconf *rx_conf,
                    struct rte_mempool *mp)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
        struct qede_rx_queue *rxq;
        uint16_t max_rx_pktlen;
        uint16_t bufsz;
        int rc;

        PMD_INIT_FUNC_TRACE(edev);

        /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
        if (!rte_is_power_of_2(nb_desc)) {
                DP_ERR(edev, "Ring size %u is not a power of 2\n",
                       nb_desc);
                return -EINVAL;
        }

        /* Free memory prior to re-allocation if needed... */
        if (dev->data->rx_queues[qid] != NULL) {
                qede_rx_queue_release(dev->data->rx_queues[qid]);
                dev->data->rx_queues[qid] = NULL;
        }

        max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        /* Fix up RX buffer size */
        bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
        /* cache align the mbuf size to simplify rx_buf_size calculation */
        bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
        if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)     ||
            (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
                if (!dev->data->scattered_rx) {
                        DP_INFO(edev, "Forcing scatter-gather mode\n");
                        dev->data->scattered_rx = 1;
                }
        }

        rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pktlen);
        if (rc < 0)
                return rc;

        bufsz = rc;

        if (ECORE_IS_CMT(edev)) {
                rxq = qede_alloc_rx_queue_mem(dev, qid * 2, nb_desc,
                                              socket_id, mp, bufsz);
                if (!rxq)
                        return -ENOMEM;

                qdev->fp_array[qid * 2].rxq = rxq;
                rxq = qede_alloc_rx_queue_mem(dev, qid * 2 + 1, nb_desc,
                                              socket_id, mp, bufsz);
                if (!rxq)
                        return -ENOMEM;

                qdev->fp_array[qid * 2 + 1].rxq = rxq;
                /* provide per engine fp struct as rx queue */
                dev->data->rx_queues[qid] = &qdev->fp_array_cmt[qid];
        } else {
                rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,
                                              socket_id, mp, bufsz);
                if (!rxq)
                        return -ENOMEM;

                dev->data->rx_queues[qid] = rxq;
                qdev->fp_array[qid].rxq = rxq;
        }

        DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
                qid, nb_desc, rxq->rx_buf_size, socket_id);

        return 0;
}
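
/* Note: scatter-gather is forced above either when the application requests
 * RTE_ETH_RX_OFFLOAD_SCATTER explicitly or when the maximum Rx packet length
 * (MTU + Ethernet header + CRC) plus QEDE_ETH_OVERHEAD exceeds the
 * cache-line-floored mbuf data room. For instance, with a 2048-byte data
 * room a 1500-byte MTU fits in one buffer, while a 9000-byte MTU flips
 * dev->data->scattered_rx to 1.
 */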

static void
qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
                    struct qede_rx_queue *rxq)
{
        DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
        ecore_chain_reset(&rxq->rx_bd_ring);
        ecore_chain_reset(&rxq->rx_comp_ring);
        rxq->sw_rx_prod = 0;
        rxq->sw_rx_cons = 0;
        *rxq->hw_cons_ptr = 0;
}

static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
{
        uint16_t i;

        if (rxq->sw_rx_ring) {
                for (i = 0; i < rxq->nb_rx_desc; i++) {
                        if (rxq->sw_rx_ring[i]) {
                                rte_pktmbuf_free(rxq->sw_rx_ring[i]);
                                rxq->sw_rx_ring[i] = NULL;
                        }
                }
        }
}

static void _qede_rx_queue_release(struct qede_dev *qdev,
                                   struct ecore_dev *edev,
                                   struct qede_rx_queue *rxq)
{
        qede_rx_queue_release_mbufs(rxq);
        qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
        qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
        rte_free(rxq->sw_rx_ring);
        rte_free(rxq);
}

void qede_rx_queue_release(void *rx_queue)
{
        struct qede_rx_queue *rxq = rx_queue;
        struct qede_fastpath_cmt *fp_cmt;
        struct qede_dev *qdev;
        struct ecore_dev *edev;

        if (rxq) {
                qdev = rxq->qdev;
                edev = QEDE_INIT_EDEV(qdev);
                PMD_INIT_FUNC_TRACE(edev);
                if (ECORE_IS_CMT(edev)) {
                        fp_cmt = rx_queue;
                        _qede_rx_queue_release(qdev, edev, fp_cmt->fp0->rxq);
                        _qede_rx_queue_release(qdev, edev, fp_cmt->fp1->rxq);
                } else {
                        _qede_rx_queue_release(qdev, edev, rxq);
                }
        }
}

/* Stops a given RX queue in the HW */
static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_hwfn *p_hwfn;
        struct qede_rx_queue *rxq;
        int hwfn_index;
        int rc;

        if (rx_queue_id < qdev->num_rx_queues) {
                rxq = qdev->fp_array[rx_queue_id].rxq;
                hwfn_index = rx_queue_id % edev->num_hwfns;
                p_hwfn = &edev->hwfns[hwfn_index];
                rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
                                true, false);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "RX queue %u stop failed\n", rx_queue_id);
                        return -1;
                }
                qede_rx_queue_release_mbufs(rxq);
                qede_rx_queue_reset(qdev, rxq);
                eth_dev->data->rx_queue_state[rx_queue_id] =
                        RTE_ETH_QUEUE_STATE_STOPPED;
                DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
        } else {
                DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
                rc = -EINVAL;
        }

        return rc;
}

static struct qede_tx_queue *
qede_alloc_tx_queue_mem(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
                        uint16_t nb_desc,
                        unsigned int socket_id,
                        const struct rte_eth_txconf *tx_conf)
{
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct qede_tx_queue *txq;
        int rc;
        size_t sw_tx_ring_size;

        txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);

        if (txq == NULL) {
                DP_ERR(edev,
                       "Unable to allocate memory for txq on socket %u",
                       socket_id);
                return NULL;
        }

        txq->nb_tx_desc = nb_desc;
        txq->qdev = qdev;
        txq->port_id = dev->data->port_id;

        rc = qdev->ops->common->chain_alloc(edev,
                                            ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
                                            ECORE_CHAIN_MODE_PBL,
                                            ECORE_CHAIN_CNT_TYPE_U16,
                                            txq->nb_tx_desc,
                                            sizeof(union eth_tx_bd_types),
                                            &txq->tx_pbl,
                                            NULL);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev,
                       "Unable to allocate memory for txbd ring on socket %u",
                       socket_id);
                qede_tx_queue_release(txq);
                return NULL;
        }

        /* Allocate software ring */
        sw_tx_ring_size = sizeof(*txq->sw_tx_ring) * txq->nb_tx_desc;
        txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
                                             sw_tx_ring_size,
                                             RTE_CACHE_LINE_SIZE, socket_id);

        if (!txq->sw_tx_ring) {
                DP_ERR(edev,
                       "Unable to allocate memory for tx sw ring on socket %u",
                       socket_id);
                qdev->ops->common->chain_free(edev, &txq->tx_pbl);
                qede_tx_queue_release(txq);
                return NULL;
        }

        txq->queue_id = queue_idx;

        txq->nb_tx_avail = txq->nb_tx_desc;

        txq->tx_free_thresh =
            tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
            (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);

        DP_INFO(edev,
                "txq %u num_desc %u tx_free_thresh %u socket %u\n",
                queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
        return txq;
}
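
/* Note: when tx_conf->tx_free_thresh is 0, the threshold defaults to
 * nb_desc - QEDE_DEFAULT_TX_FREE_THRESH. Assuming QEDE_DEFAULT_TX_FREE_THRESH
 * is 32, a 512-descriptor ring would get a default tx_free_thresh of 480.
 */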

int
qede_tx_queue_setup(struct rte_eth_dev *dev,
                    uint16_t queue_idx,
                    uint16_t nb_desc,
                    unsigned int socket_id,
                    const struct rte_eth_txconf *tx_conf)
{
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct qede_tx_queue *txq;

        PMD_INIT_FUNC_TRACE(edev);

        if (!rte_is_power_of_2(nb_desc)) {
                DP_ERR(edev, "Ring size %u is not a power of 2\n",
                       nb_desc);
                return -EINVAL;
        }

        /* Free memory prior to re-allocation if needed... */
        if (dev->data->tx_queues[queue_idx] != NULL) {
                qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
                dev->data->tx_queues[queue_idx] = NULL;
        }

        if (ECORE_IS_CMT(edev)) {
                txq = qede_alloc_tx_queue_mem(dev, queue_idx * 2, nb_desc,
                                              socket_id, tx_conf);
                if (!txq)
                        return -ENOMEM;

                qdev->fp_array[queue_idx * 2].txq = txq;
                txq = qede_alloc_tx_queue_mem(dev, (queue_idx * 2) + 1, nb_desc,
                                              socket_id, tx_conf);
                if (!txq)
                        return -ENOMEM;

                qdev->fp_array[(queue_idx * 2) + 1].txq = txq;
                dev->data->tx_queues[queue_idx] =
                                        &qdev->fp_array_cmt[queue_idx];
        } else {
                txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,
                                              socket_id, tx_conf);
                if (!txq)
                        return -ENOMEM;

                dev->data->tx_queues[queue_idx] = txq;
                qdev->fp_array[queue_idx].txq = txq;
        }

        return 0;
}
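
/* Note: on CMT ("couple mode") devices, where one PCI function is backed by
 * two HW engines, each ethdev queue index is expanded into a pair of internal
 * queues (fp_array[queue_idx * 2] and fp_array[queue_idx * 2 + 1]) and the
 * per-pair qede_fastpath_cmt struct is handed back to ethdev, letting the
 * fast path spread work across both engines behind a single visible queue.
 */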

static void
qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
                    struct qede_tx_queue *txq)
{
        DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
        ecore_chain_reset(&txq->tx_pbl);
        txq->sw_tx_cons = 0;
        txq->sw_tx_prod = 0;
        *txq->hw_cons_ptr = 0;
}

static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
{
        uint16_t i;

        if (txq->sw_tx_ring) {
                for (i = 0; i < txq->nb_tx_desc; i++) {
                        if (txq->sw_tx_ring[i]) {
                                rte_pktmbuf_free(txq->sw_tx_ring[i]);
                                txq->sw_tx_ring[i] = NULL;
                        }
                }
        }
}

static void _qede_tx_queue_release(struct qede_dev *qdev,
                                   struct ecore_dev *edev,
                                   struct qede_tx_queue *txq)
{
        qede_tx_queue_release_mbufs(txq);
        qdev->ops->common->chain_free(edev, &txq->tx_pbl);
        rte_free(txq->sw_tx_ring);
        rte_free(txq);
}

void qede_tx_queue_release(void *tx_queue)
{
        struct qede_tx_queue *txq = tx_queue;
        struct qede_fastpath_cmt *fp_cmt;
        struct qede_dev *qdev;
        struct ecore_dev *edev;

        if (txq) {
                qdev = txq->qdev;
                edev = QEDE_INIT_EDEV(qdev);
                PMD_INIT_FUNC_TRACE(edev);

                if (ECORE_IS_CMT(edev)) {
                        fp_cmt = tx_queue;
                        _qede_tx_queue_release(qdev, edev, fp_cmt->fp0->txq);
                        _qede_tx_queue_release(qdev, edev, fp_cmt->fp1->txq);
                } else {
                        _qede_tx_queue_release(qdev, edev, txq);
                }
        }
}

/* This function allocates fast-path status block memory */
static int
qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
                  uint16_t sb_id)
{
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct status_block *sb_virt;
        dma_addr_t sb_phys;
        int rc;

        sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
                                          sizeof(struct status_block));
        if (!sb_virt) {
                DP_ERR(edev, "Status block allocation failed\n");
                return -ENOMEM;
        }
        rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
                                        sb_phys, sb_id);
        if (rc) {
                DP_ERR(edev, "Status block initialization failed\n");
                OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
                                       sizeof(struct status_block));
                return rc;
        }

        return 0;
}

int qede_alloc_fp_resc(struct qede_dev *qdev)
{
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_fastpath *fp;
        uint32_t num_sbs;
        uint16_t sb_idx;
        int i;

        PMD_INIT_FUNC_TRACE(edev);

        if (IS_VF(edev))
                ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
        else
                num_sbs = ecore_cxt_get_proto_cid_count
                          (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);

        if (num_sbs == 0) {
                DP_ERR(edev, "No status blocks available\n");
                return -EINVAL;
        }

        qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
                                sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);

        if (!qdev->fp_array) {
                DP_ERR(edev, "fp array allocation failed\n");
                return -ENOMEM;
        }

        memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
                        sizeof(*qdev->fp_array));

        if (ECORE_IS_CMT(edev)) {
                qdev->fp_array_cmt = rte_calloc("fp_cmt",
                                                QEDE_RXTX_MAX(qdev) / 2,
                                                sizeof(*qdev->fp_array_cmt),
                                                RTE_CACHE_LINE_SIZE);

                if (!qdev->fp_array_cmt) {
                        DP_ERR(edev, "fp array for CMT allocation failed\n");
                        return -ENOMEM;
                }

                memset((void *)qdev->fp_array_cmt, 0,
                       (QEDE_RXTX_MAX(qdev) / 2) * sizeof(*qdev->fp_array_cmt));

                /* Establish the mapping of fp_array with fp_array_cmt */
                for (i = 0; i < QEDE_RXTX_MAX(qdev) / 2; i++) {
                        qdev->fp_array_cmt[i].qdev = qdev;
                        qdev->fp_array_cmt[i].fp0 = &qdev->fp_array[i * 2];
                        qdev->fp_array_cmt[i].fp1 = &qdev->fp_array[i * 2 + 1];
                }
        }

        for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
                fp = &qdev->fp_array[sb_idx];
                fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
                                RTE_CACHE_LINE_SIZE);
                if (!fp->sb_info) {
                        DP_ERR(edev, "FP sb_info allocation failed\n");
                        return -1;
                }
                if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
                        DP_ERR(edev, "FP status block allocation failed\n");
                        return -1;
                }
                DP_INFO(edev, "sb_info idx 0x%x initialized\n",
                                fp->sb_info->igu_sb_id);
        }

        return 0;
}

void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_fastpath *fp;
        uint16_t sb_idx;
        uint8_t i;

        PMD_INIT_FUNC_TRACE(edev);

        for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
                fp = &qdev->fp_array[sb_idx];
                if (fp->sb_info) {
                        DP_INFO(edev, "Free sb_info index 0x%x\n",
                                        fp->sb_info->igu_sb_id);
                        OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
                                fp->sb_info->sb_phys,
                                sizeof(struct status_block));
                        rte_free(fp->sb_info);
                        fp->sb_info = NULL;
                }
        }

        /* Free packet buffers and ring memories */
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                if (eth_dev->data->rx_queues[i]) {
                        qede_rx_queue_release(eth_dev->data->rx_queues[i]);
                        eth_dev->data->rx_queues[i] = NULL;
                }
        }

        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                if (eth_dev->data->tx_queues[i]) {
                        qede_tx_queue_release(eth_dev->data->tx_queues[i]);
                        eth_dev->data->tx_queues[i] = NULL;
                }
        }

        rte_free(qdev->fp_array);
        qdev->fp_array = NULL;

        rte_free(qdev->fp_array_cmt);
        qdev->fp_array_cmt = NULL;
}

static inline void
qede_update_rx_prod(__rte_unused struct qede_dev *edev,
                    struct qede_rx_queue *rxq)
{
        uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
        uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
        struct eth_rx_prod_data rx_prods;

        /* Update producers */
        memset(&rx_prods, 0, sizeof(rx_prods));
        rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
        rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);

        /* Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         */
        rte_wmb();

        internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
                        (uint32_t *)&rx_prods);

        /* mmiowb is needed to synchronize doorbell writes from more than one
         * processor. It guarantees that the write arrives to the device before
         * the napi lock is released and another qede_poll is called (possibly
         * on another CPU). Without this barrier, the next doorbell can bypass
         * this doorbell. This is applicable to IA64/Altix systems.
         */
        rte_wmb();

        PMD_RX_LOG(DEBUG, rxq, "bd_prod %u  cqe_prod %u", bd_prod, cqe_prod);
}
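
/* Note on the barrier placement above: the first rte_wmb() orders "fill BDs
 * in memory" before "publish producers to device RAM", so the FW can never
 * observe a producer that points at a BD whose address words are still in a
 * CPU store buffer. The second rte_wmb() stands in for mmiowb and orders this
 * doorbell write against any later doorbell issued from another core.
 */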

/* Starts a given RX queue in HW */
static int
qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_queue_start_common_params params;
        struct ecore_rxq_start_ret_params ret_params;
        struct qede_rx_queue *rxq;
        struct qede_fastpath *fp;
        struct ecore_hwfn *p_hwfn;
        dma_addr_t p_phys_table;
        uint16_t page_cnt;
        uint16_t j;
        int hwfn_index;
        int rc;

        if (rx_queue_id < qdev->num_rx_queues) {
                fp = &qdev->fp_array[rx_queue_id];
                rxq = fp->rxq;
                /* Allocate buffers for the Rx ring */
                for (j = 0; j < rxq->nb_rx_desc; j++) {
                        rc = qede_alloc_rx_buffer(rxq);
                        if (rc) {
                                DP_ERR(edev, "RX buffer allocation failed"
                                                " for rxq = %u\n", rx_queue_id);
                                return -ENOMEM;
                        }
                }
                /* disable interrupts */
                ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
                /* Prepare ramrod */
                memset(&params, 0, sizeof(params));
                params.queue_id = rx_queue_id / edev->num_hwfns;
                params.vport_id = 0;
                params.stats_id = params.vport_id;
                params.p_sb = fp->sb_info;
                DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
                                fp->rxq->queue_id, fp->sb_info->igu_sb_id);
                params.sb_idx = RX_PI;
                hwfn_index = rx_queue_id % edev->num_hwfns;
                p_hwfn = &edev->hwfns[hwfn_index];
                p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
                page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
                memset(&ret_params, 0, sizeof(ret_params));
                rc = ecore_eth_rx_queue_start(p_hwfn,
                                p_hwfn->hw_info.opaque_fid,
                                &params, fp->rxq->rx_buf_size,
                                fp->rxq->rx_bd_ring.p_phys_addr,
                                p_phys_table, page_cnt,
                                &ret_params);
                if (rc) {
                        DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
                                        rx_queue_id, rc);
                        return -1;
                }
                /* Update with the returned parameters */
                fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
                fp->rxq->handle = ret_params.p_handle;

                fp->rxq->hw_cons_ptr = &fp->sb_info->sb_pi_array[RX_PI];
                qede_update_rx_prod(qdev, fp->rxq);
                eth_dev->data->rx_queue_state[rx_queue_id] =
                        RTE_ETH_QUEUE_STATE_STARTED;
                DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
        } else {
                DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
                rc = -EINVAL;
        }

        return rc;
}

static int
qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_queue_start_common_params params;
        struct ecore_txq_start_ret_params ret_params;
        struct ecore_hwfn *p_hwfn;
        dma_addr_t p_phys_table;
        struct qede_tx_queue *txq;
        struct qede_fastpath *fp;
        uint16_t page_cnt;
        int hwfn_index;
        int rc;

        if (tx_queue_id < qdev->num_tx_queues) {
                fp = &qdev->fp_array[tx_queue_id];
                txq = fp->txq;
                memset(&params, 0, sizeof(params));
                params.queue_id = tx_queue_id / edev->num_hwfns;
                params.vport_id = 0;
                params.stats_id = params.vport_id;
                params.p_sb = fp->sb_info;
                DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
                                fp->txq->queue_id, fp->sb_info->igu_sb_id);
                params.sb_idx = TX_PI(0); /* tc = 0 */
                p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
                page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
                hwfn_index = tx_queue_id % edev->num_hwfns;
                p_hwfn = &edev->hwfns[hwfn_index];
                if (qdev->dev_info.is_legacy)
                        fp->txq->is_legacy = true;
                rc = ecore_eth_tx_queue_start(p_hwfn,
                                p_hwfn->hw_info.opaque_fid,
                                &params, 0 /* tc */,
                                p_phys_table, page_cnt,
                                &ret_params);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
                                        tx_queue_id, rc);
                        return -1;
                }
                txq->doorbell_addr = ret_params.p_doorbell;
                txq->handle = ret_params.p_handle;

                txq->hw_cons_ptr = &fp->sb_info->sb_pi_array[TX_PI(0)];
                SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
                                DB_DEST_XCM);
                SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
                                DB_AGG_CMD_SET);
                SET_FIELD(txq->tx_db.data.params,
                                ETH_DB_DATA_AGG_VAL_SEL,
                                DQ_XCM_ETH_TX_BD_PROD_CMD);
                txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
                eth_dev->data->tx_queue_state[tx_queue_id] =
                        RTE_ETH_QUEUE_STATE_STARTED;
                DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
        } else {
                DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
                rc = -EINVAL;
        }

        return rc;
}

static inline void
qede_free_tx_pkt(struct qede_tx_queue *txq)
{
        struct rte_mbuf *mbuf;
        uint16_t nb_segs;
        uint16_t idx;

        idx = TX_CONS(txq);
        mbuf = txq->sw_tx_ring[idx];
        if (mbuf) {
                nb_segs = mbuf->nb_segs;
                PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
                while (nb_segs) {
                        /* It's like consuming rxbuf in recv() */
                        ecore_chain_consume(&txq->tx_pbl);
                        txq->nb_tx_avail++;
                        nb_segs--;
                }
                rte_pktmbuf_free(mbuf);
                txq->sw_tx_ring[idx] = NULL;
                txq->sw_tx_cons++;
                PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
        } else {
                ecore_chain_consume(&txq->tx_pbl);
                txq->nb_tx_avail++;
        }
}

static inline void
qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
                      struct qede_tx_queue *txq)
{
        uint16_t hw_bd_cons;
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
        uint16_t sw_tx_cons;
#endif

        hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
        /* read barrier prevents speculative execution on stale data */
        rte_rmb();

#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
        sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
        PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
                   abs(hw_bd_cons - sw_tx_cons));
#endif
        while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
                qede_free_tx_pkt(txq);
}
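
/* Note: completion processing advances the driver's chain consumer until it
 * matches the consumer index the FW last wrote to the status block
 * (hw_cons_ptr). A multi-segment packet releases one BD per segment through
 * the nb_segs loop in qede_free_tx_pkt() but frees its mbuf chain only once.
 */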

static int qede_drain_txq(struct qede_dev *qdev,
                          struct qede_tx_queue *txq, bool allow_drain)
{
        struct ecore_dev *edev = &qdev->edev;
        int rc, cnt = 1000;

        while (txq->sw_tx_cons != txq->sw_tx_prod) {
                qede_process_tx_compl(edev, txq);
                if (!cnt) {
                        if (allow_drain) {
                                DP_ERR(edev, "Tx queue[%u] is stuck, "
                                          "requesting MCP to drain\n",
                                          txq->queue_id);
                                rc = qdev->ops->common->drain(edev);
                                if (rc)
                                        return rc;
                                return qede_drain_txq(qdev, txq, false);
                        }
                        DP_ERR(edev, "Timeout waiting for tx queue[%d]: "
                                  "PROD=%d, CONS=%d\n",
                                  txq->queue_id, txq->sw_tx_prod,
                                  txq->sw_tx_cons);
                        return -1;
                }
                cnt--;
                DELAY(1000);
                rte_compiler_barrier();
        }

        /* FW finished processing, wait for HW to transmit all tx packets */
        DELAY(2000);

        return 0;
}
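
/* Note: the loop above polls roughly once per millisecond (DELAY(1000)
 * microseconds) with cnt starting at 1000, i.e. a timeout on the order of
 * one second before the management FW (MCP) is asked to drain the queue.
 * The drain is attempted at most once, since the recursive call passes
 * allow_drain = false.
 */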

/* Stops a given TX queue in the HW */
static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_hwfn *p_hwfn;
        struct qede_tx_queue *txq;
        int hwfn_index;
        int rc;

        if (tx_queue_id < qdev->num_tx_queues) {
                txq = qdev->fp_array[tx_queue_id].txq;
                /* Drain txq */
                if (qede_drain_txq(qdev, txq, true))
                        return -1; /* For the lack of retcodes */
                /* Stop txq */
                hwfn_index = tx_queue_id % edev->num_hwfns;
                p_hwfn = &edev->hwfns[hwfn_index];
                rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(edev, "TX queue %u stop failed\n", tx_queue_id);
                        return -1;
                }
                qede_tx_queue_release_mbufs(txq);
                qede_tx_queue_reset(qdev, txq);
                eth_dev->data->tx_queue_state[tx_queue_id] =
                        RTE_ETH_QUEUE_STATE_STOPPED;
                DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
        } else {
                DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
                rc = -EINVAL;
        }

        return rc;
}

int qede_start_queues(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        uint8_t id;
        int rc = -1;

        for (id = 0; id < qdev->num_rx_queues; id++) {
                rc = qede_rx_queue_start(eth_dev, id);
                if (rc != ECORE_SUCCESS)
                        return -1;
        }

        for (id = 0; id < qdev->num_tx_queues; id++) {
                rc = qede_tx_queue_start(eth_dev, id);
                if (rc != ECORE_SUCCESS)
                        return -1;
        }

        return rc;
}

void qede_stop_queues(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        uint8_t id;

        /* Stopping RX/TX queues */
        for (id = 0; id < qdev->num_tx_queues; id++)
                qede_tx_queue_stop(eth_dev, id);

        for (id = 0; id < qdev->num_rx_queues; id++)
                qede_rx_queue_stop(eth_dev, id);
}

static inline bool qede_tunn_exist(uint16_t flag)
{
        return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
                    PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
}

static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
{
        return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
                PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
}

/*
 * qede_check_tunn_csum_l4:
 * Returns:
 * 1 : If L4 csum is enabled AND if the validation has failed.
 * 0 : Otherwise
 */
static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
{
        if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
             PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
                return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
                        PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);

        return 0;
}

static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
{
        if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
             PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
                return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
                           PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);

        return 0;
}

/* Returns outer L2, L3 and L4 packet_type for tunneled packets */
static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
{
        uint32_t packet_type = RTE_PTYPE_UNKNOWN;
        struct rte_ether_hdr *eth_hdr;
        struct rte_ipv4_hdr *ipv4_hdr;
        struct rte_ipv6_hdr *ipv6_hdr;
        struct rte_vlan_hdr *vlan_hdr;
        uint16_t ethertype;
        bool vlan_tagged = false;
        uint16_t len;

        eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
        len = sizeof(struct rte_ether_hdr);
        ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);

        /* Note: Valid only if VLAN stripping is disabled */
        if (ethertype == RTE_ETHER_TYPE_VLAN) {
                vlan_tagged = true;
                vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
                len += sizeof(struct rte_vlan_hdr);
                ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
        }

        if (ethertype == RTE_ETHER_TYPE_IPV4) {
                packet_type |= RTE_PTYPE_L3_IPV4;
                ipv4_hdr = rte_pktmbuf_mtod_offset(m,
                                        struct rte_ipv4_hdr *, len);
                if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
                        packet_type |= RTE_PTYPE_L4_TCP;
                else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
                        packet_type |= RTE_PTYPE_L4_UDP;
        } else if (ethertype == RTE_ETHER_TYPE_IPV6) {
                packet_type |= RTE_PTYPE_L3_IPV6;
                ipv6_hdr = rte_pktmbuf_mtod_offset(m,
                                                struct rte_ipv6_hdr *, len);
                if (ipv6_hdr->proto == IPPROTO_TCP)
                        packet_type |= RTE_PTYPE_L4_TCP;
                else if (ipv6_hdr->proto == IPPROTO_UDP)
                        packet_type |= RTE_PTYPE_L4_UDP;
        }

        if (vlan_tagged)
                packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
        else
                packet_type |= RTE_PTYPE_L2_ETHER;

        return packet_type;
}

static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
{
        uint16_t val;

        /* Lookup table */
        static const uint32_t
        ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
                [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4          |
                                       RTE_PTYPE_INNER_L2_ETHER,
                [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6          |
                                       RTE_PTYPE_INNER_L2_ETHER,
                [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4      |
                                           RTE_PTYPE_INNER_L4_TCP       |
                                           RTE_PTYPE_INNER_L2_ETHER,
                [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6      |
                                           RTE_PTYPE_INNER_L4_TCP       |
                                           RTE_PTYPE_INNER_L2_ETHER,
                [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4      |
                                           RTE_PTYPE_INNER_L4_UDP       |
                                           RTE_PTYPE_INNER_L2_ETHER,
                [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6      |
                                           RTE_PTYPE_INNER_L4_UDP       |
                                           RTE_PTYPE_INNER_L2_ETHER,
                /* Frags with no VLAN */
                [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4     |
                                            RTE_PTYPE_INNER_L4_FRAG     |
                                            RTE_PTYPE_INNER_L2_ETHER,
                [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6     |
                                            RTE_PTYPE_INNER_L4_FRAG     |
                                            RTE_PTYPE_INNER_L2_ETHER,
                /* VLANs */
                [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4     |
                                            RTE_PTYPE_INNER_L2_ETHER_VLAN,
                [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6     |
                                            RTE_PTYPE_INNER_L2_ETHER_VLAN,
                [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
                                                RTE_PTYPE_INNER_L4_TCP  |
                                                RTE_PTYPE_INNER_L2_ETHER_VLAN,
                [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
                                                RTE_PTYPE_INNER_L4_TCP  |
                                                RTE_PTYPE_INNER_L2_ETHER_VLAN,
                [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
                                                RTE_PTYPE_INNER_L4_UDP  |
                                                RTE_PTYPE_INNER_L2_ETHER_VLAN,
                [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
                                                RTE_PTYPE_INNER_L4_UDP  |
                                                RTE_PTYPE_INNER_L2_ETHER_VLAN,
                /* Frags with VLAN */
                [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
                                                 RTE_PTYPE_INNER_L4_FRAG |
                                                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
                [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
                                                 RTE_PTYPE_INNER_L4_FRAG |
                                                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
        };

        /* Bits (0..3) provide the L3/L4 protocol type */
        /* Bits (4,5) provide frag and VLAN info */
        val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
               PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
               (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
                PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
               (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
                PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
                (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
                 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;

        if (val < QEDE_PKT_TYPE_MAX)
                return ptype_lkup_tbl[val];

        return RTE_PTYPE_UNKNOWN;
}

static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
{
        uint16_t val;

        /* Lookup table */
        static const uint32_t
        ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
                [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
                [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
                [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4    |
                                           RTE_PTYPE_L4_TCP     |
                                           RTE_PTYPE_L2_ETHER,
                [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6    |
                                           RTE_PTYPE_L4_TCP     |
                                           RTE_PTYPE_L2_ETHER,
                [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4    |
                                           RTE_PTYPE_L4_UDP     |
                                           RTE_PTYPE_L2_ETHER,
                [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6    |
                                           RTE_PTYPE_L4_UDP     |
                                           RTE_PTYPE_L2_ETHER,
                /* Frags with no VLAN */
                [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4   |
                                            RTE_PTYPE_L4_FRAG   |
                                            RTE_PTYPE_L2_ETHER,
                [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6   |
                                            RTE_PTYPE_L4_FRAG   |
                                            RTE_PTYPE_L2_ETHER,
                /* VLANs */
                [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4           |
                                            RTE_PTYPE_L2_ETHER_VLAN,
                [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6           |
                                            RTE_PTYPE_L2_ETHER_VLAN,
                [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4       |
                                                RTE_PTYPE_L4_TCP        |
                                                RTE_PTYPE_L2_ETHER_VLAN,
                [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6       |
                                                RTE_PTYPE_L4_TCP        |
                                                RTE_PTYPE_L2_ETHER_VLAN,
                [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4       |
                                                RTE_PTYPE_L4_UDP        |
                                                RTE_PTYPE_L2_ETHER_VLAN,
                [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6       |
                                                RTE_PTYPE_L4_UDP        |
                                                RTE_PTYPE_L2_ETHER_VLAN,
                /* Frags with VLAN */
                [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4      |
                                                 RTE_PTYPE_L4_FRAG      |
                                                 RTE_PTYPE_L2_ETHER_VLAN,
                [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6      |
                                                 RTE_PTYPE_L4_FRAG      |
                                                 RTE_PTYPE_L2_ETHER_VLAN,
        };

        /* Bits (0..3) provide the L3/L4 protocol type */
        /* Bits (4,5) provide frag and VLAN info */
        val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
               PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
               (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
                PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
               (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
                PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
                (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
                 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;

        if (val < QEDE_PKT_TYPE_MAX)
                return ptype_lkup_tbl[val];

        return RTE_PTYPE_UNKNOWN;
}
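
/* Worked example: the CQE parsing flags pack the L3/L4 protocol type into
 * bits 0..3 and the frag/VLAN indications into bits 4..5, and this composite
 * value indexes the lookup table directly. Assuming the usual field encoding,
 * a VLAN-tagged IPv4/TCP frame sets L3TYPE, L4PROTOCOL and TAG8021QEXIST and
 * resolves to QEDE_PKT_TYPE_IPV4_TCP_VLAN, i.e. RTE_PTYPE_L3_IPV4 |
 * RTE_PTYPE_L4_TCP | RTE_PTYPE_L2_ETHER_VLAN. Any index at or beyond
 * QEDE_PKT_TYPE_MAX falls back to RTE_PTYPE_UNKNOWN.
 */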
1265
1266 static inline uint8_t
1267 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
1268 {
1269         struct rte_ipv4_hdr *ip;
1270         uint16_t pkt_csum;
1271         uint16_t calc_csum;
1272         uint16_t val;
1273
1274         val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1275                 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
1276
1277         if (unlikely(val)) {
1278                 m->packet_type = qede_rx_cqe_to_pkt_type(flag);
1279                 if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
1280                         ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
1281                                            sizeof(struct rte_ether_hdr));
1282                         pkt_csum = ip->hdr_checksum;
1283                         ip->hdr_checksum = 0;
1284                         calc_csum = rte_ipv4_cksum(ip);
1285                         ip->hdr_checksum = pkt_csum;
1286                         return (calc_csum != pkt_csum);
1287                 } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
1288                         return 1;
1289                 }
1290         }
1291         return 0;
1292 }
1293
1294 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
1295 {
1296         ecore_chain_consume(&rxq->rx_bd_ring);
1297         rxq->sw_rx_cons++;
1298 }
1299
1300 static inline void
1301 qede_reuse_page(__rte_unused struct qede_dev *qdev,
1302                 struct qede_rx_queue *rxq, struct rte_mbuf *curr_cons)
1303 {
1304         struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
1305         uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
1306         dma_addr_t new_mapping;
1307
1308         rxq->sw_rx_ring[idx] = curr_cons;
1309
1310         new_mapping = rte_mbuf_data_iova_default(curr_cons);
1311
1312         rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
1313         rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
1314
1315         rxq->sw_rx_prod++;
1316 }
1317
1318 static inline void
1319 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
1320                         struct qede_dev *qdev, uint8_t count)
1321 {
1322         struct rte_mbuf *curr_cons;
1323
1324         for (; count > 0; count--) {
1325                 curr_cons = rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
1326                 qede_reuse_page(qdev, rxq, curr_cons);
1327                 qede_rx_bd_ring_consume(rxq);
1328         }
1329 }
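/* Editor's note: an illustrative, standalone model of the free-running
 * producer/consumer indices used by the SW ring above.  sw_rx_prod and
 * sw_rx_cons are plain uint16_t counters masked only when indexing, which
 * relies on the ring size being a power of two (with NUM_RX_BDS() acting
 * as the "size - 1" mask).
 */
#if 0
#include <stdint.h>

#define EX_RING_SIZE 8                  /* must be a power of two */
#define EX_RING_MASK (EX_RING_SIZE - 1)

struct ex_ring {
	void *slot[EX_RING_SIZE];
	uint16_t prod;                  /* free-running, wraps at 65536 */
	uint16_t cons;
};

/* Hand the just-consumed buffer straight back to the producer side, the
 * same recycling qede_reuse_page() performs when an Rx buffer is returned
 * to the hardware without going through the mempool.
 */
static void ex_ring_recycle_one(struct ex_ring *r)
{
	void *buf = r->slot[r->cons++ & EX_RING_MASK];

	r->slot[r->prod++ & EX_RING_MASK] = buf;
}
#endif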
1330
1331 static inline void
1332 qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
1333                                      struct qede_rx_queue *rxq,
1334                                      uint8_t agg_index, uint16_t len)
1335 {
1336         struct qede_agg_info *tpa_info;
1337         struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
1338         uint16_t cons_idx;
1339
1340         /* Under certain conditions the FW may not consume an
1341          * additional or new BD, so the decision to consume the BD
1342          * must be made based on len_list[0].
1343          */
1344         if (rte_le_to_cpu_16(len)) {
1345                 tpa_info = &rxq->tpa_info[agg_index];
1346                 cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1347                 curr_frag = rxq->sw_rx_ring[cons_idx];
1348                 assert(curr_frag);
1349                 curr_frag->nb_segs = 1;
1350                 curr_frag->pkt_len = rte_le_to_cpu_16(len);
1351                 curr_frag->data_len = curr_frag->pkt_len;
1352                 tpa_info->tpa_tail->next = curr_frag;
1353                 tpa_info->tpa_tail = curr_frag;
1354                 qede_rx_bd_ring_consume(rxq);
1355                 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1356                         PMD_RX_LOG(ERR, rxq, "mbuf allocation failed\n");
1357                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1358                         rxq->rx_alloc_errors++;
1359                 }
1360         }
1361 }
1362
1363 static inline void
1364 qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
1365                              struct qede_rx_queue *rxq,
1366                              struct eth_fast_path_rx_tpa_cont_cqe *cqe)
1367 {
1368         PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
1369                    cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
1370         /* Only len_list[0] will carry a value */
1371         qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1372                                              cqe->len_list[0]);
1373 }
1374
1375 static inline void
1376 qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
1377                             struct qede_rx_queue *rxq,
1378                             struct eth_fast_path_rx_tpa_end_cqe *cqe)
1379 {
1380         struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
1381
1382         qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1383                                              cqe->len_list[0]);
1384         /* Update total length and frags based on end TPA */
1385         rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
1386         /* TODO: add sanity checks */
1387         rx_mb->nb_segs = cqe->num_of_bds;
1388         rx_mb->pkt_len = cqe->total_packet_len;
1389
1390         PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
1391                    " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
1392                    rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
1393                    rx_mb->pkt_len);
1394 }
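/* Editor's note: a standalone sketch of the head/tail bookkeeping used for
 * TPA aggregations above: continuation CQEs append one segment at the
 * tail, and the end CQE fixes up the head's totals.  struct ex_seg is a
 * hypothetical stand-in for struct rte_mbuf.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

struct ex_seg {
	struct ex_seg *next;
	uint16_t data_len;
	uint32_t pkt_len;
	uint8_t nb_segs;
};

struct ex_agg {
	struct ex_seg *head;    /* first segment, returned to the app */
	struct ex_seg *tail;    /* where the next continuation lands */
};

static void ex_agg_cont(struct ex_agg *a, struct ex_seg *seg, uint16_t len)
{
	seg->next = NULL;
	seg->data_len = len;
	a->tail->next = seg;    /* append, as tpa_info->tpa_tail->next */
	a->tail = seg;
}

static void ex_agg_end(struct ex_agg *a, uint8_t nb_bds, uint32_t total_len)
{
	/* Only the head carries the totals; the per-segment lengths were
	 * already filled in by the continuation handler.
	 */
	a->head->nb_segs = nb_bds;
	a->head->pkt_len = total_len;
}
#endif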
1395
1396 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
1397 {
1398         uint32_t val;
1399
1400         /* Lookup table */
1401         static const uint32_t
1402         ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
1403                 [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
1404                 [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
1405                 [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
1406                 [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
1407                 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
1408                                 RTE_PTYPE_TUNNEL_GENEVE,
1409                 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
1410                                 RTE_PTYPE_TUNNEL_GRE,
1411                 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
1412                                 RTE_PTYPE_TUNNEL_VXLAN,
1413                 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
1414                                 RTE_PTYPE_TUNNEL_GENEVE,
1415                 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
1416                                 RTE_PTYPE_TUNNEL_GRE,
1417                 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
1418                                 RTE_PTYPE_TUNNEL_VXLAN,
1419                 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
1420                                 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1421                 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
1422                                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1423                 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
1424                                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1425                 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
1426                                 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1427                 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
1428                                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1429                 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
1430                                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1431                 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
1432                                 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1433                 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
1434                                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1435                 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
1436                                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1437                 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
1438                                 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1439                 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
1440                                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1441                 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
1442                                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1443         };
1444
1445         /* Bits [4:0] cover the tunnel type and next protocol */
1446         val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
1447                 ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
1448                 (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
1449                 ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
1450
1451         if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
1452                 return ptype_tunn_lkup_tbl[val];
1453         else
1454                 return RTE_PTYPE_UNKNOWN;
1455 }
1456
1457 static inline int
1458 qede_process_sg_pkts(void *p_rxq,  struct rte_mbuf *rx_mb,
1459                      uint8_t num_segs, uint16_t pkt_len)
1460 {
1461         struct qede_rx_queue *rxq = p_rxq;
1462         struct qede_dev *qdev = rxq->qdev;
1463         register struct rte_mbuf *seg1 = NULL;
1464         register struct rte_mbuf *seg2 = NULL;
1465         uint16_t sw_rx_index;
1466         uint16_t cur_size;
1467
1468         seg1 = rx_mb;
1469         while (num_segs) {
1470                 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1471                                                         pkt_len;
1472                 if (unlikely(!cur_size)) {
1473                         PMD_RX_LOG(ERR, rxq, "Length is 0 with %u BDs"
1474                                    " left to map for jumbo\n", num_segs);
1475                         qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
1476                         return -EINVAL;
1477                 }
1478                 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1479                 seg2 = rxq->sw_rx_ring[sw_rx_index];
1480                 qede_rx_bd_ring_consume(rxq);
1481                 pkt_len -= cur_size;
1482                 seg2->data_len = cur_size;
1483                 seg1->next = seg2;
1484                 seg1 = seg1->next;
1485                 num_segs--;
1486                 rxq->rx_segs++;
1487         }
1488
1489         return 0;
1490 }
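/* Editor's note: a worked, standalone model (not compiled) of the length
 * accounting in qede_process_sg_pkts(): the bytes remaining after the
 * first BD are spread over further BDs, each capped at rx_buf_size.  For
 * example, pkt_len 9000 with 2048 bytes on the first BD and rx_buf_size
 * 2048 leaves 6952 bytes, split as 2048 + 2048 + 2048 + 808.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void ex_split(uint16_t remaining, uint16_t buf_size)
{
	while (remaining > 0) {
		uint16_t cur = remaining > buf_size ? buf_size : remaining;

		printf("segment of %u bytes\n", cur);
		remaining -= cur;
	}
}

int main(void)
{
	ex_split(9000 - 2048, 2048); /* 2048, 2048, 2048, 808 */
	return 0;
}
#endif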
1491
1492 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1493 static inline void
1494 print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
1495                  uint8_t bitfield)
1496 {
1497         PMD_RX_LOG(INFO, rxq,
1498                 "len 0x%04x bf 0x%04x hash_val 0x%x"
1499                 " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
1500                 " inner_l2=%s inner_l3=%s inner_l4=%s\n",
1501                 m->data_len, bitfield, m->hash.rss,
1502                 (unsigned long)m->ol_flags,
1503                 rte_get_ptype_l2_name(m->packet_type),
1504                 rte_get_ptype_l3_name(m->packet_type),
1505                 rte_get_ptype_l4_name(m->packet_type),
1506                 rte_get_ptype_tunnel_name(m->packet_type),
1507                 rte_get_ptype_inner_l2_name(m->packet_type),
1508                 rte_get_ptype_inner_l3_name(m->packet_type),
1509                 rte_get_ptype_inner_l4_name(m->packet_type));
1510 }
1511 #endif
1512
1513 uint16_t
1514 qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1515 {
1516         struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1517         register struct rte_mbuf *rx_mb = NULL;
1518         struct qede_rx_queue *rxq = p_rxq;
1519         struct qede_dev *qdev = rxq->qdev;
1520         struct ecore_dev *edev = &qdev->edev;
1521         union eth_rx_cqe *cqe;
1522         uint64_t ol_flags;
1523         enum eth_rx_cqe_type cqe_type;
1524         int rss_enable = qdev->rss_enable;
1525         int rx_alloc_count = 0;
1526         uint32_t packet_type;
1527         uint32_t rss_hash;
1528         uint16_t vlan_tci, port_id;
1529         uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index, num_rx_bds;
1530         uint16_t rx_pkt = 0;
1531         uint16_t pkt_len = 0;
1532         uint16_t len; /* Length of first BD */
1533         uint16_t preload_idx;
1534         uint16_t parse_flag;
1535 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1536         uint8_t bitfield_val;
1537 #endif
1538         uint8_t offset, flags, bd_num;
1539         uint16_t count = 0;
1540
1541         /* Replenish the buffers consumed during the previous call */
1542         if (rxq->rx_alloc_count) {
1543                 count = rxq->rx_alloc_count > QEDE_MAX_BULK_ALLOC_COUNT ?
1544                         QEDE_MAX_BULK_ALLOC_COUNT : rxq->rx_alloc_count;
1545
1546                 if (unlikely(qede_alloc_rx_bulk_mbufs(rxq, count))) {
1547                         struct rte_eth_dev *dev;
1548
1549                         PMD_RX_LOG(ERR, rxq,
1550                                    "New buffer allocation failed, "
1551                                    "dropping incoming packets\n");
1552                         dev = &rte_eth_devices[rxq->port_id];
1553                         dev->data->rx_mbuf_alloc_failed += count;
1554                         rxq->rx_alloc_errors += count;
1555                         return 0;
1556                 }
1557                 qede_update_rx_prod(qdev, rxq);
1558                 rxq->rx_alloc_count -= count;
1559         }
1560
1561         hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1562         sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1563
1564         rte_rmb();
1565
1566         if (hw_comp_cons == sw_comp_cons)
1567                 return 0;
1568
1569         num_rx_bds =  NUM_RX_BDS(rxq);
1570         port_id = rxq->port_id;
1571
1572         while (sw_comp_cons != hw_comp_cons) {
1573                 ol_flags = 0;
1574                 packet_type = RTE_PTYPE_UNKNOWN;
1575                 vlan_tci = 0;
1576                 rss_hash = 0;
1577
1578                 /* Get the CQE from the completion ring */
1579                 cqe =
1580                     (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1581                 cqe_type = cqe->fast_path_regular.type;
1582                 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1583
1584                 if (likely(cqe_type == ETH_RX_CQE_TYPE_REGULAR)) {
1585                         fp_cqe = &cqe->fast_path_regular;
1586                 } else {
1587                         if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
1588                                 PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1589                                 ecore_eth_cqe_completion
1590                                         (&edev->hwfns[rxq->queue_id %
1591                                                       edev->num_hwfns],
1592                                          (struct eth_slow_path_rx_cqe *)cqe);
1593                         }
1594                         goto next_cqe;
1595                 }
1596
1597                 /* Get the data from the SW ring */
1598                 sw_rx_index = rxq->sw_rx_cons & num_rx_bds;
1599                 rx_mb = rxq->sw_rx_ring[sw_rx_index];
1600                 assert(rx_mb != NULL);
1601
1602                 parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1603                 offset = fp_cqe->placement_offset;
1604                 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1605                 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1606                 vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1607                 rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1608                 bd_num = fp_cqe->bd_num;
1609 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1610                 bitfield_val = fp_cqe->bitfields;
1611 #endif
1612
1613                 if (unlikely(qede_tunn_exist(parse_flag))) {
1614                         PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1615                         if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1616                                 PMD_RX_LOG(ERR, rxq,
1617                                             "L4 csum failed, flags = 0x%x\n",
1618                                             parse_flag);
1619                                 rxq->rx_hw_errors++;
1620                                 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1621                         } else {
1622                                 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1623                         }
1624
1625                         if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1626                                 PMD_RX_LOG(ERR, rxq,
1627                                         "Outer L3 csum failed, flags = 0x%x\n",
1628                                         parse_flag);
1629                                 rxq->rx_hw_errors++;
1630                                 ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1631                         } else {
1632                                 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1633                         }
1634
1635                         flags = fp_cqe->tunnel_pars_flags.flags;
1636
1637                         /* Tunnel_type */
1638                         packet_type =
1639                                 qede_rx_cqe_to_tunn_pkt_type(flags);
1640
1641                         /* Inner header */
1642                         packet_type |=
1643                               qede_rx_cqe_to_pkt_type_inner(parse_flag);
1644
1645                         /* Outer L3/L4 types are not available in the CQE.
1646                          * The placement offset must be applied to the
1647                          * mbuf before the outer headers can be parsed
1648                          * correctly, so set data_off first and derive
1649                          * the outer packet type only once.
1650                          */
1651                         rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1652                         packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1653                 } else {
1654                         packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1655                 }
1656
1657                 /* Common handling for non-tunnel packets and for the
1658                  * inner headers in the case of tunneled packets.
1659                  */
1660                 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1661                         PMD_RX_LOG(ERR, rxq,
1662                                     "L4 csum failed, flags = 0x%x\n",
1663                                     parse_flag);
1664                         rxq->rx_hw_errors++;
1665                         ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1666                 } else {
1667                         ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1668                 }
1669                 if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1670                         PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1671                                    parse_flag);
1672                         rxq->rx_hw_errors++;
1673                         ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1674                 } else {
1675                         ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1676                 }
1677
1678                 if (unlikely(CQE_HAS_VLAN(parse_flag) ||
1679                              CQE_HAS_OUTER_VLAN(parse_flag))) {
1680                         /* Note: FW doesn't indicate Q-in-Q packet */
1681                         ol_flags |= RTE_MBUF_F_RX_VLAN;
1682                         if (qdev->vlan_strip_flg) {
1683                                 ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
1684                                 rx_mb->vlan_tci = vlan_tci;
1685                         }
1686                 }
1687
1688                 if (rss_enable) {
1689                         ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
1690                         rx_mb->hash.rss = rss_hash;
1691                 }
1692
1693                 rx_alloc_count++;
1694                 qede_rx_bd_ring_consume(rxq);
1695
1696                 /* Prefetch next mbuf while processing current one. */
1697                 preload_idx = rxq->sw_rx_cons & num_rx_bds;
1698                 rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
1699
1700                 /* Update the rest of the mbuf fields */
1701                 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1702                 rx_mb->port = port_id;
1703                 rx_mb->ol_flags = ol_flags;
1704                 rx_mb->data_len = len;
1705                 rx_mb->packet_type = packet_type;
1706 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1707                 print_rx_bd_info(rx_mb, rxq, bitfield_val);
1708 #endif
1709                 rx_mb->nb_segs = bd_num;
1710                 rx_mb->pkt_len = pkt_len;
1711
1712                 rx_pkts[rx_pkt] = rx_mb;
1713                 rx_pkt++;
1714
1715 next_cqe:
1716                 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
1717                 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1718                 if (rx_pkt == nb_pkts) {
1719                         PMD_RX_LOG(DEBUG, rxq,
1720                                    "Budget reached nb_pkts=%u received=%u",
1721                                    rx_pkt, nb_pkts);
1722                         break;
1723                 }
1724         }
1725
1726         /* Record how many buffers to replenish on the next call */
1727         rxq->rx_alloc_count += rx_alloc_count;
1728
1729         rxq->rcv_pkts += rx_pkt;
1730         rxq->rx_segs += rx_pkt;
1731         PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
1732
1733         return rx_pkt;
1734 }
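/* Editor's note: both receive paths above defer buffer replenishment: the
 * polling loop only counts the consumed BDs (rx_alloc_count), and the next
 * invocation refills them with a single bulk mempool request, trading a
 * briefly emptier ring for fewer per-packet allocations.  A minimal,
 * standalone model with a stubbed-out refill helper:
 */
#if 0
#include <stdint.h>

#define EX_BULK_MAX 512                 /* like QEDE_MAX_BULK_ALLOC_COUNT */

struct ex_rxq {
	uint16_t owed;                  /* like rxq->rx_alloc_count */
};

/* Hypothetical stand-in for qede_alloc_rx_bulk_mbufs(). */
static int ex_refill(struct ex_rxq *q, uint16_t count)
{
	(void)q;
	(void)count;
	return 0;                       /* pretend the bulk get succeeded */
}

static void ex_poll_start(struct ex_rxq *q)
{
	uint16_t count = q->owed > EX_BULK_MAX ? EX_BULK_MAX : q->owed;

	if (count != 0 && ex_refill(q, count) == 0)
		q->owed -= count;
}
#endif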
1735
1736 uint16_t
1737 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1738 {
1739         struct qede_rx_queue *rxq = p_rxq;
1740         struct qede_dev *qdev = rxq->qdev;
1741         struct ecore_dev *edev = &qdev->edev;
1742         uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
1743         uint16_t rx_pkt = 0;
1744         union eth_rx_cqe *cqe;
1745         struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1746         register struct rte_mbuf *rx_mb = NULL;
1747         register struct rte_mbuf *seg1 = NULL;
1748         enum eth_rx_cqe_type cqe_type;
1749         uint16_t pkt_len = 0; /* Sum of all BD segments */
1750         uint16_t len; /* Length of first BD */
1751         uint8_t num_segs = 1;
1752         uint16_t preload_idx;
1753         uint16_t parse_flag;
1754 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1755         uint8_t bitfield_val;
1756 #endif
1757         uint8_t tunn_parse_flag;
1758         struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
1759         uint64_t ol_flags;
1760         uint32_t packet_type;
1761         uint16_t vlan_tci;
1762         bool tpa_start_flg;
1763         uint8_t offset, tpa_agg_idx, flags;
1764         struct qede_agg_info *tpa_info = NULL;
1765         uint32_t rss_hash;
1766         int rx_alloc_count = 0;
1767         uint16_t count = 0;
1768
1769         /* Replenish the buffers consumed during the previous call */
1770         if (rxq->rx_alloc_count) {
1771                 count = rxq->rx_alloc_count > QEDE_MAX_BULK_ALLOC_COUNT ?
1772                         QEDE_MAX_BULK_ALLOC_COUNT : rxq->rx_alloc_count;
1773
1774                 if (unlikely(qede_alloc_rx_bulk_mbufs(rxq, count))) {
1775                         struct rte_eth_dev *dev;
1776
1777                         PMD_RX_LOG(ERR, rxq,
1778                                    "New buffer allocation failed, "
1779                                    "dropping incoming packets\n");
1780                         dev = &rte_eth_devices[rxq->port_id];
1781                         dev->data->rx_mbuf_alloc_failed += count;
1782                         rxq->rx_alloc_errors += count;
1783                         return 0;
1784                 }
1785                 qede_update_rx_prod(qdev, rxq);
1786                 rxq->rx_alloc_count -= count;
1787         }
1788
1789         hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1790         sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1791
1792         rte_rmb();
1793
1794         if (hw_comp_cons == sw_comp_cons)
1795                 return 0;
1796
1797         while (sw_comp_cons != hw_comp_cons) {
1798                 ol_flags = 0;
1799                 packet_type = RTE_PTYPE_UNKNOWN;
1800                 vlan_tci = 0;
1801                 tpa_start_flg = false;
1802                 rss_hash = 0;
1803
1804                 /* Get the CQE from the completion ring */
1805                 cqe =
1806                     (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1807                 cqe_type = cqe->fast_path_regular.type;
1808                 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1809
1810                 switch (cqe_type) {
1811                 case ETH_RX_CQE_TYPE_REGULAR:
1812                         fp_cqe = &cqe->fast_path_regular;
1813                 break;
1814                 case ETH_RX_CQE_TYPE_TPA_START:
1815                         cqe_start_tpa = &cqe->fast_path_tpa_start;
1816                         tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
1817                         tpa_start_flg = true;
1818                         /* Mark it as LRO packet */
1819                         ol_flags |= RTE_MBUF_F_RX_LRO;
1820                         /* In split mode, seg_len is the same as
1821                          * len_on_first_bd and bw_ext_bd_len_list will be
1822                          * empty since there are no additional buffers.
1823                          */
1824                         PMD_RX_LOG(INFO, rxq,
1825                          "TPA start[%d] - len_on_first_bd %d header %d"
1826                          " [bd_list[0] %d], [seg_len %d]\n",
1827                          cqe_start_tpa->tpa_agg_index,
1828                          rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
1829                          cqe_start_tpa->header_len,
1830                          rte_le_to_cpu_16(cqe_start_tpa->bw_ext_bd_len_list[0]),
1831                          rte_le_to_cpu_16(cqe_start_tpa->seg_len));
1832
1833                 break;
1834                 case ETH_RX_CQE_TYPE_TPA_CONT:
1835                         qede_rx_process_tpa_cont_cqe(qdev, rxq,
1836                                                      &cqe->fast_path_tpa_cont);
1837                         goto next_cqe;
1838                 case ETH_RX_CQE_TYPE_TPA_END:
1839                         qede_rx_process_tpa_end_cqe(qdev, rxq,
1840                                                     &cqe->fast_path_tpa_end);
1841                         tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
1842                         tpa_info = &rxq->tpa_info[tpa_agg_idx];
1843                         rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
1844                         goto tpa_end;
1845                 case ETH_RX_CQE_TYPE_SLOW_PATH:
1846                         PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1847                         ecore_eth_cqe_completion(
1848                                 &edev->hwfns[rxq->queue_id % edev->num_hwfns],
1849                                 (struct eth_slow_path_rx_cqe *)cqe);
1850                         /* fall-thru */
1851                 default:
1852                         goto next_cqe;
1853                 }
1854
1855                 /* Get the data from the SW ring */
1856                 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1857                 rx_mb = rxq->sw_rx_ring[sw_rx_index];
1858                 assert(rx_mb != NULL);
1859
1860                 /* Handle regular CQE or TPA start CQE */
1861                 if (!tpa_start_flg) {
1862                         parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1863                         offset = fp_cqe->placement_offset;
1864                         len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1865                         pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1866                         vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1867                         rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1868 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1869                         bitfield_val = fp_cqe->bitfields;
1870 #endif
1871                 } else {
1872                         parse_flag =
1873                             rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
1874                         offset = cqe_start_tpa->placement_offset;
1875                         /* seg_len = len_on_first_bd */
1876                         len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
1877                         vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
1878 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1879                         bitfield_val = cqe_start_tpa->bitfields;
1880 #endif
1881                         rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
1882                 }
1883                 if (qede_tunn_exist(parse_flag)) {
1884                         PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1885                         if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1886                                 PMD_RX_LOG(ERR, rxq,
1887                                             "L4 csum failed, flags = 0x%x\n",
1888                                             parse_flag);
1889                                 rxq->rx_hw_errors++;
1890                                 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1891                         } else {
1892                                 ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1893                         }
1894
1895                         if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1896                                 PMD_RX_LOG(ERR, rxq,
1897                                         "Outer L3 csum failed, flags = 0x%x\n",
1898                                         parse_flag);
1899                                 rxq->rx_hw_errors++;
1900                                 ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1901                         } else {
1902                                 ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1903                         }
1904
1905                         if (tpa_start_flg)
1906                                 flags = cqe_start_tpa->tunnel_pars_flags.flags;
1907                         else
1908                                 flags = fp_cqe->tunnel_pars_flags.flags;
1909                         tunn_parse_flag = flags;
1910
1911                         /* Tunnel_type */
1912                         packet_type =
1913                                 qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
1914
1915                         /* Inner header */
1916                         packet_type |=
1917                               qede_rx_cqe_to_pkt_type_inner(parse_flag);
1918
1919                         /* Outer L3/L4 types are not available in the CQE.
1920                          * The placement offset must be applied to the
1921                          * mbuf before the outer headers can be parsed
1922                          * correctly, so set data_off first and derive
1923                          * the outer packet type only once.
1924                          */
1925                         rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1926                         packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1927                 } else {
1928                         packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1929                 }
1930
1931                 /* Common handling for non-tunnel packets and for the
1932                  * inner headers in the case of tunneled packets.
1933                  */
1934                 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1935                         PMD_RX_LOG(ERR, rxq,
1936                                     "L4 csum failed, flags = 0x%x\n",
1937                                     parse_flag);
1938                         rxq->rx_hw_errors++;
1939                         ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1940                 } else {
1941                         ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1942                 }
1943                 if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1944                         PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1945                                    parse_flag);
1946                         rxq->rx_hw_errors++;
1947                         ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1948                 } else {
1949                         ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1950                 }
1951
1952                 if (CQE_HAS_VLAN(parse_flag) ||
1953                     CQE_HAS_OUTER_VLAN(parse_flag)) {
1954                         /* Note: FW doesn't indicate Q-in-Q packet */
1955                         ol_flags |= RTE_MBUF_F_RX_VLAN;
1956                         if (qdev->vlan_strip_flg) {
1957                                 ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
1958                                 rx_mb->vlan_tci = vlan_tci;
1959                         }
1960                 }
1961
1962                 /* RSS Hash */
1963                 if (qdev->rss_enable) {
1964                         ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
1965                         rx_mb->hash.rss = rss_hash;
1966                 }
1967
1968                 rx_alloc_count++;
1969                 qede_rx_bd_ring_consume(rxq);
1970
1971                 if (!tpa_start_flg && fp_cqe->bd_num > 1) {
1972                         PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
1973                                    " len on first: %04x Total Len: %04x",
1974                                    fp_cqe->bd_num, len, pkt_len);
1975                         num_segs = fp_cqe->bd_num - 1;
1976                         seg1 = rx_mb;
1977                         if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
1978                                                  pkt_len - len))
1979                                 goto next_cqe;
1980
1981                         rx_alloc_count += num_segs;
1982                         rxq->rx_segs += num_segs;
1983                 }
1984                 rxq->rx_segs++; /* for the first segment */
1985
1986                 /* Prefetch next mbuf while processing current one. */
1987                 preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1988                 rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
1989
1990                 /* Update the rest of the mbuf fields */
1991                 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1992                 rx_mb->port = rxq->port_id;
1993                 rx_mb->ol_flags = ol_flags;
1994                 rx_mb->data_len = len;
1995                 rx_mb->packet_type = packet_type;
1996 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1997                 print_rx_bd_info(rx_mb, rxq, bitfield_val);
1998 #endif
1999                 if (!tpa_start_flg) {
2000                         rx_mb->nb_segs = fp_cqe->bd_num;
2001                         rx_mb->pkt_len = pkt_len;
2002                 } else {
2003                         /* store ref to the updated mbuf */
2004                         tpa_info->tpa_head = rx_mb;
2005                         tpa_info->tpa_tail = tpa_info->tpa_head;
2006                 }
2007                 rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
2008 tpa_end:
2009                 if (!tpa_start_flg) {
2010                         rx_pkts[rx_pkt] = rx_mb;
2011                         rx_pkt++;
2012                 }
2013 next_cqe:
2014                 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
2015                 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2016                 if (rx_pkt == nb_pkts) {
2017                         PMD_RX_LOG(DEBUG, rxq,
2018                                    "Budget reached nb_pkts=%u received=%u",
2019                                    rx_pkt, nb_pkts);
2020                         break;
2021                 }
2022         }
2023
2024         /* Record how many buffers to replenish on the next call */
2025         rxq->rx_alloc_count += rx_alloc_count;
2026
2027         rxq->rcv_pkts += rx_pkt;
2028
2029         PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
2030
2031         return rx_pkt;
2032 }
2033
2034 uint16_t
2035 qede_recv_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2036 {
2037         struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
2038         uint16_t eng0_pkts, eng1_pkts;
2039
2040         eng0_pkts = nb_pkts / 2;
2041
2042         eng0_pkts = qede_recv_pkts(fp_cmt->fp0->rxq, rx_pkts, eng0_pkts);
2043
2044         eng1_pkts = nb_pkts - eng0_pkts;
2045
2046         eng1_pkts = qede_recv_pkts(fp_cmt->fp1->rxq, rx_pkts + eng0_pkts,
2047                                    eng1_pkts);
2048
2049         return eng0_pkts + eng1_pkts;
2050 }
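/* Editor's note: in CMT (combined) mode one port is backed by two HW
 * engines, each with its own Rx queue.  The wrapper above halves the burst
 * budget and lets engine 1 use whatever engine 0 left unclaimed.  A
 * standalone model with a hypothetical per-queue poll callback:
 */
#if 0
#include <stdint.h>

typedef uint16_t (*ex_poll_t)(void *q, void **pkts, uint16_t budget);

static uint16_t ex_poll_two_engines(ex_poll_t poll, void *q0, void *q1,
				    void **pkts, uint16_t budget)
{
	uint16_t got0 = poll(q0, pkts, budget / 2);
	/* Engine 1 may consume its half plus engine 0's shortfall. */
	uint16_t got1 = poll(q1, pkts + got0, budget - got0);

	return got0 + got1;
}
#endif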
2051
2052 /* Populate scatter gather buffer descriptor fields */
2053 static inline uint16_t
2054 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
2055                   struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
2056                   uint16_t start_seg)
2057 {
2058         struct qede_tx_queue *txq = p_txq;
2059         struct eth_tx_bd *tx_bd = NULL;
2060         dma_addr_t mapping;
2061         uint16_t nb_segs = 0;
2062
2063         /* Check for scattered buffers */
2064         while (m_seg) {
2065                 if (start_seg == 0) {
2066                         if (!*bd2) {
2067                                 *bd2 = (struct eth_tx_2nd_bd *)
2068                                         ecore_chain_produce(&txq->tx_pbl);
2069                                 memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
2070                                 nb_segs++;
2071                         }
2072                         mapping = rte_mbuf_data_iova(m_seg);
2073                         QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
2074                         PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
2075                 } else if (start_seg == 1) {
2076                         if (!*bd3) {
2077                                 *bd3 = (struct eth_tx_3rd_bd *)
2078                                         ecore_chain_produce(&txq->tx_pbl);
2079                                 memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
2080                                 nb_segs++;
2081                         }
2082                         mapping = rte_mbuf_data_iova(m_seg);
2083                         QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
2084                         PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
2085                 } else {
2086                         tx_bd = (struct eth_tx_bd *)
2087                                 ecore_chain_produce(&txq->tx_pbl);
2088                         memset(tx_bd, 0, sizeof(*tx_bd));
2089                         nb_segs++;
2090                         mapping = rte_mbuf_data_iova(m_seg);
2091                         QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
2092                         PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
2093                 }
2094                 start_seg++;
2095                 m_seg = m_seg->next;
2096         }
2097
2098         /* Return total scattered buffers */
2099         return nb_segs;
2100 }
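/* Editor's note: the segment-to-BD mapping implemented above, reduced to a
 * standalone sketch: the first two chained segments land in the dedicated
 * second and third BD structures, and every later segment gets a plain
 * eth_tx_bd.  start_seg says how many chained positions were already used,
 * so callers that consumed BD2/BD3 themselves can skip ahead.
 */
#if 0
static const char *ex_bd_slot(unsigned int start_seg)
{
	switch (start_seg) {
	case 0:  return "BD2";          /* struct eth_tx_2nd_bd */
	case 1:  return "BD3";          /* struct eth_tx_3rd_bd */
	default: return "generic BD";   /* struct eth_tx_bd */
	}
}
#endif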
2101
2102 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2103 static inline void
2104 print_tx_bd_info(struct qede_tx_queue *txq,
2105                  struct eth_tx_1st_bd *bd1,
2106                  struct eth_tx_2nd_bd *bd2,
2107                  struct eth_tx_3rd_bd *bd3,
2108                  uint64_t tx_ol_flags)
2109 {
2110         char ol_buf[256] = { 0 }; /* for verbose prints */
2111
2112         if (bd1)
2113                 PMD_TX_LOG(INFO, txq,
2114                    "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
2115                    rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
2116                    bd1->data.bd_flags.bitfields,
2117                    rte_cpu_to_le_16(bd1->data.bitfields));
2118         if (bd2)
2119                 PMD_TX_LOG(INFO, txq,
2120                    "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
2121                    rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
2122                    bd2->data.bitfields2, bd2->data.tunn_ip_size);
2123         if (bd3)
2124                 PMD_TX_LOG(INFO, txq,
2125                    "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
2126                    "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
2127                    rte_cpu_to_le_16(bd3->nbytes),
2128                    rte_cpu_to_le_16(bd3->data.bitfields),
2129                    rte_cpu_to_le_16(bd3->data.lso_mss),
2130                    bd3->data.tunn_l4_hdr_start_offset_w,
2131                    bd3->data.tunn_hdr_size_w);
2132
2133         rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
2134         PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
2135 }
2136 #endif
2137
2138 /* Tx prepare: check that packets meet the Tx conditions */
2139 uint16_t
2140 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2141 qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
2142                     uint16_t nb_pkts)
2143 {
2144         struct qede_tx_queue *txq = p_txq;
2145 #else
2146 qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
2147                     uint16_t nb_pkts)
2148 {
2149 #endif
2150         uint64_t ol_flags;
2151         struct rte_mbuf *m;
2152         uint16_t i;
2153 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2154         int ret;
2155 #endif
2156
2157         for (i = 0; i < nb_pkts; i++) {
2158                 m = tx_pkts[i];
2159                 ol_flags = m->ol_flags;
2160                 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2161                         if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
2162                                 rte_errno = EINVAL;
2163                                 break;
2164                         }
2165                         /* TBD: confirm it's ~9700B for both? */
2166                         if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
2167                                 rte_errno = EINVAL;
2168                                 break;
2169                         }
2170                 } else {
2171                         if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
2172                                 rte_errno = EINVAL;
2173                                 break;
2174                         }
2175                 }
2176                 if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
2177                         /* We support only limited tunnel protocols */
2178                         if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
2179                                 uint64_t temp;
2180
2181                                 temp = ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK;
2182                                 if (temp == RTE_MBUF_F_TX_TUNNEL_VXLAN ||
2183                                     temp == RTE_MBUF_F_TX_TUNNEL_GENEVE ||
2184                                     temp == RTE_MBUF_F_TX_TUNNEL_MPLSINUDP ||
2185                                     temp == RTE_MBUF_F_TX_TUNNEL_GRE)
2186                                         continue;
2187                         }
2188
2189                         rte_errno = ENOTSUP;
2190                         break;
2191                 }
2192
2193 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
2194                 ret = rte_validate_tx_offload(m);
2195                 if (ret != 0) {
2196                         rte_errno = -ret;
2197                         break;
2198                 }
2199 #endif
2200         }
2201
2202 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2203         if (unlikely(i != nb_pkts))
2204                 PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
2205                            nb_pkts - i);
2206 #endif
2207         return i;
2208 }
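/* Editor's note: applications reach the prepare handler above through the
 * generic rte_eth_tx_prepare() API.  A minimal usage sketch, assuming
 * 'port', 'queue', 'pkts' and 'n' were set up elsewhere:
 */
#if 0
#include <rte_ethdev.h>

static uint16_t ex_send_burst(uint16_t port, uint16_t queue,
			      struct rte_mbuf **pkts, uint16_t n)
{
	/* Returns how many leading packets passed the checks; on failure
	 * rte_errno (EINVAL/ENOTSUP, as set above) says why the first
	 * rejected packet was refused.
	 */
	uint16_t nb_ok = rte_eth_tx_prepare(port, queue, pkts, n);

	return rte_eth_tx_burst(port, queue, pkts, nb_ok);
}
#endif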
2209
2210 #define MPLSINUDP_HDR_SIZE                      (12)
2211
2212 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2213 static inline void
2214 qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
2215                                struct qede_tx_queue *txq)
2216 {
2217         if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
2218                 PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
2219         if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
2220                 MPLSINUDP_HDR_SIZE) / 2) > 0xff)
2221                 PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
2222         if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
2223                 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
2224                 PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
2225         if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
2226                 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
2227                 PMD_TX_LOG(ERR, txq, "inner_l4_hdr_offset overflow\n");
2228 }
2229 #endif
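/* Editor's note: the checks above guard the FW convention of expressing
 * header offsets in two-byte words.  A worked example with hypothetical
 * lengths: outer Ethernet 14 + outer IPv4 20 = 34 bytes -> 17 words to
 * the outer L4 header; adding the 12-byte MPLSoUDP header (UDP 8 + one
 * MPLS label 4) gives 46 bytes -> 23 words of tunnel header, well inside
 * the 0xff limit tested above.
 */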
2230
2231 uint16_t
2232 qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2233 {
2234         struct qede_tx_queue *txq = p_txq;
2235         struct qede_dev *qdev = txq->qdev;
2236         struct ecore_dev *edev = &qdev->edev;
2237         struct eth_tx_1st_bd *bd1;
2238         struct eth_tx_2nd_bd *bd2;
2239         struct eth_tx_3rd_bd *bd3;
2240         struct rte_mbuf *m_seg = NULL;
2241         struct rte_mbuf *mbuf;
2242         struct rte_mbuf **sw_tx_ring;
2243         uint16_t nb_tx_pkts;
2244         uint16_t bd_prod;
2245         uint16_t idx;
2246         uint16_t nb_frags = 0;
2247         uint16_t nb_pkt_sent = 0;
2248         uint8_t nbds;
2249         uint64_t tx_ol_flags;
2250         /* BD1 */
2251         uint16_t bd1_bf;
2252         uint8_t bd1_bd_flags_bf;
2253
2254         if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
2255                 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
2256                            nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
2257                 qede_process_tx_compl(edev, txq);
2258         }
2259
2260         nb_tx_pkts  = nb_pkts;
2261         bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2262         sw_tx_ring = txq->sw_tx_ring;
2263
2264         while (nb_tx_pkts--) {
2265                 /* Init flags/values */
2266                 nbds = 0;
2267                 bd1 = NULL;
2268                 bd2 = NULL;
2269                 bd3 = NULL;
2270                 bd1_bf = 0;
2271                 bd1_bd_flags_bf = 0;
2272                 nb_frags = 0;
2273
2274                 mbuf = *tx_pkts++;
2275                 assert(mbuf);
2276
2277
2278                 /* Check that enough Tx BDs are available for this mbuf */
2279                 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
2280                         break;
2281
2282                 tx_ol_flags = mbuf->ol_flags;
2283                 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2284
2285                 if (unlikely(txq->nb_tx_avail <
2286                                 ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
2287                         break;
2288                 bd1_bf |=
2289                        (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
2290                         << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
2291
2292                 /* Offload the IP checksum to the hardware */
2293                 if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
2294                         bd1_bd_flags_bf |=
2295                                 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2296
2297                 /* L4 checksum offload (TCP or UDP) */
2298                 if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
2299                     (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM)))
2300                         bd1_bd_flags_bf |=
2301                                 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2302
2303                 /* Fill the entry in the SW ring and the BDs in the FW ring */
2304                 idx = TX_PROD(txq);
2305                 sw_tx_ring[idx] = mbuf;
2306
2307                 /* BD1 */
2308                 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2309                 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
2310                 nbds++;
2311
2312                 /* Map MBUF linear data for DMA and set in the BD1 */
2313                 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2314                                      mbuf->data_len);
2315                 bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
2316                 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
2317
2318                 /* Handle fragmented MBUF */
2319                 if (unlikely(mbuf->nb_segs > 1)) {
2320                         m_seg = mbuf->next;
2321
2322                         /* Encode scatter gather buffer descriptors */
2323                         nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3,
2324                                                      nbds - 1);
2325                 }
2326
2327                 bd1->data.nbds = nbds + nb_frags;
2328
2329                 txq->nb_tx_avail -= bd1->data.nbds;
2330                 txq->sw_tx_prod++;
2331                 bd_prod =
2332                     rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2333 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2334                 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
2335 #endif
2336                 nb_pkt_sent++;
2337                 txq->xmit_pkts++;
2338         }
2339
2340         /* Write value of prod idx into bd_prod */
2341         txq->tx_db.data.bd_prod = bd_prod;
2342         rte_wmb();
2343         rte_compiler_barrier();
2344         DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
2345         rte_wmb();
2346
2347         /* Check again for Tx completions */
2348         qede_process_tx_compl(edev, txq);
2349
2350         PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
2351                    nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
2352
2353         return nb_pkt_sent;
2354 }
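/* Editor's note: a standalone sketch (not compiled) of the ordering rule
 * that the doorbell sequence above enforces: every BD store must be
 * globally visible before the new producer value reaches the device, so a
 * write barrier sits between filling the descriptors and the doorbell.
 */
#if 0
#include <stdint.h>
#include <rte_atomic.h>

static void ex_ring_doorbell(volatile uint32_t *db_reg, uint32_t prod)
{
	/* Descriptors were written by the caller...                  */
	rte_wmb();          /* ...and must land before the doorbell.  */
	*db_reg = prod;     /* The device may start DMA from here on. */
}
#endif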
2355
2356 uint16_t
2357 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2358 {
2359         struct qede_tx_queue *txq = p_txq;
2360         struct qede_dev *qdev = txq->qdev;
2361         struct ecore_dev *edev = &qdev->edev;
2362         struct rte_mbuf *mbuf;
2363         struct rte_mbuf *m_seg = NULL;
2364         uint16_t nb_tx_pkts;
2365         uint16_t bd_prod;
2366         uint16_t idx;
2367         uint16_t nb_frags;
2368         uint16_t nb_pkt_sent = 0;
2369         uint8_t nbds;
2370         bool lso_flg;
2371         bool mplsoudp_flg;
2372         __rte_unused bool tunn_flg;
2373         bool tunn_ipv6_ext_flg;
2374         struct eth_tx_1st_bd *bd1;
2375         struct eth_tx_2nd_bd *bd2;
2376         struct eth_tx_3rd_bd *bd3;
2377         uint64_t tx_ol_flags;
2378         uint16_t hdr_size;
2379         /* BD1 */
2380         uint16_t bd1_bf;
2381         uint8_t bd1_bd_flags_bf;
2382         uint16_t vlan;
2383         /* BD2 */
2384         uint16_t bd2_bf1;
2385         uint16_t bd2_bf2;
2386         /* BD3 */
2387         uint16_t mss;
2388         uint16_t bd3_bf;
2389
2390         uint8_t tunn_l4_hdr_start_offset;
2391         uint8_t tunn_hdr_size;
2392         uint8_t inner_l2_hdr_size;
2393         uint16_t inner_l4_hdr_offset;
2394
2395         if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
2396                 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
2397                            nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
2398                 qede_process_tx_compl(edev, txq);
2399         }
2400
2401         nb_tx_pkts  = nb_pkts;
2402         bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2403         while (nb_tx_pkts--) {
2404                 /* Init flags/values */
2405                 tunn_flg = false;
2406                 lso_flg = false;
2407                 nbds = 0;
2408                 vlan = 0;
2409                 bd1 = NULL;
2410                 bd2 = NULL;
2411                 bd3 = NULL;
2412                 hdr_size = 0;
2413                 bd1_bf = 0;
2414                 bd1_bd_flags_bf = 0;
2415                 bd2_bf1 = 0;
2416                 bd2_bf2 = 0;
2417                 mss = 0;
2418                 bd3_bf = 0;
2419                 mplsoudp_flg = false;
2420                 tunn_ipv6_ext_flg = false;
2421                 tunn_hdr_size = 0;
2422                 tunn_l4_hdr_start_offset = 0;
2423
2424                 mbuf = *tx_pkts++;
2425                 assert(mbuf);
2426
2427                 /* Check that enough Tx BDs are available for this mbuf */
2428                 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
2429                         break;
2430
2431                 tx_ol_flags = mbuf->ol_flags;
2432                 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
2433
2434                 /* Tx prepare has already checked the supported tunnel
2435                  * Tx offloads. Don't rely on the pkt_type marked by Rx;
2436                  * use tx_ol_flags to decide instead.
2437                  */
2438                 tunn_flg = !!(tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
2439
2440                 if (tunn_flg) {
2441                         /* Check against max which is Tunnel IPv6 + ext */
2442                         if (unlikely(txq->nb_tx_avail <
2443                                 ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
2444                                 break;
2445
2446                         /* First, indicate it's a tunnel packet */
2447                         bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
2448                                   ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
2449                         /* Legacy FW had flipped behavior with regard to
2450                          * this bit, i.e. it needed to be set to prevent
2451                          * the FW from touching encapsulated packets when
2452                          * it didn't need to.
2453                          */
2453                         if (unlikely(txq->is_legacy)) {
2454                                 bd1_bf ^= 1 <<
2455                                         ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
2456                         }
2457
2458                         /* Outer IP checksum offload */
2459                         if (tx_ol_flags & (RTE_MBUF_F_TX_OUTER_IP_CKSUM |
2460                                            RTE_MBUF_F_TX_OUTER_IPV4)) {
2461                                 bd1_bd_flags_bf |=
2462                                         ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
2463                                         ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
2464                         }
2465
2466                         /* Currently, only inner checksum offload in an
2467                          * MPLS-in-UDP tunnel with a single MPLS label is
2468                          * supported. Both the outer and the inner layer
2469                          * lengths need to be provided in the mbuf so the
2470                          * header offsets below can be computed.
2471                          */
2472                         if ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ==
2473                                                 RTE_MBUF_F_TX_TUNNEL_MPLSINUDP) {
2474                                 mplsoudp_flg = true;
2475 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2476                                 qede_mpls_tunn_tx_sanity_check(mbuf, txq);
2477 #endif
2478                                 /* Outer L4 offset in two byte words */
2479                                 tunn_l4_hdr_start_offset =
2480                                   (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
2481                                 /* Tunnel header size in two byte words */
2482                                 tunn_hdr_size = (mbuf->outer_l2_len +
2483                                                 mbuf->outer_l3_len +
2484                                                 MPLSINUDP_HDR_SIZE) / 2;
2485                                 /* Inner L2 header size in two byte words */
2486                                 inner_l2_hdr_size = (mbuf->l2_len -
2487                                                 MPLSINUDP_HDR_SIZE) / 2;
2488                                 /* Inner L4 header offset from the beginning
2489                                  * of inner packet in two byte words
2490                                  */
2491                                 inner_l4_hdr_offset = (mbuf->l2_len -
2492                                         MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
2493
2494                                 /* Inner L2 size and address type */
2495                                 bd2_bf1 |= (inner_l2_hdr_size &
2496                                         ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
2497                                         ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
2498                                 bd2_bf1 |= (UNICAST_ADDRESS &
2499                                         ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
2500                                         ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
2501                                 /* Treated as IPv6+Ext */
2502                                 bd2_bf1 |=
2503                                     1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
2504
2505                                 /* Mark inner IPv6 if present */
2506                                 if (tx_ol_flags & RTE_MBUF_F_TX_IPV6)
2507                                         bd2_bf1 |=
2508                                                 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
2509
2510                                 /* Inner L4 offsets */
2511                                 if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
2512                                      (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM |
2513                                                         RTE_MBUF_F_TX_TCP_CKSUM))) {
2514                                         /* Determines if BD3 is needed */
2515                                         tunn_ipv6_ext_flg = true;
2516                                         if ((tx_ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
2517                                                         RTE_MBUF_F_TX_UDP_CKSUM) {
2518                                                 bd2_bf1 |=
2519                                                         1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
2520                                         }
2521
2522                                         /* TODO other pseudo checksum modes are
2523                                          * not supported
2524                                          */
2525                                         bd2_bf1 |=
2526                                         ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
2527                                         ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
2528                                         bd2_bf2 |= (inner_l4_hdr_offset &
2529                                                 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
2530                                                 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
2531                                 }
2532                         } /* End MPLSoUDP */
2533                 } /* End Tunnel handling */
2534
2535                 if (tx_ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2536                         lso_flg = true;
2537                         if (unlikely(txq->nb_tx_avail <
2538                                                 ETH_TX_MIN_BDS_PER_LSO_PKT))
2539                                 break;
2540                         /* For LSO, the packet header and payload must reside
2541                          * in buffers pointed to by different BDs. BD1 is used
2542                          * for the header and BD2 onwards for the data.
2543                          */
2544                         hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
2545                         if (tunn_flg)
2546                                 hdr_size += mbuf->outer_l2_len +
2547                                             mbuf->outer_l3_len;
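                        /* e.g. plain TCP/IPv4 over Ethernet with no TCP
                         * options: hdr_size = 14 + 20 + 20 = 54 bytes.
                         */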
2548
2549                         bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
2550                         bd1_bd_flags_bf |=
2551                                         1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2552                         /* RTE_MBUF_F_TX_TCP_SEG implies RTE_MBUF_F_TX_TCP_CKSUM */
2553                         bd1_bd_flags_bf |=
2554                                         1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2555                         mss = rte_cpu_to_le_16(mbuf->tso_segsz);
2556                         /* Using one header BD */
2557                         bd3_bf |= rte_cpu_to_le_16(1 <<
2558                                         ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
2559                 } else {
2560                         if (unlikely(txq->nb_tx_avail <
2561                                         ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
2562                                 break;
2563                         bd1_bf |=
2564                                (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
2565                                 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
2566                 }
2567
2568                 /* Descriptor-based VLAN insertion */
2569                 if (tx_ol_flags & RTE_MBUF_F_TX_VLAN) {
2570                         vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
2571                         bd1_bd_flags_bf |=
2572                             1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
2573                 }
2574
2575                 /* Offload the IP checksum to the hardware */
2576                 if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
2577                         bd1_bd_flags_bf |=
2578                                 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2579                         /* There's no DPDK flag to request outer L4 csum
2580                          * offload. But in the tunnel case, if inner L3 or L4
2581                          * csum offload is requested, we must also force
2582                          * recalculation of the tunnel's L4 header csum.
2583                          */
2584                         if (tunn_flg && ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) !=
2585                                                         RTE_MBUF_F_TX_TUNNEL_GRE)) {
2586                                 bd1_bd_flags_bf |=
2587                                         ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2588                                         ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2589                         }
2590                 }
2591
2592                 /* L4 checksum offload (tcp or udp) */
2593                 if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
2594                     (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM))) {
2595                         bd1_bd_flags_bf |=
2596                                 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2597                         /* There's no DPDK flag to request outer L4 csum
2598                          * offload. But in the tunnel case, if inner L3 or L4
2599                          * csum offload is requested, we must also force
2600                          * recalculation of the tunnel's L4 header csum.
2601                          */
2602                         if (tunn_flg) {
2603                                 bd1_bd_flags_bf |=
2604                                         ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2605                                         ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2606                         }
2607                 }
2608
2609                 /* Fill the entry in the SW ring and the BDs in the FW ring */
2610                 idx = TX_PROD(txq);
2611                 txq->sw_tx_ring[idx] = mbuf;
2612
2613                 /* BD1 */
2614                 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2615                 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
2616                 nbds++;
2617
2618                 /* Map the mbuf's linear data for DMA and set it in BD1 */
2619                 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2620                                      mbuf->data_len);
2621                 bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
2622                 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
2623                 bd1->data.vlan = vlan;
2624
2625                 if (lso_flg || mplsoudp_flg) {
2626                         bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
2627                                                         (&txq->tx_pbl);
2628                         memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
2629                         nbds++;
2630
2631                         /* Repoint BD1 at the header bytes only */
2632                         QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2633                                              hdr_size);
2634                         /* BD2 carries the rest of the first segment */
2635                         QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
2636                                              rte_mbuf_data_iova(mbuf)),
2637                                              mbuf->data_len - hdr_size);
2638                         bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
2639                         if (mplsoudp_flg) {
2640                                 bd2->data.bitfields2 =
2641                                         rte_cpu_to_le_16(bd2_bf2);
2642                                 /* Outer L3 size */
2643                                 bd2->data.tunn_ip_size =
2644                                         rte_cpu_to_le_16(mbuf->outer_l3_len);
2645                         }
2646                         /* BD3 */
2647                         if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
2648                                 bd3 = (struct eth_tx_3rd_bd *)
2649                                         ecore_chain_produce(&txq->tx_pbl);
2650                                 memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
2651                                 nbds++;
2652                                 bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
2653                                 if (lso_flg)
2654                                         bd3->data.lso_mss = mss;
2655                                 if (mplsoudp_flg) {
2656                                         bd3->data.tunn_l4_hdr_start_offset_w =
2657                                                 tunn_l4_hdr_start_offset;
2658                                         bd3->data.tunn_hdr_size_w =
2659                                                 tunn_hdr_size;
2660                                 }
2661                         }
2662                 }
2663
2664                 /* Handle fragmented MBUF */
2665                 m_seg = mbuf->next;
2666
2667                 /* Encode scatter-gather buffer descriptors if required */
2668                 nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
2669                 bd1->data.nbds = nbds + nb_frags;
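                /* nbds counts the header/control BDs produced above; the FW
                 * is told the grand total including one BD per extra segment.
                 */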
2670
2671                 txq->nb_tx_avail -= bd1->data.nbds;
2672                 txq->sw_tx_prod++;
2673                 bd_prod =
2674                     rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2675 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2676                 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
2677 #endif
2678                 nb_pkt_sent++;
2679                 txq->xmit_pkts++;
2680         }
2681
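        /* Barrier discipline: the first rte_wmb() makes the BD ring and
         * doorbell-data writes visible before the relaxed MMIO doorbell
         * write; the trailing rte_wmb() orders that write against
         * anything that follows.
         */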
2682         /* Write the producer index into the doorbell data */
2683         txq->tx_db.data.bd_prod = bd_prod;
2684         rte_wmb();
2685         rte_compiler_barrier();
2686         DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
2687         rte_wmb();
2688
2689         /* Check again for Tx completions */
2690         qede_process_tx_compl(edev, txq);
2691
2692         PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
2693                    nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
2694
2695         return nb_pkt_sent;
2696 }
2697
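/* Tx burst handler for CMT (dual-engine, e.g. 100G) devices: the burst
 * is split between the Tx queues of the two engines.
 */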
2698 uint16_t
2699 qede_xmit_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2700 {
2701         struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
2702         uint16_t eng0_pkts, eng1_pkts;
2703
2704         eng0_pkts = nb_pkts / 2;
2705
2706         eng0_pkts = qede_xmit_pkts(fp_cmt->fp0->txq, tx_pkts, eng0_pkts);
2707
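        /* eng0_pkts now holds the count actually sent on engine 0; any
         * shortfall simply enlarges engine 1's share of the burst.
         */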
2708         eng1_pkts = nb_pkts - eng0_pkts;
2709
2710         eng1_pkts = qede_xmit_pkts(fp_cmt->fp1->txq, tx_pkts + eng0_pkts,
2711                                    eng1_pkts);
2712
2713         return eng0_pkts + eng1_pkts;
2714 }
2715
2716 /* This function does a dry-run walk over the completion queue
2717  * to calculate the number of BDs used by the HW.
2718  * At the end, it restores the completion queue to its original state.
2719  */
2720 static uint16_t
2721 qede_parse_fp_cqe(struct qede_rx_queue *rxq)
2722 {
2723         uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
2724         union eth_rx_cqe *cqe, *orig_cqe = NULL;
2725
2726         hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
2727         sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2728
2729         if (hw_comp_cons == sw_comp_cons)
2730                 return 0;
2731
2732         /* Get the CQE from the completion ring */
2733         cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2734         orig_cqe = cqe;
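        /* Remember the first CQE so the consumer index can be rewound
         * to it via ecore_chain_set_cons() after the walk.
         */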
2735
2736         while (sw_comp_cons != hw_comp_cons) {
2737                 switch (cqe->fast_path_regular.type) {
2738                 case ETH_RX_CQE_TYPE_REGULAR:
2739                         bd_count += cqe->fast_path_regular.bd_num;
2740                         break;
2741                 case ETH_RX_CQE_TYPE_TPA_END:
2742                         bd_count += cqe->fast_path_tpa_end.num_of_bds;
2743                         break;
2744                 default:
2745                         break;
2746                 }
2747
2748                 cqe =
2749                 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
2750                 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
2751         }
2752
2753         /* Revert comp_ring to its original state */
2754         ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);
2755
2756         return bd_count;
2757 }
2758
2759 int
2760 qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
2761 {
2762         uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
2763         uint16_t produced, consumed;
2764         struct qede_rx_queue *rxq = p_rxq;
2765
2766         if (offset > rxq->nb_rx_desc)
2767                 return -EINVAL;
2768
2769         sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
2770         sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
2771
2772         /* Find the BDs used by HW from the completion queue elements */
2773         hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);
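        /* Both distances below are computed on free-running 16-bit
         * indices, hence the explicit wraparound handling.
         */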
2774
2775         if (hw_bd_cons < sw_bd_cons)
2776                 /* wraparound case */
2777                 consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
2778         else
2779                 consumed = hw_bd_cons - sw_bd_cons;
2780
2781         if (offset <= consumed)
2782                 return RTE_ETH_RX_DESC_DONE;
2783
2784         if (sw_bd_prod < sw_bd_cons)
2785                 /* wraparound case */
2786                 produced = (0xffff - sw_bd_cons) + sw_bd_prod;
2787         else
2788                 produced = sw_bd_prod - sw_bd_cons;
2789
2790         if (offset <= produced)
2791                 return RTE_ETH_RX_DESC_AVAIL;
2792
2793         return RTE_ETH_RX_DESC_UNAVAIL;
2794 }
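
/* Illustrative caller-side sketch (not part of this file): applications
 * normally reach the handler above through the generic ethdev API:
 *
 *     int st = rte_eth_rx_descriptor_status(port_id, queue_id, offset);
 *
 *     if (st == RTE_ETH_RX_DESC_DONE)
 *             ;  // a completed packet is ready at this offset
 *     else if (st == RTE_ETH_RX_DESC_AVAIL)
 *             ;  // BD is posted to HW but not yet filled
 *     else
 *             ;  // RTE_ETH_RX_DESC_UNAVAIL or a negative errno
 */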