1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include <rte_net.h>
10 #include "qede_rxtx.h"
11
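/* Replenish one RX buffer: take a raw mbuf from the queue's mempool, record
 * it in the software ring at the current producer index, and post its DMA
 * address as a new BD on the hardware BD ring. The software producer is only
 * advanced on success, so the caller can simply retry when the mempool is
 * exhausted.
 */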
12 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
13 {
14         struct rte_mbuf *new_mb = NULL;
15         struct eth_rx_bd *rx_bd;
16         dma_addr_t mapping;
17         uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
18
19         new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
20         if (unlikely(!new_mb)) {
21                 PMD_RX_LOG(ERR, rxq,
22                            "Failed to allocate rx buffer "
23                            "sw_rx_prod %u sw_rx_cons %u mp avail %u in-use %u",
24                            idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
25                            rte_mempool_avail_count(rxq->mb_pool),
26                            rte_mempool_in_use_count(rxq->mb_pool));
27                 return -ENOMEM;
28         }
29         rxq->sw_rx_ring[idx].mbuf = new_mb;
30         rxq->sw_rx_ring[idx].page_offset = 0;
31         mapping = rte_mbuf_data_dma_addr_default(new_mb);
32         /* Advance PROD and get BD pointer */
33         rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
34         rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
35         rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
36         rxq->sw_rx_prod++;
37         return 0;
38 }
39
40 int
41 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
42                     uint16_t nb_desc, unsigned int socket_id,
43                     __rte_unused const struct rte_eth_rxconf *rx_conf,
44                     struct rte_mempool *mp)
45 {
46         struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
47         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
48         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
49         struct qede_rx_queue *rxq;
50         uint16_t max_rx_pkt_len;
51         uint16_t bufsz;
52         size_t size;
53         int rc;
54
55         PMD_INIT_FUNC_TRACE(edev);
56
57         /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
58         if (!rte_is_power_of_2(nb_desc)) {
59                 DP_ERR(edev, "Ring size %u is not a power of 2\n",
60                           nb_desc);
61                 return -EINVAL;
62         }
63
64         /* Free memory prior to re-allocation if needed... */
65         if (dev->data->rx_queues[queue_idx] != NULL) {
66                 qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
67                 dev->data->rx_queues[queue_idx] = NULL;
68         }
69
70         /* First allocate the rx queue data structure */
71         rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
72                                  RTE_CACHE_LINE_SIZE, socket_id);
73
74         if (!rxq) {
75                 DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
76                           socket_id);
77                 return -ENOMEM;
78         }
79
80         rxq->qdev = qdev;
81         rxq->mb_pool = mp;
82         rxq->nb_rx_desc = nb_desc;
83         rxq->queue_id = queue_idx;
84         rxq->port_id = dev->data->port_id;
85         max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
86         qdev->mtu = max_rx_pkt_len;
87
88         /* Fix up RX buffer size */
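        /* Scatter-gather is forced when the application requested it or when
         * a single mbuf (data room minus headroom) cannot hold max_rx_pkt_len
         * plus the per-packet overhead. In scatter mode the per-BD buffer
         * size is based on the mbuf data room; otherwise a single BD must
         * hold the whole MTU plus overhead. Either way the size is rounded up
         * to a cache line.
         */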
89         bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
90         if ((rxmode->enable_scatter)                    ||
91             (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
92                 if (!dev->data->scattered_rx) {
93                         DP_INFO(edev, "Forcing scatter-gather mode\n");
94                         dev->data->scattered_rx = 1;
95                 }
96         }
97         if (dev->data->scattered_rx)
98                 rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
99         else
100                 rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
101         /* Align to cache-line size if needed */
102         rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
103
104         DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
105                 qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
106
107         /* Allocate the parallel driver ring for Rx buffers */
108         size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
109         rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
110                                              RTE_CACHE_LINE_SIZE, socket_id);
111         if (!rxq->sw_rx_ring) {
112                 DP_NOTICE(edev, false,
113                           "Unable to alloc memory for sw_rx_ring on socket %u\n",
114                           socket_id);
115                 rte_free(rxq);
116                 return -ENOMEM;
117         }
118
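        /* Ring memory comes from the ecore chain helpers. As the mode names
         * indicate, the RX BD ring below uses next-pointer chaining (the last
         * element of each page links to the next page), while the completion
         * ring uses PBL mode, where a separate page base list describes the
         * ring pages to the firmware.
         */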
119         /* Allocate FW Rx ring  */
120         rc = qdev->ops->common->chain_alloc(edev,
121                                             ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
122                                             ECORE_CHAIN_MODE_NEXT_PTR,
123                                             ECORE_CHAIN_CNT_TYPE_U16,
124                                             rxq->nb_rx_desc,
125                                             sizeof(struct eth_rx_bd),
126                                             &rxq->rx_bd_ring,
127                                             NULL);
128
129         if (rc != ECORE_SUCCESS) {
130                 DP_NOTICE(edev, false,
131                           "Unable to alloc memory for rxbd ring on socket %u\n",
132                           socket_id);
133                 rte_free(rxq->sw_rx_ring);
134                 rte_free(rxq);
135                 return -ENOMEM;
136         }
137
138         /* Allocate FW completion ring */
139         rc = qdev->ops->common->chain_alloc(edev,
140                                             ECORE_CHAIN_USE_TO_CONSUME,
141                                             ECORE_CHAIN_MODE_PBL,
142                                             ECORE_CHAIN_CNT_TYPE_U16,
143                                             rxq->nb_rx_desc,
144                                             sizeof(union eth_rx_cqe),
145                                             &rxq->rx_comp_ring,
146                                             NULL);
147
148         if (rc != ECORE_SUCCESS) {
149                 DP_NOTICE(edev, false,
150                           "Unable to alloc memory for cqe ring on socket %u\n",
151                           socket_id);
152                 /* TBD: Freeing RX BD ring */
153                 rte_free(rxq->sw_rx_ring);
154                 rte_free(rxq);
155                 return -ENOMEM;
156         }
157
158         dev->data->rx_queues[queue_idx] = rxq;
159         qdev->fp_array[queue_idx].rxq = rxq;
160
161         DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
162                   queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
163
164         return 0;
165 }
166
167 static void
168 qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
169                     struct qede_rx_queue *rxq)
170 {
171         DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
172         ecore_chain_reset(&rxq->rx_bd_ring);
173         ecore_chain_reset(&rxq->rx_comp_ring);
174         rxq->sw_rx_prod = 0;
175         rxq->sw_rx_cons = 0;
176         *rxq->hw_cons_ptr = 0;
177 }
178
179 static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
180 {
181         uint16_t i;
182
183         if (rxq->sw_rx_ring) {
184                 for (i = 0; i < rxq->nb_rx_desc; i++) {
185                         if (rxq->sw_rx_ring[i].mbuf) {
186                                 rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
187                                 rxq->sw_rx_ring[i].mbuf = NULL;
188                         }
189                 }
190         }
191 }
192
193 void qede_rx_queue_release(void *rx_queue)
194 {
195         struct qede_rx_queue *rxq = rx_queue;
196
197         if (rxq) {
198                 qede_rx_queue_release_mbufs(rxq);
199                 rte_free(rxq->sw_rx_ring);
200                 rte_free(rxq);
201         }
202 }
203
204 /* Stops a given RX queue in the HW */
205 static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
206 {
207         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
208         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
209         struct ecore_hwfn *p_hwfn;
210         struct qede_rx_queue *rxq;
211         int hwfn_index;
212         int rc;
213
214         if (rx_queue_id < eth_dev->data->nb_rx_queues) {
215                 rxq = eth_dev->data->rx_queues[rx_queue_id];
216                 hwfn_index = rx_queue_id % edev->num_hwfns;
217                 p_hwfn = &edev->hwfns[hwfn_index];
218                 rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
219                                 true, false);
220                 if (rc != ECORE_SUCCESS) {
221                         DP_ERR(edev, "RX queue %u stop failed\n", rx_queue_id);
222                         return -1;
223                 }
224                 qede_rx_queue_release_mbufs(rxq);
225                 qede_rx_queue_reset(qdev, rxq);
226                 eth_dev->data->rx_queue_state[rx_queue_id] =
227                         RTE_ETH_QUEUE_STATE_STOPPED;
228                 DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
229         } else {
230                 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
231                 rc = -EINVAL;
232         }
233
234         return rc;
235 }
236
237 int
238 qede_tx_queue_setup(struct rte_eth_dev *dev,
239                     uint16_t queue_idx,
240                     uint16_t nb_desc,
241                     unsigned int socket_id,
242                     const struct rte_eth_txconf *tx_conf)
243 {
244         struct qede_dev *qdev = dev->data->dev_private;
245         struct ecore_dev *edev = &qdev->edev;
246         struct qede_tx_queue *txq;
247         int rc;
248
249         PMD_INIT_FUNC_TRACE(edev);
250
251         if (!rte_is_power_of_2(nb_desc)) {
252                 DP_ERR(edev, "Ring size %u is not a power of 2\n",
253                        nb_desc);
254                 return -EINVAL;
255         }
256
257         /* Free memory prior to re-allocation if needed... */
258         if (dev->data->tx_queues[queue_idx] != NULL) {
259                 qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
260                 dev->data->tx_queues[queue_idx] = NULL;
261         }
262
263         txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
264                                  RTE_CACHE_LINE_SIZE, socket_id);
265
266         if (txq == NULL) {
267                 DP_ERR(edev,
268                        "Unable to allocate memory for txq on socket %u",
269                        socket_id);
270                 return -ENOMEM;
271         }
272
273         txq->nb_tx_desc = nb_desc;
274         txq->qdev = qdev;
275         txq->port_id = dev->data->port_id;
276
277         rc = qdev->ops->common->chain_alloc(edev,
278                                             ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
279                                             ECORE_CHAIN_MODE_PBL,
280                                             ECORE_CHAIN_CNT_TYPE_U16,
281                                             txq->nb_tx_desc,
282                                             sizeof(union eth_tx_bd_types),
283                                             &txq->tx_pbl,
284                                             NULL);
285         if (rc != ECORE_SUCCESS) {
286                 DP_ERR(edev,
287                        "Unable to allocate memory for txbd ring on socket %u",
288                        socket_id);
289                 qede_tx_queue_release(txq);
290                 return -ENOMEM;
291         }
292
293         /* Allocate software ring */
294         txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
295                                              (sizeof(struct qede_tx_entry) *
296                                               txq->nb_tx_desc),
297                                              RTE_CACHE_LINE_SIZE, socket_id);
298
299         if (!txq->sw_tx_ring) {
300                 DP_ERR(edev,
301                        "Unable to allocate memory for sw_tx_ring on socket %u",
302                        socket_id);
303                 qede_tx_queue_release(txq);
304                 return -ENOMEM;
305         }
306
307         txq->queue_id = queue_idx;
308
309         txq->nb_tx_avail = txq->nb_tx_desc;
310
311         txq->tx_free_thresh =
312             tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
313             (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
314
315         dev->data->tx_queues[queue_idx] = txq;
316         qdev->fp_array[queue_idx].txq = txq;
317
318         DP_INFO(edev,
319                   "txq %u num_desc %u tx_free_thresh %u socket %u\n",
320                   queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
321
322         return 0;
323 }
324
325 static void
326 qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
327                     struct qede_tx_queue *txq)
328 {
329         DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
330         ecore_chain_reset(&txq->tx_pbl);
331         txq->sw_tx_cons = 0;
332         txq->sw_tx_prod = 0;
333         *txq->hw_cons_ptr = 0;
334 }
335
336 static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
337 {
338         uint16_t i;
339
340         if (txq->sw_tx_ring) {
341                 for (i = 0; i < txq->nb_tx_desc; i++) {
342                         if (txq->sw_tx_ring[i].mbuf) {
343                                 rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
344                                 txq->sw_tx_ring[i].mbuf = NULL;
345                         }
346                 }
347         }
348 }
349
350 void qede_tx_queue_release(void *tx_queue)
351 {
352         struct qede_tx_queue *txq = tx_queue;
353
354         if (txq) {
355                 qede_tx_queue_release_mbufs(txq);
356                 rte_free(txq->sw_tx_ring);
357                 rte_free(txq);
358         }
359 }
360
361 /* This function allocates fast-path status block memory */
362 static int
363 qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
364                   uint16_t sb_id)
365 {
366         struct ecore_dev *edev = &qdev->edev;
367         struct status_block *sb_virt;
368         dma_addr_t sb_phys;
369         int rc;
370
371         sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
372
373         if (!sb_virt) {
374                 DP_ERR(edev, "Status block allocation failed\n");
375                 return -ENOMEM;
376         }
377
378         rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
379                                         sb_phys, sb_id);
380         if (rc) {
381                 DP_ERR(edev, "Status block initialization failed\n");
382                 /* TBD: No dma_free_coherent possible */
383                 return rc;
384         }
385
386         return 0;
387 }
388
389 int qede_alloc_fp_resc(struct qede_dev *qdev)
390 {
391         struct ecore_dev *edev = &qdev->edev;
392         struct qede_fastpath *fp;
393         uint32_t num_sbs;
394         uint16_t sb_idx;
395
396         if (IS_VF(edev))
397                 ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
398         else
399                 num_sbs = ecore_cxt_get_proto_cid_count
400                           (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
401
402         if (num_sbs == 0) {
403                 DP_ERR(edev, "No status blocks available\n");
404                 return -EINVAL;
405         }
406
407         qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
408                                 sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
409
410         if (!qdev->fp_array) {
411                 DP_ERR(edev, "fp array allocation failed\n");
412                 return -ENOMEM;
413         }
414
415         memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
416                         sizeof(*qdev->fp_array));
417
418         for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
419                 fp = &qdev->fp_array[sb_idx];
420                 fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
421                                 RTE_CACHE_LINE_SIZE);
422                 if (!fp->sb_info) {
423                         DP_ERR(edev, "FP sb_info allocation failed\n");
424                         return -1;
425                 }
426                 if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
427                         DP_ERR(edev, "FP status block allocation failed\n");
428                         return -1;
429                 }
430                 DP_INFO(edev, "sb_info idx 0x%x initialized\n",
431                                 fp->sb_info->igu_sb_id);
432         }
433
434         return 0;
435 }
436
437 void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
438 {
439         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
440         __rte_unused struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
441         struct qede_fastpath *fp;
442         uint16_t sb_idx;
443
444         PMD_INIT_FUNC_TRACE(edev);
445
446         for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
447                 fp = &qdev->fp_array[sb_idx];
448                 DP_INFO(edev, "Free sb_info index 0x%x\n",
449                                 fp->sb_info->igu_sb_id);
450                 if (fp->sb_info)
451                         rte_free(fp->sb_info);
452                 fp->sb_info = NULL;
453         }
454         if (qdev->fp_array)
455                 rte_free(qdev->fp_array);
456         qdev->fp_array = NULL;
457 }
458
459 static inline void
460 qede_update_rx_prod(__rte_unused struct qede_dev *edev,
461                     struct qede_rx_queue *rxq)
462 {
463         uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
464         uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
465         struct eth_rx_prod_data rx_prods = { 0 };
466
467         /* Update producers */
468         rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
469         rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
470
471         /* Make sure that the BD and SGE data is updated before updating the
472          * producers since FW might read the BD/SGE right after the producer
473          * is updated.
474          */
475         rte_wmb();
476
477         internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
478                         (uint32_t *)&rx_prods);
479
480         /* mmiowb is needed to synchronize doorbell writes from more than one
481          * processor. It guarantees that the write arrives at the device before
482          * the next receive poll (possibly on another CPU) can ring its own
483          * doorbell. Without this barrier, the next doorbell can bypass
484          * this doorbell. This is applicable to IA64/Altix systems.
485          */
486         rte_wmb();
487
488         PMD_RX_LOG(DEBUG, rxq, "bd_prod %u  cqe_prod %u", bd_prod, cqe_prod);
489 }
490
491 /* Starts a given RX queue in HW */
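/* Sequence: post an mbuf for every BD in the ring, mask the status block
 * interrupt, then issue the RX queue start ramrod through ecore on the hwfn
 * that owns this queue. The returned producer address and queue handle are
 * cached in the rxq, and the initial producer values are published via
 * qede_update_rx_prod().
 */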
492 static int
493 qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
494 {
495         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
496         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
497         struct ecore_queue_start_common_params params;
498         struct ecore_rxq_start_ret_params ret_params;
499         struct qede_rx_queue *rxq;
500         struct qede_fastpath *fp;
501         struct ecore_hwfn *p_hwfn;
502         dma_addr_t p_phys_table;
503         uint16_t page_cnt;
504         uint16_t j;
505         int hwfn_index;
506         int rc;
507
508         if (rx_queue_id < eth_dev->data->nb_rx_queues) {
509                 fp = &qdev->fp_array[rx_queue_id];
510                 rxq = eth_dev->data->rx_queues[rx_queue_id];
511                 /* Allocate buffers for the Rx ring */
512                 for (j = 0; j < rxq->nb_rx_desc; j++) {
513                         rc = qede_alloc_rx_buffer(rxq);
514                         if (rc) {
515                                 DP_ERR(edev, "RX buffer allocation failed"
516                                                 " for rxq = %u\n", rx_queue_id);
517                                 return -ENOMEM;
518                         }
519                 }
520                 /* disable interrupts */
521                 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
522                 /* Prepare ramrod */
523                 memset(&params, 0, sizeof(params));
524                 params.queue_id = rx_queue_id;
525                 params.vport_id = 0;
526                 params.sb = fp->sb_info->igu_sb_id;
527                 DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
528                                 fp->rxq->queue_id, fp->sb_info->igu_sb_id);
529                 params.sb_idx = RX_PI;
530                 hwfn_index = rx_queue_id % edev->num_hwfns;
531                 p_hwfn = &edev->hwfns[hwfn_index];
532                 p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
533                 page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
534                 memset(&ret_params, 0, sizeof(ret_params));
535                 rc = ecore_eth_rx_queue_start(p_hwfn,
536                                 p_hwfn->hw_info.opaque_fid,
537                                 &params, fp->rxq->rx_buf_size,
538                                 fp->rxq->rx_bd_ring.p_phys_addr,
539                                 p_phys_table, page_cnt,
540                                 &ret_params);
541                 if (rc) {
542                         DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
543                                         rx_queue_id, rc);
544                         return -1;
545                 }
546                 /* Update with the returned parameters */
547                 fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
548                 fp->rxq->handle = ret_params.p_handle;
549
550                 fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
551                 qede_update_rx_prod(qdev, fp->rxq);
552                 eth_dev->data->rx_queue_state[rx_queue_id] =
553                         RTE_ETH_QUEUE_STATE_STARTED;
554                 DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
555         } else {
556                 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
557                 rc = -EINVAL;
558         }
559
560         return rc;
561 }
562
563 static int
564 qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
565 {
566         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
567         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
568         struct ecore_queue_start_common_params params;
569         struct ecore_txq_start_ret_params ret_params;
570         struct ecore_hwfn *p_hwfn;
571         dma_addr_t p_phys_table;
572         struct qede_tx_queue *txq;
573         struct qede_fastpath *fp;
574         uint16_t page_cnt;
575         int hwfn_index;
576         int rc;
577
578         if (tx_queue_id < eth_dev->data->nb_tx_queues) {
579                 txq = eth_dev->data->tx_queues[tx_queue_id];
580                 fp = &qdev->fp_array[tx_queue_id];
581                 memset(&params, 0, sizeof(params));
582                 params.queue_id = tx_queue_id;
583                 params.vport_id = 0;
584                 params.sb = fp->sb_info->igu_sb_id;
585                 DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
586                                 fp->txq->queue_id, fp->sb_info->igu_sb_id);
587                 params.sb_idx = TX_PI(0); /* tc = 0 */
588                 p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
589                 page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
590                 hwfn_index = tx_queue_id % edev->num_hwfns;
591                 p_hwfn = &edev->hwfns[hwfn_index];
592                 if (qdev->dev_info.is_legacy)
593                         fp->txq->is_legacy = true;
594                 rc = ecore_eth_tx_queue_start(p_hwfn,
595                                 p_hwfn->hw_info.opaque_fid,
596                                 &params, 0 /* tc */,
597                                 p_phys_table, page_cnt,
598                                 &ret_params);
599                 if (rc != ECORE_SUCCESS) {
600                         DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
601                                         tx_queue_id, rc);
602                         return -1;
603                 }
604                 txq->doorbell_addr = ret_params.p_doorbell;
605                 txq->handle = ret_params.p_handle;
606
607                 txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
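                /* Pre-build the static part of the TX doorbell data
                 * (destination block, aggregation command and value selector)
                 * so that, at transmit time, only the BD producer needs to be
                 * filled in before the doorbell is written.
                 */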
608                 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
609                                 DB_DEST_XCM);
610                 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
611                                 DB_AGG_CMD_SET);
612                 SET_FIELD(txq->tx_db.data.params,
613                                 ETH_DB_DATA_AGG_VAL_SEL,
614                                 DQ_XCM_ETH_TX_BD_PROD_CMD);
615                 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
616                 eth_dev->data->tx_queue_state[tx_queue_id] =
617                         RTE_ETH_QUEUE_STATE_STARTED;
618                 DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
619         } else {
620                 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
621                 rc = -EINVAL;
622         }
623
624         return rc;
625 }
626
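/* Release one completed TX packet: consume one BD from the PBL chain per
 * mbuf segment (crediting each back to nb_tx_avail), free the mbuf chain,
 * clear the software-ring slot and advance the software consumer. A slot
 * without an mbuf still consumes and credits back a single BD, but leaves
 * the software consumer untouched.
 */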
627 static inline void
628 qede_free_tx_pkt(struct qede_tx_queue *txq)
629 {
630         struct rte_mbuf *mbuf;
631         uint16_t nb_segs;
632         uint16_t idx;
633
634         idx = TX_CONS(txq);
635         mbuf = txq->sw_tx_ring[idx].mbuf;
636         if (mbuf) {
637                 nb_segs = mbuf->nb_segs;
638                 PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
639                 while (nb_segs) {
640                         /* It's like consuming rxbuf in recv() */
641                         ecore_chain_consume(&txq->tx_pbl);
642                         txq->nb_tx_avail++;
643                         nb_segs--;
644                 }
645                 rte_pktmbuf_free(mbuf);
646                 txq->sw_tx_ring[idx].mbuf = NULL;
647                 txq->sw_tx_cons++;
648                 PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
649         } else {
650                 ecore_chain_consume(&txq->tx_pbl);
651                 txq->nb_tx_avail++;
652         }
653 }
654
655 static inline void
656 qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
657                       struct qede_tx_queue *txq)
658 {
659         uint16_t hw_bd_cons;
660 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
661         uint16_t sw_tx_cons;
662 #endif
663
664         rte_compiler_barrier();
665         hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
666 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
667         sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
668         PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
669                    abs(hw_bd_cons - sw_tx_cons));
670 #endif
671         while (hw_bd_cons !=  ecore_chain_get_cons_idx(&txq->tx_pbl))
672                 qede_free_tx_pkt(txq);
673 }
674
675
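/* Poll TX completions until the software producer and consumer meet. If the
 * queue has not drained after ~1000 iterations spaced 1 ms apart, optionally
 * ask the management firmware (MCP) to drain once and retry; otherwise give
 * up with -1.
 */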
676 static int qede_drain_txq(struct qede_dev *qdev,
677                           struct qede_tx_queue *txq, bool allow_drain)
678 {
679         struct ecore_dev *edev = &qdev->edev;
680         int rc, cnt = 1000;
681
682         while (txq->sw_tx_cons != txq->sw_tx_prod) {
683                 qede_process_tx_compl(edev, txq);
684                 if (!cnt) {
685                         if (allow_drain) {
686                                 DP_ERR(edev, "Tx queue[%u] is stuck,"
687                                           " requesting MCP to drain\n",
688                                           txq->queue_id);
689                                 rc = qdev->ops->common->drain(edev);
690                                 if (rc)
691                                         return rc;
692                                 return qede_drain_txq(qdev, txq, false);
693                         }
694                         DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
695                                   " PROD=%d, CONS=%d\n",
696                                   txq->queue_id, txq->sw_tx_prod,
697                                   txq->sw_tx_cons);
698                         return -1;
699                 }
700                 cnt--;
701                 DELAY(1000);
702                 rte_compiler_barrier();
703         }
704
705         /* FW finished processing, wait for HW to transmit all tx packets */
706         DELAY(2000);
707
708         return 0;
709 }
710
711
712 /* Stops a given TX queue in the HW */
713 static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
714 {
715         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
716         struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
717         struct ecore_hwfn *p_hwfn;
718         struct qede_tx_queue *txq;
719         int hwfn_index;
720         int rc;
721
722         if (tx_queue_id < eth_dev->data->nb_tx_queues) {
723                 txq = eth_dev->data->tx_queues[tx_queue_id];
724                 /* Drain txq */
725                 if (qede_drain_txq(qdev, txq, true))
726                         return -1; /* For the lack of retcodes */
727                 /* Stop txq */
728                 hwfn_index = tx_queue_id % edev->num_hwfns;
729                 p_hwfn = &edev->hwfns[hwfn_index];
730                 rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
731                 if (rc != ECORE_SUCCESS) {
732                         DP_ERR(edev, "TX queue %u stop failed\n", tx_queue_id);
733                         return -1;
734                 }
735                 qede_tx_queue_release_mbufs(txq);
736                 qede_tx_queue_reset(qdev, txq);
737                 eth_dev->data->tx_queue_state[tx_queue_id] =
738                         RTE_ETH_QUEUE_STATE_STOPPED;
739                 DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
740         } else {
741                 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
742                 rc = -EINVAL;
743         }
744
745         return rc;
746 }
747
748 int qede_start_queues(struct rte_eth_dev *eth_dev)
749 {
750         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
751         uint8_t id;
752         int rc;
753
754         for_each_rss(id) {
755                 rc = qede_rx_queue_start(eth_dev, id);
756                 if (rc != ECORE_SUCCESS)
757                         return -1;
758         }
759
760         for_each_tss(id) {
761                 rc = qede_tx_queue_start(eth_dev, id);
762                 if (rc != ECORE_SUCCESS)
763                         return -1;
764         }
765
766         return rc;
767 }
768
769 void qede_stop_queues(struct rte_eth_dev *eth_dev)
770 {
771         struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
772         uint8_t id;
773
774         /* Stopping RX/TX queues */
775         for_each_tss(id) {
776                 qede_tx_queue_stop(eth_dev, id);
777         }
778
779         for_each_rss(id) {
780                 qede_rx_queue_stop(eth_dev, id);
781         }
782 }
783
784 static bool qede_tunn_exist(uint16_t flag)
785 {
786         return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
787                     PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
788 }
789
790 /*
791  * qede_check_tunn_csum_l4:
792  * Returns:
793  * 1 : If L4 csum is enabled AND if the validation has failed.
794  * 0 : Otherwise
795  */
796 static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
797 {
798         if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
799              PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
800                 return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
801                         PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
802
803         return 0;
804 }
805
806 static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
807 {
808         if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
809              PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
810                 return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
811                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
812
813         return 0;
814 }
815
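/* Example, assuming the usual encoding of the parse flags: an IPv4/TCP frame
 * yields val == QEDE_PKT_TYPE_IPV4_TCP, so the lookup below returns
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP; any value
 * outside the table is reported as RTE_PTYPE_UNKNOWN.
 */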
816 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
817 {
818         uint16_t val;
819
820         /* Lookup table */
821         static const uint32_t
822         ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
823                 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,
824                 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,
825                 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
826                 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
827                 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
828                 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
829         };
830
831         /* Bits (0..3) provide the L3/L4 protocol type */
832         val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
833                PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
834                (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
835                 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;
836
837         if (val < QEDE_PKT_TYPE_MAX)
838                 return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;
839         else
840                 return RTE_PTYPE_UNKNOWN;
841 }
842
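/* Software re-check of the IP header when the parser flags an IP header
 * error: for IPv4 the header checksum is recomputed and a bad checksum is
 * reported only on a real mismatch; for IPv6, which has no header checksum,
 * the parser error is taken at face value.
 */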
843 static inline uint8_t
844 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
845 {
846         struct ipv4_hdr *ip;
847         uint16_t pkt_csum;
848         uint16_t calc_csum;
849         uint16_t val;
850
851         val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
852                 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
853
854         if (unlikely(val)) {
855                 m->packet_type = qede_rx_cqe_to_pkt_type(flag);
856                 if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
857                         ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
858                                            sizeof(struct ether_hdr));
859                         pkt_csum = ip->hdr_checksum;
860                         ip->hdr_checksum = 0;
861                         calc_csum = rte_ipv4_cksum(ip);
862                         ip->hdr_checksum = pkt_csum;
863                         return (calc_csum != pkt_csum);
864                 } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
865                         return 1;
866                 }
867         }
868         return 0;
869 }
870
871 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
872 {
873         ecore_chain_consume(&rxq->rx_bd_ring);
874         rxq->sw_rx_cons++;
875 }
876
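/* Recycle path: instead of allocating a fresh mbuf, re-post the mbuf that
 * backs the consumed BD onto the BD ring again. qede_recycle_rx_bd_ring()
 * relies on this in error paths (allocation failure, aborted scattered
 * packet) so that the hardware ring never runs dry even though the packet
 * itself is dropped.
 */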
877 static inline void
878 qede_reuse_page(__rte_unused struct qede_dev *qdev,
879                 struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
880 {
881         struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
882         uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
883         struct qede_rx_entry *curr_prod;
884         dma_addr_t new_mapping;
885
886         curr_prod = &rxq->sw_rx_ring[idx];
887         *curr_prod = *curr_cons;
888
889         new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
890                       curr_prod->page_offset;
891
892         rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
893         rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
894
895         rxq->sw_rx_prod++;
896 }
897
898 static inline void
899 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
900                         struct qede_dev *qdev, uint8_t count)
901 {
902         struct qede_rx_entry *curr_cons;
903
904         for (; count > 0; count--) {
905                 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
906                 qede_reuse_page(qdev, rxq, curr_cons);
907                 qede_rx_bd_ring_consume(rxq);
908         }
909 }
910
911 static inline void
912 qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
913                                      struct qede_rx_queue *rxq,
914                                      uint8_t agg_index, uint16_t len)
915 {
916         struct qede_agg_info *tpa_info;
917         struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
918         uint16_t cons_idx;
919
920         /* Under certain conditions the FW may not consume an additional or
921          * new BD, so the decision to consume the BD must be made based on
922          * len_list[0].
923          */
924         if (rte_le_to_cpu_16(len)) {
925                 tpa_info = &rxq->tpa_info[agg_index];
926                 cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
927                 curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
928                 assert(curr_frag);
929                 curr_frag->nb_segs = 1;
930                 curr_frag->pkt_len = rte_le_to_cpu_16(len);
931                 curr_frag->data_len = curr_frag->pkt_len;
932                 tpa_info->tpa_tail->next = curr_frag;
933                 tpa_info->tpa_tail = curr_frag;
934                 qede_rx_bd_ring_consume(rxq);
935                 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
936                         PMD_RX_LOG(ERR, rxq, "mbuf allocation failed\n");
937                         rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
938                         rxq->rx_alloc_errors++;
939                 }
940         }
941 }
942
943 static inline void
944 qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
945                              struct qede_rx_queue *rxq,
946                              struct eth_fast_path_rx_tpa_cont_cqe *cqe)
947 {
948         PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
949                    cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
950         /* Only len_list[0] will have a value */
951         qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
952                                              cqe->len_list[0]);
953 }
954
955 static inline void
956 qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
957                             struct qede_rx_queue *rxq,
958                             struct eth_fast_path_rx_tpa_end_cqe *cqe)
959 {
960         struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
961
962         qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
963                                              cqe->len_list[0]);
964         /* Update total length and frags based on end TPA */
965         rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
966         /* TODO:  Add Sanity Checks */
967         rx_mb->nb_segs = cqe->num_of_bds;
968         rx_mb->pkt_len = cqe->total_packet_len;
969
970         PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
971                    " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
972                    rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
973                    rx_mb->pkt_len);
974 }
975
976 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
977 {
978         uint32_t val;
979
980         /* Lookup table */
981         static const uint32_t
982         ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
983                 [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
984                 [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
985                 [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
986                 [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
987                 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
988                                 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
989                 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
990                                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
991                 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
992                                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
993                 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
994                                 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
995                 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
996                                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
997                 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
998                                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
999                 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
1000                                 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1001                 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
1002                                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1003                 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
1004                                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1005                 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
1006                                 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1007                 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
1008                                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1009                 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
1010                                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1011                 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
1012                                 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1013                 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
1014                                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1015                 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
1016                                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1017                 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
1018                                 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1019                 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
1020                                 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1021                 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
1022                                 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1023         };
1024
1025         /* Cover bits[4-0] to include tunn_type and next protocol */
1026         val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
1027                 ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
1028                 (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
1029                 ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
1030
1031         if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
1032                 return ptype_tunn_lkup_tbl[val];
1033         else
1034                 return RTE_PTYPE_UNKNOWN;
1035 }
1036
1037 static inline int
1038 qede_process_sg_pkts(void *p_rxq,  struct rte_mbuf *rx_mb,
1039                      uint8_t num_segs, uint16_t pkt_len)
1040 {
1041         struct qede_rx_queue *rxq = p_rxq;
1042         struct qede_dev *qdev = rxq->qdev;
1043         register struct rte_mbuf *seg1 = NULL;
1044         register struct rte_mbuf *seg2 = NULL;
1045         uint16_t sw_rx_index;
1046         uint16_t cur_size;
1047
1048         seg1 = rx_mb;
1049         while (num_segs) {
1050                 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1051                                                         pkt_len;
1052                 if (unlikely(!cur_size)) {
1053                         PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
1054                                    " are left to map for a jumbo frame", num_segs);
1055                         qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
1056                         return -EINVAL;
1057                 }
1058                 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1059                 seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
1060                 qede_rx_bd_ring_consume(rxq);
1061                 pkt_len -= cur_size;
1062                 seg2->data_len = cur_size;
1063                 seg1->next = seg2;
1064                 seg1 = seg1->next;
1065                 num_segs--;
1066                 rxq->rx_segs++;
1067         }
1068
1069         return 0;
1070 }
1071
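/* Burst receive handler. Each iteration consumes one CQE from the completion
 * ring: REGULAR CQEs complete a packet (possibly scattered over several BDs),
 * TPA_START/TPA_CONT/TPA_END CQEs build up an LRO aggregation that is handed
 * to the caller only on TPA_END, and slow-path CQEs are returned to ecore.
 * The RX producers are republished once at the end of the burst via
 * qede_update_rx_prod().
 */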
1072 uint16_t
1073 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1074 {
1075         struct qede_rx_queue *rxq = p_rxq;
1076         struct qede_dev *qdev = rxq->qdev;
1077         struct ecore_dev *edev = &qdev->edev;
1078         uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
1079         uint16_t rx_pkt = 0;
1080         union eth_rx_cqe *cqe;
1081         struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1082         register struct rte_mbuf *rx_mb = NULL;
1083         register struct rte_mbuf *seg1 = NULL;
1084         enum eth_rx_cqe_type cqe_type;
1085         uint16_t pkt_len = 0; /* Sum of all BD segments */
1086         uint16_t len; /* Length of first BD */
1087         uint8_t num_segs = 1;
1088         uint16_t preload_idx;
1089         uint16_t parse_flag;
1090 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1091         uint8_t bitfield_val;
1092         enum rss_hash_type htype;
1093 #endif
1094         uint8_t tunn_parse_flag;
1095         uint8_t j;
1096         struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
1097         uint64_t ol_flags;
1098         uint32_t packet_type;
1099         uint16_t vlan_tci;
1100         bool tpa_start_flg;
1101         uint8_t offset, tpa_agg_idx, flags;
1102         struct qede_agg_info *tpa_info = NULL;
1103         uint32_t rss_hash;
1104
1105         hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1106         sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1107
1108         rte_rmb();
1109
1110         if (hw_comp_cons == sw_comp_cons)
1111                 return 0;
1112
1113         while (sw_comp_cons != hw_comp_cons) {
1114                 ol_flags = 0;
1115                 packet_type = RTE_PTYPE_UNKNOWN;
1116                 vlan_tci = 0;
1117                 tpa_start_flg = false;
1118                 rss_hash = 0;
1119
1120                 /* Get the CQE from the completion ring */
1121                 cqe =
1122                     (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1123                 cqe_type = cqe->fast_path_regular.type;
1124                 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1125
1126                 switch (cqe_type) {
1127                 case ETH_RX_CQE_TYPE_REGULAR:
1128                         fp_cqe = &cqe->fast_path_regular;
1129                 break;
1130                 case ETH_RX_CQE_TYPE_TPA_START:
1131                         cqe_start_tpa = &cqe->fast_path_tpa_start;
1132                         tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
1133                         tpa_start_flg = true;
1134                         /* Mark it as LRO packet */
1135                         ol_flags |= PKT_RX_LRO;
1136                         /* In split mode, seg_len is the same as len_on_first_bd
1137                          * and ext_bd_len_list is empty since there are no
1138                          * additional buffers.
1139                          */
1140                         PMD_RX_LOG(INFO, rxq,
1141                             "TPA start[%d] - len_on_first_bd %d header %d"
1142                             " [bd_list[0] %d], [seg_len %d]\n",
1143                             cqe_start_tpa->tpa_agg_index,
1144                             rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
1145                             cqe_start_tpa->header_len,
1146                             rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
1147                             rte_le_to_cpu_16(cqe_start_tpa->seg_len));
1148
1149                 break;
1150                 case ETH_RX_CQE_TYPE_TPA_CONT:
1151                         qede_rx_process_tpa_cont_cqe(qdev, rxq,
1152                                                      &cqe->fast_path_tpa_cont);
1153                         goto next_cqe;
1154                 case ETH_RX_CQE_TYPE_TPA_END:
1155                         qede_rx_process_tpa_end_cqe(qdev, rxq,
1156                                                     &cqe->fast_path_tpa_end);
1157                         tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
1158                         tpa_info = &rxq->tpa_info[tpa_agg_idx];
1159                         rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
1160                         goto tpa_end;
1161                 case ETH_RX_CQE_TYPE_SLOW_PATH:
1162                         PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1163                         ecore_eth_cqe_completion(
1164                                 &edev->hwfns[rxq->queue_id % edev->num_hwfns],
1165                                 (struct eth_slow_path_rx_cqe *)cqe);
1166                         /* fall-thru */
1167                 default:
1168                         goto next_cqe;
1169                 }
1170
1171                 /* Get the data from the SW ring */
1172                 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1173                 rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
1174                 assert(rx_mb != NULL);
1175
1176                 /* Handle regular CQE or TPA start CQE */
1177                 if (!tpa_start_flg) {
1178                         parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1179                         offset = fp_cqe->placement_offset;
1180                         len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1181                         pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1182                         vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1183                         rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1184 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1185                         bitfield_val = fp_cqe->bitfields;
1186                         htype = (uint8_t)GET_FIELD(bitfield_val,
1187                                         ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
1188 #endif
1189                 } else {
1190                         parse_flag =
1191                             rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
1192                         offset = cqe_start_tpa->placement_offset;
1193                         /* seg_len = len_on_first_bd */
1194                         len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
1195                         vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
1196 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1197                         bitfield_val = cqe_start_tpa->bitfields;
1198                         htype = (uint8_t)GET_FIELD(bitfield_val,
1199                                 ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE);
1200 #endif
1201                         rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
1202                 }
1203                 if (qede_tunn_exist(parse_flag)) {
1204                         PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1205                         if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1206                                 PMD_RX_LOG(ERR, rxq,
1207                                             "L4 csum failed, flags = 0x%x\n",
1208                                             parse_flag);
1209                                 rxq->rx_hw_errors++;
1210                                 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1211                         } else {
1212                                 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1213                                 if (tpa_start_flg)
1214                                         flags =
1215                                          cqe_start_tpa->tunnel_pars_flags.flags;
1216                                 else
1217                                         flags = fp_cqe->tunnel_pars_flags.flags;
1218                                 tunn_parse_flag = flags;
1219                                 packet_type =
1220                                 qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
1221                         }
1222                 } else {
1223                         PMD_RX_LOG(INFO, rxq, "Rx non-tunneled packet\n");
1224                         if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1225                                 PMD_RX_LOG(ERR, rxq,
1226                                             "L4 csum failed, flags = 0x%x\n",
1227                                             parse_flag);
1228                                 rxq->rx_hw_errors++;
1229                                 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1230                         } else {
1231                                 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1232                         }
1233                         if (unlikely(qede_check_notunn_csum_l3(rx_mb,
1234                                                         parse_flag))) {
1235                                 PMD_RX_LOG(ERR, rxq,
1236                                            "IP csum failed, flags = 0x%x\n",
1237                                            parse_flag);
1238                                 rxq->rx_hw_errors++;
1239                                 ol_flags |= PKT_RX_IP_CKSUM_BAD;
1240                         } else {
1241                                 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1242                                 packet_type =
1243                                         qede_rx_cqe_to_pkt_type(parse_flag);
1244                         }
1245                 }
1246
1247                 if (CQE_HAS_VLAN(parse_flag)) {
1248                         ol_flags |= PKT_RX_VLAN_PKT;
1249                         if (qdev->vlan_strip_flg) {
1250                                 ol_flags |= PKT_RX_VLAN_STRIPPED;
1251                                 rx_mb->vlan_tci = vlan_tci;
1252                         }
1253                 }
1254                 if (CQE_HAS_OUTER_VLAN(parse_flag)) {
1255                         ol_flags |= PKT_RX_QINQ_PKT;
1256                         if (qdev->vlan_strip_flg) {
1257                                 rx_mb->vlan_tci = vlan_tci;
1258                                 ol_flags |= PKT_RX_QINQ_STRIPPED;
1259                         }
1260                         rx_mb->vlan_tci_outer = 0;
1261                 }
1262                 /* RSS Hash */
1263                 if (qdev->rss_enable) {
1264                         ol_flags |= PKT_RX_RSS_HASH;
1265                         rx_mb->hash.rss = rss_hash;
1266                 }
1267
1268                 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1269                         PMD_RX_LOG(ERR, rxq,
1270                                    "New buffer allocation failed,"
1271                                    " dropping incoming packet\n");
1272                         qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
1273                         rte_eth_devices[rxq->port_id].
1274                             data->rx_mbuf_alloc_failed++;
1275                         rxq->rx_alloc_errors++;
1276                         break;
1277                 }
1278                 qede_rx_bd_ring_consume(rxq);
1279
1280                 if (!tpa_start_flg && fp_cqe->bd_num > 1) {
1281                         PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
1282                                    " len on first: %04x Total Len: %04x",
1283                                    fp_cqe->bd_num, len, pkt_len);
1284                         num_segs = fp_cqe->bd_num - 1;
1285                         seg1 = rx_mb;
1286                         if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
1287                                                  pkt_len - len))
1288                                 goto next_cqe;
1289                         for (j = 0; j < num_segs; j++) {
1290                                 if (qede_alloc_rx_buffer(rxq)) {
1291                                         PMD_RX_LOG(ERR, rxq,
1292                                                 "Buffer allocation failed");
1293                                         rte_eth_devices[rxq->port_id].
1294                                                 data->rx_mbuf_alloc_failed++;
1295                                         rxq->rx_alloc_errors++;
1296                                         break;
1297                                 }
1298                                 rxq->rx_segs++;
1299                         }
1300                 }
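                /* Added note: for multi-BD completions the extra segments
                 * have been chained onto rx_mb by qede_process_sg_pkts()
                 * above, so the caller of the burst API sees one mbuf chain.
                 * A minimal (hypothetical) walk of that chain:
                 *
                 *	uint32_t total = 0;
                 *	struct rte_mbuf *s;
                 *
                 *	for (s = rx_mb; s != NULL; s = s->next)
                 *		total += s->data_len;	(total == rx_mb->pkt_len)
                 */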
1301                 rxq->rx_segs++; /* for the first segment */
1302
1303                 /* Prefetch next mbuf while processing current one. */
1304                 preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1305                 rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
1306
1307                 /* Update rest of the MBUF fields */
1308                 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1309                 rx_mb->port = rxq->port_id;
1310                 rx_mb->ol_flags = ol_flags;
1311                 rx_mb->data_len = len;
1312                 rx_mb->packet_type = packet_type;
1313                 PMD_RX_LOG(INFO, rxq,
1314                            "pkt_type 0x%04x len %u hash_type %d hash_val 0x%x"
1315                            " ol_flags 0x%04lx\n",
1316                            packet_type, len, htype, rx_mb->hash.rss,
1317                            (unsigned long)ol_flags);
1318                 if (!tpa_start_flg) {
1319                         rx_mb->nb_segs = fp_cqe->bd_num;
1320                         rx_mb->pkt_len = pkt_len;
1321                 } else {
1322                         /* store ref to the updated mbuf */
1323                         tpa_info->tpa_head = rx_mb;
1324                         tpa_info->tpa_tail = tpa_info->tpa_head;
1325                 }
1326                 rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
1327 tpa_end:
1328                 if (!tpa_start_flg) {
1329                         rx_pkts[rx_pkt] = rx_mb;
1330                         rx_pkt++;
1331                 }
1332 next_cqe:
1333                 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
1334                 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1335                 if (rx_pkt == nb_pkts) {
1336                         PMD_RX_LOG(DEBUG, rxq,
1337                                    "Budget reached nb_pkts=%u received=%u",
1338                                    rx_pkt, nb_pkts);
1339                         break;
1340                 }
1341         }
1342
1343         qede_update_rx_prod(qdev, rxq);
1344
1345         rxq->rcv_pkts += rx_pkt;
1346
1347         PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
1348
1349         return rx_pkt;
1350 }
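/*
 * Usage sketch (added commentary, illustrative only): this receive burst
 * routine is reached through the generic rte_eth_rx_burst() API once it is
 * installed as the port's RX handler. Assuming a configured and started port:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t i, nb;
 *
 *	nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *	for (i = 0; i < nb; i++) {
 *		process_packet(pkts[i]);	(hypothetical application hook)
 *		rte_pktmbuf_free(pkts[i]);
 *	}
 *
 * port_id, queue_id and process_packet() are assumptions for the example.
 */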
1351
1352
1353 /* Populate scatter gather buffer descriptor fields */
1354 static inline uint8_t
1355 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
1356                   struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3)
1357 {
1358         struct qede_tx_queue *txq = p_txq;
1359         struct eth_tx_bd *tx_bd = NULL;
1360         dma_addr_t mapping;
1361         uint8_t nb_segs = 0;
1362
1363         /* Check for scattered buffers */
1364         while (m_seg) {
1365                 if (nb_segs == 0) {
1366                         if (!*bd2) {
1367                                 *bd2 = (struct eth_tx_2nd_bd *)
1368                                         ecore_chain_produce(&txq->tx_pbl);
1369                                 memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
1370                                 nb_segs++;
1371                         }
1372                         mapping = rte_mbuf_data_dma_addr(m_seg);
1373                         QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
1374                         PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
1375                 } else if (nb_segs == 1) {
1376                         if (!*bd3) {
1377                                 *bd3 = (struct eth_tx_3rd_bd *)
1378                                         ecore_chain_produce(&txq->tx_pbl);
1379                                 memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
1380                                 nb_segs++;
1381                         }
1382                         mapping = rte_mbuf_data_dma_addr(m_seg);
1383                         QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
1384                         PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
1385                 } else {
1386                         tx_bd = (struct eth_tx_bd *)
1387                                 ecore_chain_produce(&txq->tx_pbl);
1388                         memset(tx_bd, 0, sizeof(*tx_bd));
1389                         nb_segs++;
1390                         mapping = rte_mbuf_data_dma_addr(m_seg);
1391                         QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
1392                         PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
1393                 }
1394                 m_seg = m_seg->next;
1395         }
1396
1397         /* Return total scattered buffers */
1398         return nb_segs;
1399 }
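/*
 * Worked example (added commentary): in the plain multi-segment case, where
 * *bd2 and *bd3 enter as NULL, a 4-segment mbuf is laid out as follows -
 * the caller has already placed segment 1 in BD1 and passes mbuf->next here,
 * so segment 2 lands in BD2, segment 3 in BD3, segment 4 in a plain
 * eth_tx_bd, and the function returns 3 for the caller to add to
 * bd1->data.nbds. When BD2/BD3 were already produced by the caller (the LSO
 * path), the accounting differs from this simple case.
 */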
1400
1401 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1402 static inline void
1403 print_tx_bd_info(struct qede_tx_queue *txq,
1404                  struct eth_tx_1st_bd *bd1,
1405                  struct eth_tx_2nd_bd *bd2,
1406                  struct eth_tx_3rd_bd *bd3,
1407                  uint64_t tx_ol_flags)
1408 {
1409         char ol_buf[256] = { 0 }; /* for verbose prints */
1410
1411         if (bd1)
1412                 PMD_TX_LOG(INFO, txq,
1413                            "BD1: nbytes=%u nbds=%u bd_flags=%04x bf=%04x",
1414                            rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
1415                            bd1->data.bd_flags.bitfields,
1416                            rte_cpu_to_le_16(bd1->data.bitfields));
1417         if (bd2)
1418                 PMD_TX_LOG(INFO, txq,
1419                            "BD2: nbytes=%u bf=%04x\n",
1420                            rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1);
1421         if (bd3)
1422                 PMD_TX_LOG(INFO, txq,
1423                            "BD3: nbytes=%u bf=%04x mss=%u\n",
1424                            rte_cpu_to_le_16(bd3->nbytes),
1425                            rte_cpu_to_le_16(bd3->data.bitfields),
1426                            rte_cpu_to_le_16(bd3->data.lso_mss));
1427
1428         rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
1429         PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
1430 }
1431 #endif
1432
1433 /* TX prepare: check that packets meet TX conditions */
1434 uint16_t
1435 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1436 qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
1437                     uint16_t nb_pkts)
1438 {
1439         struct qede_tx_queue *txq = p_txq;
1440 #else
1441 qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
1442                     uint16_t nb_pkts)
1443 {
1444 #endif
1445         uint64_t ol_flags;
1446         struct rte_mbuf *m;
1447         uint16_t i;
1448         int ret;
1449
1450         for (i = 0; i < nb_pkts; i++) {
1451                 m = tx_pkts[i];
1452                 ol_flags = m->ol_flags;
1453                 if (ol_flags & PKT_TX_TCP_SEG) {
1454                         if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
1455                                 rte_errno = -EINVAL;
1456                                 break;
1457                         }
1458                         /* TBD: confirm it's ~9700B for both? */
1459                         if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
1460                                 rte_errno = -EINVAL;
1461                                 break;
1462                         }
1463                 } else {
1464                         if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
1465                                 rte_errno = -EINVAL;
1466                                 break;
1467                         }
1468                 }
1469                 if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
1470                         rte_errno = -ENOTSUP;
1471                         break;
1472                 }
1473
1474 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1475                 ret = rte_validate_tx_offload(m);
1476                 if (ret != 0) {
1477                         rte_errno = ret;
1478                         break;
1479                 }
1480 #endif
1481                 /* TBD: pseudo csum calculation required iff
1482                  * ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE not set?
1483                  */
1484                 ret = rte_net_intel_cksum_prepare(m);
1485                 if (ret != 0) {
1486                         rte_errno = ret;
1487                         break;
1488                 }
1489         }
1490
1491 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1492         if (unlikely(i != nb_pkts))
1493                 PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
1494                            nb_pkts - i);
1495 #endif
1496         return i;
1497 }
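/*
 * Usage sketch (added commentary, illustrative only): this prepare routine is
 * meant to be invoked through rte_eth_tx_prepare() ahead of the burst send:
 *
 *	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	if (nb_prep != nb_pkts)
 *		handle_reject(pkts[nb_prep], rte_errno);   (hypothetical helper)
 *	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * port_id, queue_id and handle_reject() are assumptions for the example;
 * rte_errno describes why the first rejected packet failed the checks above.
 */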
1498
1499 uint16_t
1500 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1501 {
1502         struct qede_tx_queue *txq = p_txq;
1503         struct qede_dev *qdev = txq->qdev;
1504         struct ecore_dev *edev = &qdev->edev;
1505         struct rte_mbuf *mbuf;
1506         struct rte_mbuf *m_seg = NULL;
1507         uint16_t nb_tx_pkts;
1508         uint16_t bd_prod;
1509         uint16_t idx;
1510         uint16_t nb_frags;
1511         uint16_t nb_pkt_sent = 0;
1512         uint8_t nbds;
1513         bool ipv6_ext_flg;
1514         bool lso_flg;
1515         __rte_unused bool tunn_flg;
1516         struct eth_tx_1st_bd *bd1;
1517         struct eth_tx_2nd_bd *bd2;
1518         struct eth_tx_3rd_bd *bd3;
1519         uint64_t tx_ol_flags;
1520         uint16_t hdr_size;
1521         /* BD1 */
1522         uint16_t bd1_bf;
1523         uint8_t bd1_bd_flags_bf;
1524         uint16_t vlan;
1525         /* BD2 */
1526         uint16_t bd2_bf1;
1527         uint16_t bd2_bf2;
1528         /* BD3 */
1529         uint16_t mss;
1530         uint16_t bd3_bf;
1531
1532
1533         if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
1534                 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
1535                            nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
1536                 qede_process_tx_compl(edev, txq);
1537         }
1538
1539         nb_tx_pkts  = nb_pkts;
1540         bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
1541         while (nb_tx_pkts--) {
1542                 /* Init flags/values */
1543                 ipv6_ext_flg = false;
1544                 tunn_flg = false;
1545                 lso_flg = false;
1546                 nbds = 0;
1547                 vlan = 0;
1548                 bd1 = NULL;
1549                 bd2 = NULL;
1550                 bd3 = NULL;
1551                 hdr_size = 0;
1552                 bd1_bf = 0;
1553                 bd1_bd_flags_bf = 0;
1554                 bd2_bf1 = 0;
1555                 bd2_bf2 = 0;
1556                 mss = 0;
1557                 bd3_bf = 0;
1558
1559                 mbuf = *tx_pkts++;
1560                 assert(mbuf);
1561
1562                 /* Make sure enough TX BDs are available for all mbuf segments */
1563                 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
1564                         break;
1565
1566                 tx_ol_flags = mbuf->ol_flags;
1567                 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1568
1569 #define RTE_ETH_IS_IPV6_HDR_EXT(ptype) ((ptype) & RTE_PTYPE_L3_IPV6_EXT)
1570                 if (RTE_ETH_IS_IPV6_HDR_EXT(mbuf->packet_type)) {
1571                         ipv6_ext_flg = true;
1572                         if (unlikely(txq->nb_tx_avail <
1573                                         ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT))
1574                                 break;
1575                 }
1576
1577                 if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type)) {
1578                         if (ipv6_ext_flg) {
1579                                 if (unlikely(txq->nb_tx_avail <
1580                                     ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
1581                                         break;
1582                         }
1583                         tunn_flg = true;
1584                         /* First indicate it's a tunnel pkt */
1585                         bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
1586                                   ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1587                         /* Legacy FW had flipped behavior with regard to this bit,
1588                          * i.e. it needed to be set to prevent the FW from touching
1589                          * encapsulated packets when it didn't need to.
1590                          */
1591                         if (unlikely(txq->is_legacy)) {
1592                                 bd1_bf ^= 1 <<
1593                                         ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1594                         }
1595                         /* Outer IP checksum offload */
1596                         if (tx_ol_flags & PKT_TX_OUTER_IP_CKSUM) {
1597                                 bd1_bd_flags_bf |=
1598                                         ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
1599                                         ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
1600                         }
1601                         /* Outer UDP checksum offload */
1602                         bd1_bd_flags_bf |=
1603                                 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
1604                                 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
1605                 }
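                /* Added note: the tunnel handling above keys off the mbuf's
                 * packet_type rather than a PKT_TX_* tunnel flag. Under that
                 * assumption, an application sending e.g. a VXLAN-encapsulated
                 * IPv4 frame would mark the mbuf roughly as:
                 *
                 *	m->packet_type |= RTE_PTYPE_TUNNEL_VXLAN;
                 *	m->ol_flags |= PKT_TX_OUTER_IP_CKSUM;
                 *
                 * before handing it to this burst routine.
                 */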
1606
1607                 if (tx_ol_flags & PKT_TX_TCP_SEG) {
1608                         lso_flg = true;
1609                         if (unlikely(txq->nb_tx_avail <
1610                                                 ETH_TX_MIN_BDS_PER_LSO_PKT))
1611                                 break;
1612                         /* For LSO, the packet header and payload must reside in
1613                          * buffers pointed to by different BDs: BD1 carries the
1614                          * header, BD2 onwards the data (see sketch after this block).
1615                          */
1616                         hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
1617                         bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
1618                         bd1_bd_flags_bf |=
1619                                         1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1620                         /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
1621                         bd1_bd_flags_bf |=
1622                                         1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1623                         mss = rte_cpu_to_le_16(mbuf->tso_segsz);
1624                         /* Using one header BD */
1625                         bd3_bf |= rte_cpu_to_le_16(1 <<
1626                                         ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
1627                 } else {
1628                         if (unlikely(txq->nb_tx_avail <
1629                                         ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
1630                                 break;
1631                         bd1_bf |=
1632                                (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
1633                                 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
1634                 }
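                /* Layout sketch for the LSO split (added commentary, example
                 * numbers only): with l2_len = 14, l3_len = 20 and l4_len = 20,
                 * hdr_size is 54, so BD1 below advertises those 54 header bytes
                 * and BD2 starts at the mbuf's DMA address + 54 with
                 * data_len - 54 bytes of payload; mss limits each transmitted
                 * segment to tso_segsz bytes of payload.
                 */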
1635
1636                 /* Descriptor based VLAN insertion */
1637                 if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
1638                         vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
1639                         bd1_bd_flags_bf |=
1640                             1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
1641                 }
1642
1643                 /* Offload the IP checksum in the hardware */
1644                 if (tx_ol_flags & PKT_TX_IP_CKSUM)
1645                         bd1_bd_flags_bf |=
1646                                 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1647
1648                 /* L4 checksum offload (tcp or udp) */
1649                 if (tx_ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
1650                         bd1_bd_flags_bf |=
1651                                 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1652
1653                 if (ipv6_ext_flg) {
1654                         /* TBD: check pseudo csum iff tx_prepare not called? */
1655                         bd2_bf1 |= ETH_L4_PSEUDO_CSUM_ZERO_LENGTH <<
1656                                 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
1657                 }
1658
1659                 /* Fill the entry in the SW ring and the BDs in the FW ring */
1660                 idx = TX_PROD(txq);
1661                 txq->sw_tx_ring[idx].mbuf = mbuf;
1662
1663                 /* BD1 */
1664                 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
1665                 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
1666                 nbds++;
1667
1668                 /* Map MBUF linear data for DMA and set in the BD1 */
1669                 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
1670                                         mbuf->data_len);
1671                 bd1->data.bitfields = bd1_bf;
1672                 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
1673                 bd1->data.vlan = vlan;
1674
1675                 if (lso_flg || ipv6_ext_flg) {
1676                         bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
1677                                                         (&txq->tx_pbl);
1678                         memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
1679                         nbds++;
1680
1681                         /* BD1 */
1682                         QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
1683                                              hdr_size);
1684                         /* BD2 */
1685                         QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
1686                                              rte_mbuf_data_dma_addr(mbuf)),
1687                                              mbuf->data_len - hdr_size);
1688                         bd2->data.bitfields1 = bd2_bf1;
1689                         bd2->data.bitfields2 = bd2_bf2;
1690
1691                         /* BD3 */
1692                         bd3 = (struct eth_tx_3rd_bd *)ecore_chain_produce
1693                                                         (&txq->tx_pbl);
1694                         memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
1695                         nbds++;
1696                         bd3->data.bitfields = bd3_bf;
1697                         bd3->data.lso_mss = mss;
1698                 }
1699
1700                 /* Handle fragmented MBUF */
1701                 m_seg = mbuf->next;
1702                 /* Encode scatter gather buffer descriptors if required */
1703                 nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3);
1704                 bd1->data.nbds = nbds + nb_frags;
1705                 txq->nb_tx_avail -= bd1->data.nbds;
1706                 txq->sw_tx_prod++;
1707                 rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
1708                 bd_prod =
1709                     rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
1710 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1711                 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
1712                 PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d ipv6_ext=%d\n",
1713                            lso_flg, tunn_flg, ipv6_ext_flg);
1714 #endif
1715                 nb_pkt_sent++;
1716                 txq->xmit_pkts++;
1717         }
1718
1719         /* Write value of prod idx into bd_prod */
1720         txq->tx_db.data.bd_prod = bd_prod;
1721         rte_wmb();
1722         rte_compiler_barrier();
1723         DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
1724         rte_wmb();
1725
1726         /* Check again for Tx completions */
1727         qede_process_tx_compl(edev, txq);
1728
1729         PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
1730                    nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
1731
1732         return nb_pkt_sent;
1733 }
1734
1735 uint16_t
1736 qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
1737                      __rte_unused struct rte_mbuf **pkts,
1738                      __rte_unused uint16_t nb_pkts)
1739 {
1740         return 0;
1741 }
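/*
 * Added note: a zero-returning dummy handler like this is a common PMD
 * pattern; it is presumably installed in the RX/TX burst slots while the port
 * is stopped or being reconfigured, so that stray datapath calls become
 * harmless no-ops.
 */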