net/ice: revert fake TSO fixes
[dpdk.git] / drivers / net / ice / ice_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <rte_ethdev_driver.h>
6 #include <rte_net.h>
7
8 #include "rte_pmd_ice.h"
9 #include "ice_rxtx.h"
10
11 #define ICE_TX_CKSUM_OFFLOAD_MASK (              \
12                 PKT_TX_IP_CKSUM |                \
13                 PKT_TX_L4_MASK |                 \
14                 PKT_TX_TCP_SEG |                 \
15                 PKT_TX_OUTER_IP_CKSUM)
16
17 /* Offset of mbuf dynamic field for protocol extraction data */
18 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
19
20 /* Mask of mbuf dynamic flags for protocol extraction type */
21 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
22 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
23 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
26
27 static inline uint64_t
28 ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid)
29 {
30         static uint64_t *ol_flag_map[] = {
31                 [ICE_RXDID_COMMS_AUX_VLAN] =
32                                 &rte_net_ice_dynflag_proto_xtr_vlan_mask,
33                 [ICE_RXDID_COMMS_AUX_IPV4] =
34                                 &rte_net_ice_dynflag_proto_xtr_ipv4_mask,
35                 [ICE_RXDID_COMMS_AUX_IPV6] =
36                                 &rte_net_ice_dynflag_proto_xtr_ipv6_mask,
37                 [ICE_RXDID_COMMS_AUX_IPV6_FLOW] =
38                                 &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask,
39                 [ICE_RXDID_COMMS_AUX_TCP] =
40                                 &rte_net_ice_dynflag_proto_xtr_tcp_mask,
41         };
42         uint64_t *ol_flag;
43
44         ol_flag = rxdid < RTE_DIM(ol_flag_map) ? ol_flag_map[rxdid] : NULL;
45
46         return ol_flag != NULL ? *ol_flag : 0ULL;
47 }
48
49 static inline uint8_t
50 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
51 {
52         static uint8_t rxdid_map[] = {
53                 [PROTO_XTR_NONE]      = ICE_RXDID_COMMS_GENERIC,
54                 [PROTO_XTR_VLAN]      = ICE_RXDID_COMMS_AUX_VLAN,
55                 [PROTO_XTR_IPV4]      = ICE_RXDID_COMMS_AUX_IPV4,
56                 [PROTO_XTR_IPV6]      = ICE_RXDID_COMMS_AUX_IPV6,
57                 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
58                 [PROTO_XTR_TCP]       = ICE_RXDID_COMMS_AUX_TCP,
59         };
60
61         return xtr_type < RTE_DIM(rxdid_map) ?
62                                 rxdid_map[xtr_type] : ICE_RXDID_COMMS_GENERIC;
63 }
64
65 static enum ice_status
66 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
67 {
68         struct ice_vsi *vsi = rxq->vsi;
69         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
70         struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
71         struct ice_rlan_ctx rx_ctx;
72         enum ice_status err;
73         uint16_t buf_size, len;
74         struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
75         uint32_t rxdid = ICE_RXDID_COMMS_GENERIC;
76         uint32_t regval;
77
78         /* Set buffer size as the head split is disabled. */
79         buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
80                               RTE_PKTMBUF_HEADROOM);
81         rxq->rx_hdr_len = 0;
82         rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
83         len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
84         rxq->max_pkt_len = RTE_MIN(len,
85                                    dev->data->dev_conf.rxmode.max_rx_pkt_len);
86
87         if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
88                 if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
89                     rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
90                         PMD_DRV_LOG(ERR, "maximum packet length must "
91                                     "be larger than %u and smaller than %u,"
92                                     "as jumbo frame is enabled",
93                                     (uint32_t)RTE_ETHER_MAX_LEN,
94                                     (uint32_t)ICE_FRAME_SIZE_MAX);
95                         return -EINVAL;
96                 }
97         } else {
98                 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
99                     rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
100                         PMD_DRV_LOG(ERR, "maximum packet length must be "
101                                     "larger than %u and smaller than %u, "
102                                     "as jumbo frame is disabled",
103                                     (uint32_t)RTE_ETHER_MIN_LEN,
104                                     (uint32_t)RTE_ETHER_MAX_LEN);
105                         return -EINVAL;
106                 }
107         }
108
109         memset(&rx_ctx, 0, sizeof(rx_ctx));
110
111         rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
112         rx_ctx.qlen = rxq->nb_rx_desc;
113         rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
114         rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
115         rx_ctx.dtype = 0; /* No Header Split mode */
116 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
117         rx_ctx.dsize = 1; /* 32B descriptors */
118 #endif
119         rx_ctx.rxmax = rxq->max_pkt_len;
120         /* TPH: Transaction Layer Packet (TLP) processing hints */
121         rx_ctx.tphrdesc_ena = 1;
122         rx_ctx.tphwdesc_ena = 1;
123         rx_ctx.tphdata_ena = 1;
124         rx_ctx.tphhead_ena = 1;
125         /* Low Receive Queue Threshold defined in 64 descriptors units.
126          * When the number of free descriptors goes below the lrxqthresh,
127          * an immediate interrupt is triggered.
128          */
129         rx_ctx.lrxqthresh = 2;
130         /*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/
131         rx_ctx.l2tsel = 1;
132         rx_ctx.showiv = 0;
133         rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
134
135         rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
136
137         PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
138                     rxq->port_id, rxq->queue_id, rxdid);
139
140         /* Enable Flexible Descriptors in the queue context which
141          * allows this driver to select a specific receive descriptor format
142          */
143         regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
144                 QRXFLXP_CNTXT_RXDID_IDX_M;
145
146         /* increasing context priority to pick up profile ID;
147          * default is 0x01; setting to 0x03 to ensure profile
148          * is programming if prev context is of same priority
149          */
150         regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
151                 QRXFLXP_CNTXT_RXDID_PRIO_M;
152
153         ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
154
155         err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
156         if (err) {
157                 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
158                             rxq->queue_id);
159                 return -EINVAL;
160         }
161         err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
162         if (err) {
163                 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
164                             rxq->queue_id);
165                 return -EINVAL;
166         }
167
168         buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
169                               RTE_PKTMBUF_HEADROOM);
170
171         /* Check if scattered RX needs to be used. */
172         if (rxq->max_pkt_len > buf_size)
173                 dev->data->scattered_rx = 1;
174
175         rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
176
177         /* Init the Rx tail register*/
178         ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
179
180         return 0;
181 }
182
183 /* Allocate mbufs for all descriptors in rx queue */
184 static int
185 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
186 {
187         struct ice_rx_entry *rxe = rxq->sw_ring;
188         uint64_t dma_addr;
189         uint16_t i;
190
191         for (i = 0; i < rxq->nb_rx_desc; i++) {
192                 volatile union ice_rx_flex_desc *rxd;
193                 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
194
195                 if (unlikely(!mbuf)) {
196                         PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
197                         return -ENOMEM;
198                 }
199
200                 rte_mbuf_refcnt_set(mbuf, 1);
201                 mbuf->next = NULL;
202                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
203                 mbuf->nb_segs = 1;
204                 mbuf->port = rxq->port_id;
205
206                 dma_addr =
207                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
208
209                 rxd = &rxq->rx_ring[i];
210                 rxd->read.pkt_addr = dma_addr;
211                 rxd->read.hdr_addr = 0;
212 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
213                 rxd->read.rsvd1 = 0;
214                 rxd->read.rsvd2 = 0;
215 #endif
216                 rxe[i].mbuf = mbuf;
217         }
218
219         return 0;
220 }
221
222 /* Free all mbufs for descriptors in rx queue */
223 static void
224 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
225 {
226         uint16_t i;
227
228         if (!rxq || !rxq->sw_ring) {
229                 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
230                 return;
231         }
232
233         for (i = 0; i < rxq->nb_rx_desc; i++) {
234                 if (rxq->sw_ring[i].mbuf) {
235                         rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
236                         rxq->sw_ring[i].mbuf = NULL;
237                 }
238         }
239         if (rxq->rx_nb_avail == 0)
240                 return;
241         for (i = 0; i < rxq->rx_nb_avail; i++)
242                 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
243
244         rxq->rx_nb_avail = 0;
245 }
246
247 /* turn on or off rx queue
248  * @q_idx: queue index in pf scope
249  * @on: turn on or off the queue
250  */
251 static int
252 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
253 {
254         uint32_t reg;
255         uint16_t j;
256
257         /* QRX_CTRL = QRX_ENA */
258         reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
259
260         if (on) {
261                 if (reg & QRX_CTRL_QENA_STAT_M)
262                         return 0; /* Already on, skip */
263                 reg |= QRX_CTRL_QENA_REQ_M;
264         } else {
265                 if (!(reg & QRX_CTRL_QENA_STAT_M))
266                         return 0; /* Already off, skip */
267                 reg &= ~QRX_CTRL_QENA_REQ_M;
268         }
269
270         /* Write the register */
271         ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
272         /* Check the result. It is said that QENA_STAT
273          * follows the QENA_REQ not more than 10 use.
274          * TODO: need to change the wait counter later
275          */
276         for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
277                 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
278                 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
279                 if (on) {
280                         if ((reg & QRX_CTRL_QENA_REQ_M) &&
281                             (reg & QRX_CTRL_QENA_STAT_M))
282                                 break;
283                 } else {
284                         if (!(reg & QRX_CTRL_QENA_REQ_M) &&
285                             !(reg & QRX_CTRL_QENA_STAT_M))
286                                 break;
287                 }
288         }
289
290         /* Check if it is timeout */
291         if (j >= ICE_CHK_Q_ENA_COUNT) {
292                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
293                             (on ? "enable" : "disable"), q_idx);
294                 return -ETIMEDOUT;
295         }
296
297         return 0;
298 }
299
300 static inline int
301 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
302 {
303         int ret = 0;
304
305         if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
306                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
307                              "rxq->rx_free_thresh=%d, "
308                              "ICE_RX_MAX_BURST=%d",
309                              rxq->rx_free_thresh, ICE_RX_MAX_BURST);
310                 ret = -EINVAL;
311         } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
312                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
313                              "rxq->rx_free_thresh=%d, "
314                              "rxq->nb_rx_desc=%d",
315                              rxq->rx_free_thresh, rxq->nb_rx_desc);
316                 ret = -EINVAL;
317         } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
318                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
319                              "rxq->nb_rx_desc=%d, "
320                              "rxq->rx_free_thresh=%d",
321                              rxq->nb_rx_desc, rxq->rx_free_thresh);
322                 ret = -EINVAL;
323         }
324
325         return ret;
326 }
327
328 /* reset fields in ice_rx_queue back to default */
329 static void
330 ice_reset_rx_queue(struct ice_rx_queue *rxq)
331 {
332         unsigned int i;
333         uint16_t len;
334
335         if (!rxq) {
336                 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
337                 return;
338         }
339
340         len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
341
342         for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
343                 ((volatile char *)rxq->rx_ring)[i] = 0;
344
345         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
346         for (i = 0; i < ICE_RX_MAX_BURST; ++i)
347                 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
348
349         rxq->rx_nb_avail = 0;
350         rxq->rx_next_avail = 0;
351         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
352
353         rxq->rx_tail = 0;
354         rxq->nb_rx_hold = 0;
355         rxq->pkt_first_seg = NULL;
356         rxq->pkt_last_seg = NULL;
357
358         rxq->rxrearm_start = 0;
359         rxq->rxrearm_nb = 0;
360 }
361
362 int
363 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
364 {
365         struct ice_rx_queue *rxq;
366         int err;
367         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
368
369         PMD_INIT_FUNC_TRACE();
370
371         if (rx_queue_id >= dev->data->nb_rx_queues) {
372                 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
373                             rx_queue_id, dev->data->nb_rx_queues);
374                 return -EINVAL;
375         }
376
377         rxq = dev->data->rx_queues[rx_queue_id];
378         if (!rxq || !rxq->q_set) {
379                 PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
380                             rx_queue_id);
381                 return -EINVAL;
382         }
383
384         err = ice_program_hw_rx_queue(rxq);
385         if (err) {
386                 PMD_DRV_LOG(ERR, "fail to program RX queue %u",
387                             rx_queue_id);
388                 return -EIO;
389         }
390
391         err = ice_alloc_rx_queue_mbufs(rxq);
392         if (err) {
393                 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
394                 return -ENOMEM;
395         }
396
397         /* Init the RX tail register. */
398         ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
399
400         err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
401         if (err) {
402                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
403                             rx_queue_id);
404
405                 rxq->rx_rel_mbufs(rxq);
406                 ice_reset_rx_queue(rxq);
407                 return -EINVAL;
408         }
409
410         dev->data->rx_queue_state[rx_queue_id] =
411                 RTE_ETH_QUEUE_STATE_STARTED;
412
413         return 0;
414 }
415
416 int
417 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
418 {
419         struct ice_rx_queue *rxq;
420         int err;
421         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
422
423         if (rx_queue_id < dev->data->nb_rx_queues) {
424                 rxq = dev->data->rx_queues[rx_queue_id];
425
426                 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
427                 if (err) {
428                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
429                                     rx_queue_id);
430                         return -EINVAL;
431                 }
432                 rxq->rx_rel_mbufs(rxq);
433                 ice_reset_rx_queue(rxq);
434                 dev->data->rx_queue_state[rx_queue_id] =
435                         RTE_ETH_QUEUE_STATE_STOPPED;
436         }
437
438         return 0;
439 }
440
441 int
442 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
443 {
444         struct ice_tx_queue *txq;
445         int err;
446         struct ice_vsi *vsi;
447         struct ice_hw *hw;
448         struct ice_aqc_add_tx_qgrp txq_elem;
449         struct ice_tlan_ctx tx_ctx;
450
451         PMD_INIT_FUNC_TRACE();
452
453         if (tx_queue_id >= dev->data->nb_tx_queues) {
454                 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
455                             tx_queue_id, dev->data->nb_tx_queues);
456                 return -EINVAL;
457         }
458
459         txq = dev->data->tx_queues[tx_queue_id];
460         if (!txq || !txq->q_set) {
461                 PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
462                             tx_queue_id);
463                 return -EINVAL;
464         }
465
466         vsi = txq->vsi;
467         hw = ICE_VSI_TO_HW(vsi);
468
469         memset(&txq_elem, 0, sizeof(txq_elem));
470         memset(&tx_ctx, 0, sizeof(tx_ctx));
471         txq_elem.num_txqs = 1;
472         txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
473
474         tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
475         tx_ctx.qlen = txq->nb_tx_desc;
476         tx_ctx.pf_num = hw->pf_id;
477         tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
478         tx_ctx.src_vsi = vsi->vsi_id;
479         tx_ctx.port_num = hw->port_info->lport;
480         tx_ctx.tso_ena = 1; /* tso enable */
481         tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
482         tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
483
484         ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
485                     ice_tlan_ctx_info);
486
487         txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
488
489         /* Init the Tx tail register*/
490         ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
491
492         /* Fix me, we assume TC always 0 here */
493         err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
494                         &txq_elem, sizeof(txq_elem), NULL);
495         if (err) {
496                 PMD_DRV_LOG(ERR, "Failed to add lan txq");
497                 return -EIO;
498         }
499         /* store the schedule node id */
500         txq->q_teid = txq_elem.txqs[0].q_teid;
501
502         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
503         return 0;
504 }
505
506 static enum ice_status
507 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
508 {
509         struct ice_vsi *vsi = rxq->vsi;
510         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
511         uint32_t rxdid = ICE_RXDID_LEGACY_1;
512         struct ice_rlan_ctx rx_ctx;
513         enum ice_status err;
514         uint32_t regval;
515
516         rxq->rx_hdr_len = 0;
517         rxq->rx_buf_len = 1024;
518
519         memset(&rx_ctx, 0, sizeof(rx_ctx));
520
521         rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
522         rx_ctx.qlen = rxq->nb_rx_desc;
523         rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
524         rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
525         rx_ctx.dtype = 0; /* No Header Split mode */
526         rx_ctx.dsize = 1; /* 32B descriptors */
527         rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
528         /* TPH: Transaction Layer Packet (TLP) processing hints */
529         rx_ctx.tphrdesc_ena = 1;
530         rx_ctx.tphwdesc_ena = 1;
531         rx_ctx.tphdata_ena = 1;
532         rx_ctx.tphhead_ena = 1;
533         /* Low Receive Queue Threshold defined in 64 descriptors units.
534          * When the number of free descriptors goes below the lrxqthresh,
535          * an immediate interrupt is triggered.
536          */
537         rx_ctx.lrxqthresh = 2;
538         /*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/
539         rx_ctx.l2tsel = 1;
540         rx_ctx.showiv = 0;
541         rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
542
543         /* Enable Flexible Descriptors in the queue context which
544          * allows this driver to select a specific receive descriptor format
545          */
546         regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
547                 QRXFLXP_CNTXT_RXDID_IDX_M;
548
549         /* increasing context priority to pick up profile ID;
550          * default is 0x01; setting to 0x03 to ensure profile
551          * is programming if prev context is of same priority
552          */
553         regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
554                 QRXFLXP_CNTXT_RXDID_PRIO_M;
555
556         ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
557
558         err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
559         if (err) {
560                 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
561                             rxq->queue_id);
562                 return -EINVAL;
563         }
564         err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
565         if (err) {
566                 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
567                             rxq->queue_id);
568                 return -EINVAL;
569         }
570
571         rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
572
573         /* Init the Rx tail register*/
574         ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
575
576         return 0;
577 }
578
579 int
580 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
581 {
582         struct ice_rx_queue *rxq;
583         int err;
584         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
585         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
586
587         PMD_INIT_FUNC_TRACE();
588
589         rxq = pf->fdir.rxq;
590         if (!rxq || !rxq->q_set) {
591                 PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
592                             rx_queue_id);
593                 return -EINVAL;
594         }
595
596         err = ice_fdir_program_hw_rx_queue(rxq);
597         if (err) {
598                 PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
599                             rx_queue_id);
600                 return -EIO;
601         }
602
603         /* Init the RX tail register. */
604         ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
605
606         err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
607         if (err) {
608                 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
609                             rx_queue_id);
610
611                 ice_reset_rx_queue(rxq);
612                 return -EINVAL;
613         }
614
615         return 0;
616 }
617
618 int
619 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
620 {
621         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
622         struct ice_tx_queue *txq;
623         int err;
624         struct ice_vsi *vsi;
625         struct ice_hw *hw;
626         struct ice_aqc_add_tx_qgrp txq_elem;
627         struct ice_tlan_ctx tx_ctx;
628
629         PMD_INIT_FUNC_TRACE();
630
631         txq = pf->fdir.txq;
632         if (!txq || !txq->q_set) {
633                 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
634                             tx_queue_id);
635                 return -EINVAL;
636         }
637
638         vsi = txq->vsi;
639         hw = ICE_VSI_TO_HW(vsi);
640
641         memset(&txq_elem, 0, sizeof(txq_elem));
642         memset(&tx_ctx, 0, sizeof(tx_ctx));
643         txq_elem.num_txqs = 1;
644         txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
645
646         tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
647         tx_ctx.qlen = txq->nb_tx_desc;
648         tx_ctx.pf_num = hw->pf_id;
649         tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
650         tx_ctx.src_vsi = vsi->vsi_id;
651         tx_ctx.port_num = hw->port_info->lport;
652         tx_ctx.tso_ena = 1; /* tso enable */
653         tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
654         tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
655
656         ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx,
657                     ice_tlan_ctx_info);
658
659         txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
660
661         /* Init the Tx tail register*/
662         ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
663
664         /* Fix me, we assume TC always 0 here */
665         err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
666                               &txq_elem, sizeof(txq_elem), NULL);
667         if (err) {
668                 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
669                 return -EIO;
670         }
671         /* store the schedule node id */
672         txq->q_teid = txq_elem.txqs[0].q_teid;
673
674         return 0;
675 }
676
677 /* Free all mbufs for descriptors in tx queue */
678 static void
679 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
680 {
681         uint16_t i;
682
683         if (!txq || !txq->sw_ring) {
684                 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
685                 return;
686         }
687
688         for (i = 0; i < txq->nb_tx_desc; i++) {
689                 if (txq->sw_ring[i].mbuf) {
690                         rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
691                         txq->sw_ring[i].mbuf = NULL;
692                 }
693         }
694 }
695
696 static void
697 ice_reset_tx_queue(struct ice_tx_queue *txq)
698 {
699         struct ice_tx_entry *txe;
700         uint16_t i, prev, size;
701
702         if (!txq) {
703                 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
704                 return;
705         }
706
707         txe = txq->sw_ring;
708         size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
709         for (i = 0; i < size; i++)
710                 ((volatile char *)txq->tx_ring)[i] = 0;
711
712         prev = (uint16_t)(txq->nb_tx_desc - 1);
713         for (i = 0; i < txq->nb_tx_desc; i++) {
714                 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
715
716                 txd->cmd_type_offset_bsz =
717                         rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
718                 txe[i].mbuf =  NULL;
719                 txe[i].last_id = i;
720                 txe[prev].next_id = i;
721                 prev = i;
722         }
723
724         txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
725         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
726
727         txq->tx_tail = 0;
728         txq->nb_tx_used = 0;
729
730         txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
731         txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
732 }
733
734 int
735 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
736 {
737         struct ice_tx_queue *txq;
738         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
739         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
740         struct ice_vsi *vsi = pf->main_vsi;
741         enum ice_status status;
742         uint16_t q_ids[1];
743         uint32_t q_teids[1];
744         uint16_t q_handle = tx_queue_id;
745
746         if (tx_queue_id >= dev->data->nb_tx_queues) {
747                 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
748                             tx_queue_id, dev->data->nb_tx_queues);
749                 return -EINVAL;
750         }
751
752         txq = dev->data->tx_queues[tx_queue_id];
753         if (!txq) {
754                 PMD_DRV_LOG(ERR, "TX queue %u is not available",
755                             tx_queue_id);
756                 return -EINVAL;
757         }
758
759         q_ids[0] = txq->reg_idx;
760         q_teids[0] = txq->q_teid;
761
762         /* Fix me, we assume TC always 0 here */
763         status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
764                                 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
765         if (status != ICE_SUCCESS) {
766                 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
767                 return -EINVAL;
768         }
769
770         txq->tx_rel_mbufs(txq);
771         ice_reset_tx_queue(txq);
772         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
773
774         return 0;
775 }
776
777 int
778 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
779 {
780         struct ice_rx_queue *rxq;
781         int err;
782         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
783         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
784
785         rxq = pf->fdir.rxq;
786
787         err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
788         if (err) {
789                 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
790                             rx_queue_id);
791                 return -EINVAL;
792         }
793         rxq->rx_rel_mbufs(rxq);
794
795         return 0;
796 }
797
798 int
799 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
800 {
801         struct ice_tx_queue *txq;
802         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
803         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
804         struct ice_vsi *vsi = pf->main_vsi;
805         enum ice_status status;
806         uint16_t q_ids[1];
807         uint32_t q_teids[1];
808         uint16_t q_handle = tx_queue_id;
809
810         txq = pf->fdir.txq;
811         if (!txq) {
812                 PMD_DRV_LOG(ERR, "TX queue %u is not available",
813                             tx_queue_id);
814                 return -EINVAL;
815         }
816         vsi = txq->vsi;
817
818         q_ids[0] = txq->reg_idx;
819         q_teids[0] = txq->q_teid;
820
821         /* Fix me, we assume TC always 0 here */
822         status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
823                                  q_ids, q_teids, ICE_NO_RESET, 0, NULL);
824         if (status != ICE_SUCCESS) {
825                 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
826                 return -EINVAL;
827         }
828
829         txq->tx_rel_mbufs(txq);
830
831         return 0;
832 }
833
834 int
835 ice_rx_queue_setup(struct rte_eth_dev *dev,
836                    uint16_t queue_idx,
837                    uint16_t nb_desc,
838                    unsigned int socket_id,
839                    const struct rte_eth_rxconf *rx_conf,
840                    struct rte_mempool *mp)
841 {
842         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
843         struct ice_adapter *ad =
844                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
845         struct ice_vsi *vsi = pf->main_vsi;
846         struct ice_rx_queue *rxq;
847         const struct rte_memzone *rz;
848         uint32_t ring_size;
849         uint16_t len;
850         int use_def_burst_func = 1;
851
852         if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
853             nb_desc > ICE_MAX_RING_DESC ||
854             nb_desc < ICE_MIN_RING_DESC) {
855                 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
856                              "invalid", nb_desc);
857                 return -EINVAL;
858         }
859
860         /* Free memory if needed */
861         if (dev->data->rx_queues[queue_idx]) {
862                 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
863                 dev->data->rx_queues[queue_idx] = NULL;
864         }
865
866         /* Allocate the rx queue data structure */
867         rxq = rte_zmalloc_socket(NULL,
868                                  sizeof(struct ice_rx_queue),
869                                  RTE_CACHE_LINE_SIZE,
870                                  socket_id);
871         if (!rxq) {
872                 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
873                              "rx queue data structure");
874                 return -ENOMEM;
875         }
876         rxq->mp = mp;
877         rxq->nb_rx_desc = nb_desc;
878         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
879         rxq->queue_id = queue_idx;
880
881         rxq->reg_idx = vsi->base_queue + queue_idx;
882         rxq->port_id = dev->data->port_id;
883         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
884                 rxq->crc_len = RTE_ETHER_CRC_LEN;
885         else
886                 rxq->crc_len = 0;
887
888         rxq->drop_en = rx_conf->rx_drop_en;
889         rxq->vsi = vsi;
890         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
891         rxq->proto_xtr = pf->proto_xtr != NULL ?
892                          pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
893
894         /* Allocate the maximun number of RX ring hardware descriptor. */
895         len = ICE_MAX_RING_DESC;
896
897         /**
898          * Allocating a little more memory because vectorized/bulk_alloc Rx
899          * functions doesn't check boundaries each time.
900          */
901         len += ICE_RX_MAX_BURST;
902
903         /* Allocate the maximum number of RX ring hardware descriptor. */
904         ring_size = sizeof(union ice_rx_flex_desc) * len;
905         ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
906         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
907                                       ring_size, ICE_RING_BASE_ALIGN,
908                                       socket_id);
909         if (!rz) {
910                 ice_rx_queue_release(rxq);
911                 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
912                 return -ENOMEM;
913         }
914
915         /* Zero all the descriptors in the ring. */
916         memset(rz->addr, 0, ring_size);
917
918         rxq->rx_ring_dma = rz->iova;
919         rxq->rx_ring = rz->addr;
920
921         /* always reserve more for bulk alloc */
922         len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
923
924         /* Allocate the software ring. */
925         rxq->sw_ring = rte_zmalloc_socket(NULL,
926                                           sizeof(struct ice_rx_entry) * len,
927                                           RTE_CACHE_LINE_SIZE,
928                                           socket_id);
929         if (!rxq->sw_ring) {
930                 ice_rx_queue_release(rxq);
931                 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
932                 return -ENOMEM;
933         }
934
935         ice_reset_rx_queue(rxq);
936         rxq->q_set = true;
937         dev->data->rx_queues[queue_idx] = rxq;
938         rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
939
940         use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
941
942         if (!use_def_burst_func) {
943                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
944                              "satisfied. Rx Burst Bulk Alloc function will be "
945                              "used on port=%d, queue=%d.",
946                              rxq->port_id, rxq->queue_id);
947         } else {
948                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
949                              "not satisfied, Scattered Rx is requested. "
950                              "on port=%d, queue=%d.",
951                              rxq->port_id, rxq->queue_id);
952                 ad->rx_bulk_alloc_allowed = false;
953         }
954
955         return 0;
956 }
957
958 void
959 ice_rx_queue_release(void *rxq)
960 {
961         struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
962
963         if (!q) {
964                 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
965                 return;
966         }
967
968         q->rx_rel_mbufs(q);
969         rte_free(q->sw_ring);
970         rte_free(q);
971 }
972
973 int
974 ice_tx_queue_setup(struct rte_eth_dev *dev,
975                    uint16_t queue_idx,
976                    uint16_t nb_desc,
977                    unsigned int socket_id,
978                    const struct rte_eth_txconf *tx_conf)
979 {
980         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
981         struct ice_vsi *vsi = pf->main_vsi;
982         struct ice_tx_queue *txq;
983         const struct rte_memzone *tz;
984         uint32_t ring_size;
985         uint16_t tx_rs_thresh, tx_free_thresh;
986         uint64_t offloads;
987
988         offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
989
990         if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
991             nb_desc > ICE_MAX_RING_DESC ||
992             nb_desc < ICE_MIN_RING_DESC) {
993                 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
994                              "invalid", nb_desc);
995                 return -EINVAL;
996         }
997
998         /**
999          * The following two parameters control the setting of the RS bit on
1000          * transmit descriptors. TX descriptors will have their RS bit set
1001          * after txq->tx_rs_thresh descriptors have been used. The TX
1002          * descriptor ring will be cleaned after txq->tx_free_thresh
1003          * descriptors are used or if the number of descriptors required to
1004          * transmit a packet is greater than the number of free TX descriptors.
1005          *
1006          * The following constraints must be satisfied:
1007          *  - tx_rs_thresh must be greater than 0.
1008          *  - tx_rs_thresh must be less than the size of the ring minus 2.
1009          *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
1010          *  - tx_rs_thresh must be a divisor of the ring size.
1011          *  - tx_free_thresh must be greater than 0.
1012          *  - tx_free_thresh must be less than the size of the ring minus 3.
1013          *  - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1014          *
1015          * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1016          * race condition, hence the maximum threshold constraints. When set
1017          * to zero use default values.
1018          */
1019         tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1020                                     tx_conf->tx_free_thresh :
1021                                     ICE_DEFAULT_TX_FREE_THRESH);
1022         /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
1023         tx_rs_thresh =
1024                 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1025                         nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1026         if (tx_conf->tx_rs_thresh)
1027                 tx_rs_thresh = tx_conf->tx_rs_thresh;
1028         if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1029                 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1030                                 "exceed nb_desc. (tx_rs_thresh=%u "
1031                                 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1032                                 (unsigned int)tx_rs_thresh,
1033                                 (unsigned int)tx_free_thresh,
1034                                 (unsigned int)nb_desc,
1035                                 (int)dev->data->port_id,
1036                                 (int)queue_idx);
1037                 return -EINVAL;
1038         }
1039         if (tx_rs_thresh >= (nb_desc - 2)) {
1040                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1041                              "number of TX descriptors minus 2. "
1042                              "(tx_rs_thresh=%u port=%d queue=%d)",
1043                              (unsigned int)tx_rs_thresh,
1044                              (int)dev->data->port_id,
1045                              (int)queue_idx);
1046                 return -EINVAL;
1047         }
1048         if (tx_free_thresh >= (nb_desc - 3)) {
1049                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1050                              "tx_free_thresh must be less than the "
1051                              "number of TX descriptors minus 3. "
1052                              "(tx_free_thresh=%u port=%d queue=%d)",
1053                              (unsigned int)tx_free_thresh,
1054                              (int)dev->data->port_id,
1055                              (int)queue_idx);
1056                 return -EINVAL;
1057         }
1058         if (tx_rs_thresh > tx_free_thresh) {
1059                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1060                              "equal to tx_free_thresh. (tx_free_thresh=%u"
1061                              " tx_rs_thresh=%u port=%d queue=%d)",
1062                              (unsigned int)tx_free_thresh,
1063                              (unsigned int)tx_rs_thresh,
1064                              (int)dev->data->port_id,
1065                              (int)queue_idx);
1066                 return -EINVAL;
1067         }
1068         if ((nb_desc % tx_rs_thresh) != 0) {
1069                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1070                              "number of TX descriptors. (tx_rs_thresh=%u"
1071                              " port=%d queue=%d)",
1072                              (unsigned int)tx_rs_thresh,
1073                              (int)dev->data->port_id,
1074                              (int)queue_idx);
1075                 return -EINVAL;
1076         }
1077         if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1078                 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1079                              "tx_rs_thresh is greater than 1. "
1080                              "(tx_rs_thresh=%u port=%d queue=%d)",
1081                              (unsigned int)tx_rs_thresh,
1082                              (int)dev->data->port_id,
1083                              (int)queue_idx);
1084                 return -EINVAL;
1085         }
1086
1087         /* Free memory if needed. */
1088         if (dev->data->tx_queues[queue_idx]) {
1089                 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1090                 dev->data->tx_queues[queue_idx] = NULL;
1091         }
1092
1093         /* Allocate the TX queue data structure. */
1094         txq = rte_zmalloc_socket(NULL,
1095                                  sizeof(struct ice_tx_queue),
1096                                  RTE_CACHE_LINE_SIZE,
1097                                  socket_id);
1098         if (!txq) {
1099                 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1100                              "tx queue structure");
1101                 return -ENOMEM;
1102         }
1103
1104         /* Allocate TX hardware ring descriptors. */
1105         ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1106         ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1107         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1108                                       ring_size, ICE_RING_BASE_ALIGN,
1109                                       socket_id);
1110         if (!tz) {
1111                 ice_tx_queue_release(txq);
1112                 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1113                 return -ENOMEM;
1114         }
1115
1116         txq->nb_tx_desc = nb_desc;
1117         txq->tx_rs_thresh = tx_rs_thresh;
1118         txq->tx_free_thresh = tx_free_thresh;
1119         txq->pthresh = tx_conf->tx_thresh.pthresh;
1120         txq->hthresh = tx_conf->tx_thresh.hthresh;
1121         txq->wthresh = tx_conf->tx_thresh.wthresh;
1122         txq->queue_id = queue_idx;
1123
1124         txq->reg_idx = vsi->base_queue + queue_idx;
1125         txq->port_id = dev->data->port_id;
1126         txq->offloads = offloads;
1127         txq->vsi = vsi;
1128         txq->tx_deferred_start = tx_conf->tx_deferred_start;
1129
1130         txq->tx_ring_dma = tz->iova;
1131         txq->tx_ring = tz->addr;
1132
1133         /* Allocate software ring */
1134         txq->sw_ring =
1135                 rte_zmalloc_socket(NULL,
1136                                    sizeof(struct ice_tx_entry) * nb_desc,
1137                                    RTE_CACHE_LINE_SIZE,
1138                                    socket_id);
1139         if (!txq->sw_ring) {
1140                 ice_tx_queue_release(txq);
1141                 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1142                 return -ENOMEM;
1143         }
1144
1145         ice_reset_tx_queue(txq);
1146         txq->q_set = true;
1147         dev->data->tx_queues[queue_idx] = txq;
1148         txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1149         ice_set_tx_function_flag(dev, txq);
1150
1151         return 0;
1152 }
1153
1154 void
1155 ice_tx_queue_release(void *txq)
1156 {
1157         struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1158
1159         if (!q) {
1160                 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1161                 return;
1162         }
1163
1164         q->tx_rel_mbufs(q);
1165         rte_free(q->sw_ring);
1166         rte_free(q);
1167 }
1168
1169 void
1170 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1171                  struct rte_eth_rxq_info *qinfo)
1172 {
1173         struct ice_rx_queue *rxq;
1174
1175         rxq = dev->data->rx_queues[queue_id];
1176
1177         qinfo->mp = rxq->mp;
1178         qinfo->scattered_rx = dev->data->scattered_rx;
1179         qinfo->nb_desc = rxq->nb_rx_desc;
1180
1181         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1182         qinfo->conf.rx_drop_en = rxq->drop_en;
1183         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1184 }
1185
1186 void
1187 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1188                  struct rte_eth_txq_info *qinfo)
1189 {
1190         struct ice_tx_queue *txq;
1191
1192         txq = dev->data->tx_queues[queue_id];
1193
1194         qinfo->nb_desc = txq->nb_tx_desc;
1195
1196         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1197         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1198         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1199
1200         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1201         qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1202         qinfo->conf.offloads = txq->offloads;
1203         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1204 }
1205
1206 uint32_t
1207 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1208 {
1209 #define ICE_RXQ_SCAN_INTERVAL 4
1210         volatile union ice_rx_flex_desc *rxdp;
1211         struct ice_rx_queue *rxq;
1212         uint16_t desc = 0;
1213
1214         rxq = dev->data->rx_queues[rx_queue_id];
1215         rxdp = &rxq->rx_ring[rxq->rx_tail];
1216         while ((desc < rxq->nb_rx_desc) &&
1217                rte_le_to_cpu_16(rxdp->wb.status_error0) &
1218                (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1219                 /**
1220                  * Check the DD bit of a rx descriptor of each 4 in a group,
1221                  * to avoid checking too frequently and downgrading performance
1222                  * too much.
1223                  */
1224                 desc += ICE_RXQ_SCAN_INTERVAL;
1225                 rxdp += ICE_RXQ_SCAN_INTERVAL;
1226                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1227                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1228                                  desc - rxq->nb_rx_desc]);
1229         }
1230
1231         return desc;
1232 }
1233
1234 #define ICE_RX_FLEX_ERR0_BITS   \
1235         ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) |        \
1236          (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |   \
1237          (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |   \
1238          (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |  \
1239          (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1240          (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1241
1242 /* Rx L3/L4 checksum */
1243 static inline uint64_t
1244 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1245 {
1246         uint64_t flags = 0;
1247
1248         /* check if HW has decoded the packet and checksum */
1249         if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1250                 return 0;
1251
1252         if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1253                 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1254                 return flags;
1255         }
1256
1257         if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1258                 flags |= PKT_RX_IP_CKSUM_BAD;
1259         else
1260                 flags |= PKT_RX_IP_CKSUM_GOOD;
1261
1262         if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1263                 flags |= PKT_RX_L4_CKSUM_BAD;
1264         else
1265                 flags |= PKT_RX_L4_CKSUM_GOOD;
1266
1267         if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1268                 flags |= PKT_RX_EIP_CKSUM_BAD;
1269
1270         return flags;
1271 }
1272
1273 static inline void
1274 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1275 {
1276         if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1277             (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1278                 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1279                 mb->vlan_tci =
1280                         rte_le_to_cpu_16(rxdp->wb.l2tag1);
1281                 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1282                            rte_le_to_cpu_16(rxdp->wb.l2tag1));
1283         } else {
1284                 mb->vlan_tci = 0;
1285         }
1286
1287 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1288         if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1289             (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1290                 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1291                                 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1292                 mb->vlan_tci_outer = mb->vlan_tci;
1293                 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1294                 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1295                            rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1296                            rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1297         } else {
1298                 mb->vlan_tci_outer = 0;
1299         }
1300 #endif
1301         PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1302                    mb->vlan_tci, mb->vlan_tci_outer);
1303 }
1304
1305 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1306 #define ICE_RX_PROTO_XTR_VALID \
1307         ((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
1308          (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
1309
1310 static void
1311 ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
1312                      volatile struct ice_32b_rx_flex_desc_comms *desc)
1313 {
1314         uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
1315         uint32_t metadata;
1316         uint64_t ol_flag;
1317
1318         if (unlikely(!(stat_err & ICE_RX_PROTO_XTR_VALID)))
1319                 return;
1320
1321         ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid);
1322         if (unlikely(!ol_flag))
1323                 return;
1324
1325         mb->ol_flags |= ol_flag;
1326
1327         metadata = stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) ?
1328                                 rte_le_to_cpu_16(desc->flex_ts.flex.aux0) : 0;
1329
1330         if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)))
1331                 metadata |= rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
1332
1333         *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
1334 }
1335 #endif
1336
1337 static inline void
1338 ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
1339                       volatile union ice_rx_flex_desc *rxdp)
1340 {
1341         volatile struct ice_32b_rx_flex_desc_comms *desc =
1342                         (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
1343         uint16_t stat_err;
1344
1345         stat_err = rte_le_to_cpu_16(desc->status_error0);
1346         if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
1347                 mb->ol_flags |= PKT_RX_RSS_HASH;
1348                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
1349         }
1350
1351 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1352         if (desc->flow_id != 0xFFFFFFFF) {
1353                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
1354                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
1355         }
1356
1357         if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
1358                 ice_rxd_to_proto_xtr(mb, desc);
1359 #endif
1360 }
1361
1362 #define ICE_LOOK_AHEAD 8
1363 #if (ICE_LOOK_AHEAD != 8)
1364 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1365 #endif
1366 static inline int
1367 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1368 {
1369         volatile union ice_rx_flex_desc *rxdp;
1370         struct ice_rx_entry *rxep;
1371         struct rte_mbuf *mb;
1372         uint16_t stat_err0;
1373         uint16_t pkt_len;
1374         int32_t s[ICE_LOOK_AHEAD], nb_dd;
1375         int32_t i, j, nb_rx = 0;
1376         uint64_t pkt_flags = 0;
1377         uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1378
1379         rxdp = &rxq->rx_ring[rxq->rx_tail];
1380         rxep = &rxq->sw_ring[rxq->rx_tail];
1381
1382         stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1383
1384         /* Make sure there is at least 1 packet to receive */
1385         if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1386                 return 0;
1387
1388         /**
1389          * Scan LOOK_AHEAD descriptors at a time to determine which
1390          * descriptors reference packets that are ready to be received.
1391          */
1392         for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1393              rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1394                 /* Read desc statuses backwards to avoid race condition */
1395                 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1396                         s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1397
1398                 rte_smp_rmb();
1399
1400                 /* Compute how many status bits were set */
1401                 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1402                         nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1403
1404                 nb_rx += nb_dd;
1405
1406                 /* Translate descriptor info to mbuf parameters */
1407                 for (j = 0; j < nb_dd; j++) {
1408                         mb = rxep[j].mbuf;
1409                         pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1410                                    ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1411                         mb->data_len = pkt_len;
1412                         mb->pkt_len = pkt_len;
1413                         mb->ol_flags = 0;
1414                         stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1415                         pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1416                         mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1417                                 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1418                         ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1419                         ice_rxd_to_pkt_fields(mb, &rxdp[j]);
1420
1421                         mb->ol_flags |= pkt_flags;
1422                 }
1423
1424                 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1425                         rxq->rx_stage[i + j] = rxep[j].mbuf;
1426
1427                 if (nb_dd != ICE_LOOK_AHEAD)
1428                         break;
1429         }
1430
1431         /* Clear software ring entries */
1432         for (i = 0; i < nb_rx; i++)
1433                 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1434
1435         PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1436                    "port_id=%u, queue_id=%u, nb_rx=%d",
1437                    rxq->port_id, rxq->queue_id, nb_rx);
1438
1439         return nb_rx;
1440 }
1441
1442 static inline uint16_t
1443 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1444                        struct rte_mbuf **rx_pkts,
1445                        uint16_t nb_pkts)
1446 {
1447         uint16_t i;
1448         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1449
1450         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1451
1452         for (i = 0; i < nb_pkts; i++)
1453                 rx_pkts[i] = stage[i];
1454
1455         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1456         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1457
1458         return nb_pkts;
1459 }
1460
1461 static inline int
1462 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1463 {
1464         volatile union ice_rx_flex_desc *rxdp;
1465         struct ice_rx_entry *rxep;
1466         struct rte_mbuf *mb;
1467         uint16_t alloc_idx, i;
1468         uint64_t dma_addr;
1469         int diag;
1470
1471         /* Allocate buffers in bulk */
1472         alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1473                                (rxq->rx_free_thresh - 1));
1474         rxep = &rxq->sw_ring[alloc_idx];
1475         diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1476                                     rxq->rx_free_thresh);
1477         if (unlikely(diag != 0)) {
1478                 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1479                 return -ENOMEM;
1480         }
1481
1482         rxdp = &rxq->rx_ring[alloc_idx];
1483         for (i = 0; i < rxq->rx_free_thresh; i++) {
1484                 if (likely(i < (rxq->rx_free_thresh - 1)))
1485                         /* Prefetch next mbuf */
1486                         rte_prefetch0(rxep[i + 1].mbuf);
1487
1488                 mb = rxep[i].mbuf;
1489                 rte_mbuf_refcnt_set(mb, 1);
1490                 mb->next = NULL;
1491                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1492                 mb->nb_segs = 1;
1493                 mb->port = rxq->port_id;
1494                 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1495                 rxdp[i].read.hdr_addr = 0;
1496                 rxdp[i].read.pkt_addr = dma_addr;
1497         }
1498
1499         /* Update rx tail regsiter */
1500         ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1501
1502         rxq->rx_free_trigger =
1503                 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1504         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1505                 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1506
1507         return 0;
1508 }
1509
1510 static inline uint16_t
1511 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1512 {
1513         struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1514         uint16_t nb_rx = 0;
1515         struct rte_eth_dev *dev;
1516
1517         if (!nb_pkts)
1518                 return 0;
1519
1520         if (rxq->rx_nb_avail)
1521                 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1522
1523         nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1524         rxq->rx_next_avail = 0;
1525         rxq->rx_nb_avail = nb_rx;
1526         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1527
1528         if (rxq->rx_tail > rxq->rx_free_trigger) {
1529                 if (ice_rx_alloc_bufs(rxq) != 0) {
1530                         uint16_t i, j;
1531
1532                         dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1533                         dev->data->rx_mbuf_alloc_failed +=
1534                                 rxq->rx_free_thresh;
1535                         PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1536                                    "port_id=%u, queue_id=%u",
1537                                    rxq->port_id, rxq->queue_id);
1538                         rxq->rx_nb_avail = 0;
1539                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1540                         for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1541                                 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1542
1543                         return 0;
1544                 }
1545         }
1546
1547         if (rxq->rx_tail >= rxq->nb_rx_desc)
1548                 rxq->rx_tail = 0;
1549
1550         if (rxq->rx_nb_avail)
1551                 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1552
1553         return 0;
1554 }
1555
1556 static uint16_t
1557 ice_recv_pkts_bulk_alloc(void *rx_queue,
1558                          struct rte_mbuf **rx_pkts,
1559                          uint16_t nb_pkts)
1560 {
1561         uint16_t nb_rx = 0;
1562         uint16_t n;
1563         uint16_t count;
1564
1565         if (unlikely(nb_pkts == 0))
1566                 return nb_rx;
1567
1568         if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1569                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1570
1571         while (nb_pkts) {
1572                 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1573                 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1574                 nb_rx = (uint16_t)(nb_rx + count);
1575                 nb_pkts = (uint16_t)(nb_pkts - count);
1576                 if (count < n)
1577                         break;
1578         }
1579
1580         return nb_rx;
1581 }
1582
1583 static uint16_t
1584 ice_recv_scattered_pkts(void *rx_queue,
1585                         struct rte_mbuf **rx_pkts,
1586                         uint16_t nb_pkts)
1587 {
1588         struct ice_rx_queue *rxq = rx_queue;
1589         volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1590         volatile union ice_rx_flex_desc *rxdp;
1591         union ice_rx_flex_desc rxd;
1592         struct ice_rx_entry *sw_ring = rxq->sw_ring;
1593         struct ice_rx_entry *rxe;
1594         struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1595         struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1596         struct rte_mbuf *nmb; /* new allocated mbuf */
1597         struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1598         uint16_t rx_id = rxq->rx_tail;
1599         uint16_t nb_rx = 0;
1600         uint16_t nb_hold = 0;
1601         uint16_t rx_packet_len;
1602         uint16_t rx_stat_err0;
1603         uint64_t dma_addr;
1604         uint64_t pkt_flags;
1605         uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1606         struct rte_eth_dev *dev;
1607
1608         while (nb_rx < nb_pkts) {
1609                 rxdp = &rx_ring[rx_id];
1610                 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1611
1612                 /* Check the DD bit first */
1613                 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1614                         break;
1615
1616                 /* allocate mbuf */
1617                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1618                 if (unlikely(!nmb)) {
1619                         dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
1620                         dev->data->rx_mbuf_alloc_failed++;
1621                         break;
1622                 }
1623                 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
1624
1625                 nb_hold++;
1626                 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1627                 rx_id++;
1628                 if (unlikely(rx_id == rxq->nb_rx_desc))
1629                         rx_id = 0;
1630
1631                 /* Prefetch next mbuf */
1632                 rte_prefetch0(sw_ring[rx_id].mbuf);
1633
1634                 /**
1635                  * When next RX descriptor is on a cache line boundary,
1636                  * prefetch the next 4 RX descriptors and next 8 pointers
1637                  * to mbufs.
1638                  */
1639                 if ((rx_id & 0x3) == 0) {
1640                         rte_prefetch0(&rx_ring[rx_id]);
1641                         rte_prefetch0(&sw_ring[rx_id]);
1642                 }
1643
1644                 rxm = rxe->mbuf;
1645                 rxe->mbuf = nmb;
1646                 dma_addr =
1647                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1648
1649                 /* Set data buffer address and data length of the mbuf */
1650                 rxdp->read.hdr_addr = 0;
1651                 rxdp->read.pkt_addr = dma_addr;
1652                 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1653                                 ICE_RX_FLX_DESC_PKT_LEN_M;
1654                 rxm->data_len = rx_packet_len;
1655                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1656
1657                 /**
1658                  * If this is the first buffer of the received packet, set the
1659                  * pointer to the first mbuf of the packet and initialize its
1660                  * context. Otherwise, update the total length and the number
1661                  * of segments of the current scattered packet, and update the
1662                  * pointer to the last mbuf of the current packet.
1663                  */
1664                 if (!first_seg) {
1665                         first_seg = rxm;
1666                         first_seg->nb_segs = 1;
1667                         first_seg->pkt_len = rx_packet_len;
1668                 } else {
1669                         first_seg->pkt_len =
1670                                 (uint16_t)(first_seg->pkt_len +
1671                                            rx_packet_len);
1672                         first_seg->nb_segs++;
1673                         last_seg->next = rxm;
1674                 }
1675
1676                 /**
1677                  * If this is not the last buffer of the received packet,
1678                  * update the pointer to the last mbuf of the current scattered
1679                  * packet and continue to parse the RX ring.
1680                  */
1681                 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1682                         last_seg = rxm;
1683                         continue;
1684                 }
1685
1686                 /**
1687                  * This is the last buffer of the received packet. If the CRC
1688                  * is not stripped by the hardware:
1689                  *  - Subtract the CRC length from the total packet length.
1690                  *  - If the last buffer only contains the whole CRC or a part
1691                  *  of it, free the mbuf associated to the last buffer. If part
1692                  *  of the CRC is also contained in the previous mbuf, subtract
1693                  *  the length of that CRC part from the data length of the
1694                  *  previous mbuf.
1695                  */
1696                 rxm->next = NULL;
1697                 if (unlikely(rxq->crc_len > 0)) {
1698                         first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1699                         if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1700                                 rte_pktmbuf_free_seg(rxm);
1701                                 first_seg->nb_segs--;
1702                                 last_seg->data_len =
1703                                         (uint16_t)(last_seg->data_len -
1704                                         (RTE_ETHER_CRC_LEN - rx_packet_len));
1705                                 last_seg->next = NULL;
1706                         } else
1707                                 rxm->data_len = (uint16_t)(rx_packet_len -
1708                                                            RTE_ETHER_CRC_LEN);
1709                 }
1710
1711                 first_seg->port = rxq->port_id;
1712                 first_seg->ol_flags = 0;
1713                 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1714                         rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1715                 ice_rxd_to_vlan_tci(first_seg, &rxd);
1716                 ice_rxd_to_pkt_fields(first_seg, &rxd);
1717                 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1718                 first_seg->ol_flags |= pkt_flags;
1719                 /* Prefetch data of first segment, if configured to do so. */
1720                 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1721                                           first_seg->data_off));
1722                 rx_pkts[nb_rx++] = first_seg;
1723                 first_seg = NULL;
1724         }
1725
1726         /* Record index of the next RX descriptor to probe. */
1727         rxq->rx_tail = rx_id;
1728         rxq->pkt_first_seg = first_seg;
1729         rxq->pkt_last_seg = last_seg;
1730
1731         /**
1732          * If the number of free RX descriptors is greater than the RX free
1733          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1734          * register. Update the RDT with the value of the last processed RX
1735          * descriptor minus 1, to guarantee that the RDT register is never
1736          * equal to the RDH register, which creates a "full" ring situtation
1737          * from the hardware point of view.
1738          */
1739         nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1740         if (nb_hold > rxq->rx_free_thresh) {
1741                 rx_id = (uint16_t)(rx_id == 0 ?
1742                                    (rxq->nb_rx_desc - 1) : (rx_id - 1));
1743                 /* write TAIL register */
1744                 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
1745                 nb_hold = 0;
1746         }
1747         rxq->nb_rx_hold = nb_hold;
1748
1749         /* return received packet in the burst */
1750         return nb_rx;
1751 }
1752
1753 const uint32_t *
1754 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1755 {
1756         struct ice_adapter *ad =
1757                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1758         const uint32_t *ptypes;
1759
1760         static const uint32_t ptypes_os[] = {
1761                 /* refers to ice_get_default_pkt_type() */
1762                 RTE_PTYPE_L2_ETHER,
1763                 RTE_PTYPE_L2_ETHER_TIMESYNC,
1764                 RTE_PTYPE_L2_ETHER_LLDP,
1765                 RTE_PTYPE_L2_ETHER_ARP,
1766                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1767                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1768                 RTE_PTYPE_L4_FRAG,
1769                 RTE_PTYPE_L4_ICMP,
1770                 RTE_PTYPE_L4_NONFRAG,
1771                 RTE_PTYPE_L4_SCTP,
1772                 RTE_PTYPE_L4_TCP,
1773                 RTE_PTYPE_L4_UDP,
1774                 RTE_PTYPE_TUNNEL_GRENAT,
1775                 RTE_PTYPE_TUNNEL_IP,
1776                 RTE_PTYPE_INNER_L2_ETHER,
1777                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1778                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1779                 RTE_PTYPE_INNER_L4_FRAG,
1780                 RTE_PTYPE_INNER_L4_ICMP,
1781                 RTE_PTYPE_INNER_L4_NONFRAG,
1782                 RTE_PTYPE_INNER_L4_SCTP,
1783                 RTE_PTYPE_INNER_L4_TCP,
1784                 RTE_PTYPE_INNER_L4_UDP,
1785                 RTE_PTYPE_UNKNOWN
1786         };
1787
1788         static const uint32_t ptypes_comms[] = {
1789                 /* refers to ice_get_default_pkt_type() */
1790                 RTE_PTYPE_L2_ETHER,
1791                 RTE_PTYPE_L2_ETHER_TIMESYNC,
1792                 RTE_PTYPE_L2_ETHER_LLDP,
1793                 RTE_PTYPE_L2_ETHER_ARP,
1794                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1795                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1796                 RTE_PTYPE_L4_FRAG,
1797                 RTE_PTYPE_L4_ICMP,
1798                 RTE_PTYPE_L4_NONFRAG,
1799                 RTE_PTYPE_L4_SCTP,
1800                 RTE_PTYPE_L4_TCP,
1801                 RTE_PTYPE_L4_UDP,
1802                 RTE_PTYPE_TUNNEL_GRENAT,
1803                 RTE_PTYPE_TUNNEL_IP,
1804                 RTE_PTYPE_INNER_L2_ETHER,
1805                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1806                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1807                 RTE_PTYPE_INNER_L4_FRAG,
1808                 RTE_PTYPE_INNER_L4_ICMP,
1809                 RTE_PTYPE_INNER_L4_NONFRAG,
1810                 RTE_PTYPE_INNER_L4_SCTP,
1811                 RTE_PTYPE_INNER_L4_TCP,
1812                 RTE_PTYPE_INNER_L4_UDP,
1813                 RTE_PTYPE_TUNNEL_GTPC,
1814                 RTE_PTYPE_TUNNEL_GTPU,
1815                 RTE_PTYPE_L2_ETHER_PPPOE,
1816                 RTE_PTYPE_UNKNOWN
1817         };
1818
1819         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1820                 ptypes = ptypes_comms;
1821         else
1822                 ptypes = ptypes_os;
1823
1824         if (dev->rx_pkt_burst == ice_recv_pkts ||
1825             dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
1826             dev->rx_pkt_burst == ice_recv_scattered_pkts)
1827                 return ptypes;
1828
1829 #ifdef RTE_ARCH_X86
1830         if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
1831             dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
1832             dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
1833             dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
1834                 return ptypes;
1835 #endif
1836
1837         return NULL;
1838 }
1839
1840 int
1841 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
1842 {
1843         volatile union ice_rx_flex_desc *rxdp;
1844         struct ice_rx_queue *rxq = rx_queue;
1845         uint32_t desc;
1846
1847         if (unlikely(offset >= rxq->nb_rx_desc))
1848                 return -EINVAL;
1849
1850         if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1851                 return RTE_ETH_RX_DESC_UNAVAIL;
1852
1853         desc = rxq->rx_tail + offset;
1854         if (desc >= rxq->nb_rx_desc)
1855                 desc -= rxq->nb_rx_desc;
1856
1857         rxdp = &rxq->rx_ring[desc];
1858         if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1859             (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
1860                 return RTE_ETH_RX_DESC_DONE;
1861
1862         return RTE_ETH_RX_DESC_AVAIL;
1863 }
1864
1865 int
1866 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
1867 {
1868         struct ice_tx_queue *txq = tx_queue;
1869         volatile uint64_t *status;
1870         uint64_t mask, expect;
1871         uint32_t desc;
1872
1873         if (unlikely(offset >= txq->nb_tx_desc))
1874                 return -EINVAL;
1875
1876         desc = txq->tx_tail + offset;
1877         /* go to next desc that has the RS bit */
1878         desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
1879                 txq->tx_rs_thresh;
1880         if (desc >= txq->nb_tx_desc) {
1881                 desc -= txq->nb_tx_desc;
1882                 if (desc >= txq->nb_tx_desc)
1883                         desc -= txq->nb_tx_desc;
1884         }
1885
1886         status = &txq->tx_ring[desc].cmd_type_offset_bsz;
1887         mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
1888         expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
1889                                   ICE_TXD_QW1_DTYPE_S);
1890         if ((*status & mask) == expect)
1891                 return RTE_ETH_TX_DESC_DONE;
1892
1893         return RTE_ETH_TX_DESC_FULL;
1894 }
1895
1896 void
1897 ice_free_queues(struct rte_eth_dev *dev)
1898 {
1899         uint16_t i;
1900
1901         PMD_INIT_FUNC_TRACE();
1902
1903         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1904                 if (!dev->data->rx_queues[i])
1905                         continue;
1906                 ice_rx_queue_release(dev->data->rx_queues[i]);
1907                 dev->data->rx_queues[i] = NULL;
1908                 rte_eth_dma_zone_free(dev, "rx_ring", i);
1909         }
1910         dev->data->nb_rx_queues = 0;
1911
1912         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1913                 if (!dev->data->tx_queues[i])
1914                         continue;
1915                 ice_tx_queue_release(dev->data->tx_queues[i]);
1916                 dev->data->tx_queues[i] = NULL;
1917                 rte_eth_dma_zone_free(dev, "tx_ring", i);
1918         }
1919         dev->data->nb_tx_queues = 0;
1920 }
1921
1922 #define ICE_FDIR_NUM_TX_DESC  ICE_MIN_RING_DESC
1923 #define ICE_FDIR_NUM_RX_DESC  ICE_MIN_RING_DESC
1924
1925 int
1926 ice_fdir_setup_tx_resources(struct ice_pf *pf)
1927 {
1928         struct ice_tx_queue *txq;
1929         const struct rte_memzone *tz = NULL;
1930         uint32_t ring_size;
1931         struct rte_eth_dev *dev;
1932
1933         if (!pf) {
1934                 PMD_DRV_LOG(ERR, "PF is not available");
1935                 return -EINVAL;
1936         }
1937
1938         dev = pf->adapter->eth_dev;
1939
1940         /* Allocate the TX queue data structure. */
1941         txq = rte_zmalloc_socket("ice fdir tx queue",
1942                                  sizeof(struct ice_tx_queue),
1943                                  RTE_CACHE_LINE_SIZE,
1944                                  SOCKET_ID_ANY);
1945         if (!txq) {
1946                 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
1947                             "tx queue structure.");
1948                 return -ENOMEM;
1949         }
1950
1951         /* Allocate TX hardware ring descriptors. */
1952         ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
1953         ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1954
1955         tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
1956                                       ICE_FDIR_QUEUE_ID, ring_size,
1957                                       ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
1958         if (!tz) {
1959                 ice_tx_queue_release(txq);
1960                 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
1961                 return -ENOMEM;
1962         }
1963
1964         txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
1965         txq->queue_id = ICE_FDIR_QUEUE_ID;
1966         txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
1967         txq->vsi = pf->fdir.fdir_vsi;
1968
1969         txq->tx_ring_dma = tz->iova;
1970         txq->tx_ring = (struct ice_tx_desc *)tz->addr;
1971         /*
1972          * don't need to allocate software ring and reset for the fdir
1973          * program queue just set the queue has been configured.
1974          */
1975         txq->q_set = true;
1976         pf->fdir.txq = txq;
1977
1978         txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1979
1980         return ICE_SUCCESS;
1981 }
1982
1983 int
1984 ice_fdir_setup_rx_resources(struct ice_pf *pf)
1985 {
1986         struct ice_rx_queue *rxq;
1987         const struct rte_memzone *rz = NULL;
1988         uint32_t ring_size;
1989         struct rte_eth_dev *dev;
1990
1991         if (!pf) {
1992                 PMD_DRV_LOG(ERR, "PF is not available");
1993                 return -EINVAL;
1994         }
1995
1996         dev = pf->adapter->eth_dev;
1997
1998         /* Allocate the RX queue data structure. */
1999         rxq = rte_zmalloc_socket("ice fdir rx queue",
2000                                  sizeof(struct ice_rx_queue),
2001                                  RTE_CACHE_LINE_SIZE,
2002                                  SOCKET_ID_ANY);
2003         if (!rxq) {
2004                 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2005                             "rx queue structure.");
2006                 return -ENOMEM;
2007         }
2008
2009         /* Allocate RX hardware ring descriptors. */
2010         ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2011         ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2012
2013         rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2014                                       ICE_FDIR_QUEUE_ID, ring_size,
2015                                       ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2016         if (!rz) {
2017                 ice_rx_queue_release(rxq);
2018                 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2019                 return -ENOMEM;
2020         }
2021
2022         rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2023         rxq->queue_id = ICE_FDIR_QUEUE_ID;
2024         rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2025         rxq->vsi = pf->fdir.fdir_vsi;
2026
2027         rxq->rx_ring_dma = rz->iova;
2028         memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2029                sizeof(union ice_32byte_rx_desc));
2030         rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2031
2032         /*
2033          * Don't need to allocate software ring and reset for the fdir
2034          * rx queue, just set the queue has been configured.
2035          */
2036         rxq->q_set = true;
2037         pf->fdir.rxq = rxq;
2038
2039         rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2040
2041         return ICE_SUCCESS;
2042 }
2043
2044 uint16_t
2045 ice_recv_pkts(void *rx_queue,
2046               struct rte_mbuf **rx_pkts,
2047               uint16_t nb_pkts)
2048 {
2049         struct ice_rx_queue *rxq = rx_queue;
2050         volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2051         volatile union ice_rx_flex_desc *rxdp;
2052         union ice_rx_flex_desc rxd;
2053         struct ice_rx_entry *sw_ring = rxq->sw_ring;
2054         struct ice_rx_entry *rxe;
2055         struct rte_mbuf *nmb; /* new allocated mbuf */
2056         struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2057         uint16_t rx_id = rxq->rx_tail;
2058         uint16_t nb_rx = 0;
2059         uint16_t nb_hold = 0;
2060         uint16_t rx_packet_len;
2061         uint16_t rx_stat_err0;
2062         uint64_t dma_addr;
2063         uint64_t pkt_flags;
2064         uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2065         struct rte_eth_dev *dev;
2066
2067         while (nb_rx < nb_pkts) {
2068                 rxdp = &rx_ring[rx_id];
2069                 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2070
2071                 /* Check the DD bit first */
2072                 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2073                         break;
2074
2075                 /* allocate mbuf */
2076                 nmb = rte_mbuf_raw_alloc(rxq->mp);
2077                 if (unlikely(!nmb)) {
2078                         dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
2079                         dev->data->rx_mbuf_alloc_failed++;
2080                         break;
2081                 }
2082                 rxd = *rxdp; /* copy descriptor in ring to temp variable*/
2083
2084                 nb_hold++;
2085                 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2086                 rx_id++;
2087                 if (unlikely(rx_id == rxq->nb_rx_desc))
2088                         rx_id = 0;
2089                 rxm = rxe->mbuf;
2090                 rxe->mbuf = nmb;
2091                 dma_addr =
2092                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2093
2094                 /**
2095                  * fill the read format of descriptor with physic address in
2096                  * new allocated mbuf: nmb
2097                  */
2098                 rxdp->read.hdr_addr = 0;
2099                 rxdp->read.pkt_addr = dma_addr;
2100
2101                 /* calculate rx_packet_len of the received pkt */
2102                 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2103                                  ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2104
2105                 /* fill old mbuf with received descriptor: rxd */
2106                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2107                 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2108                 rxm->nb_segs = 1;
2109                 rxm->next = NULL;
2110                 rxm->pkt_len = rx_packet_len;
2111                 rxm->data_len = rx_packet_len;
2112                 rxm->port = rxq->port_id;
2113                 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2114                         rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2115                 ice_rxd_to_vlan_tci(rxm, &rxd);
2116                 ice_rxd_to_pkt_fields(rxm, &rxd);
2117                 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2118                 rxm->ol_flags |= pkt_flags;
2119                 /* copy old mbuf to rx_pkts */
2120                 rx_pkts[nb_rx++] = rxm;
2121         }
2122         rxq->rx_tail = rx_id;
2123         /**
2124          * If the number of free RX descriptors is greater than the RX free
2125          * threshold of the queue, advance the receive tail register of queue.
2126          * Update that register with the value of the last processed RX
2127          * descriptor minus 1.
2128          */
2129         nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2130         if (nb_hold > rxq->rx_free_thresh) {
2131                 rx_id = (uint16_t)(rx_id == 0 ?
2132                                    (rxq->nb_rx_desc - 1) : (rx_id - 1));
2133                 /* write TAIL register */
2134                 ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
2135                 nb_hold = 0;
2136         }
2137         rxq->nb_rx_hold = nb_hold;
2138
2139         /* return received packet in the burst */
2140         return nb_rx;
2141 }
2142
2143 static inline void
2144 ice_parse_tunneling_params(uint64_t ol_flags,
2145                             union ice_tx_offload tx_offload,
2146                             uint32_t *cd_tunneling)
2147 {
2148         /* EIPT: External (outer) IP header type */
2149         if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2150                 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2151         else if (ol_flags & PKT_TX_OUTER_IPV4)
2152                 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2153         else if (ol_flags & PKT_TX_OUTER_IPV6)
2154                 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2155
2156         /* EIPLEN: External (outer) IP header length, in DWords */
2157         *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2158                 ICE_TXD_CTX_QW0_EIPLEN_S;
2159
2160         /* L4TUNT: L4 Tunneling Type */
2161         switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2162         case PKT_TX_TUNNEL_IPIP:
2163                 /* for non UDP / GRE tunneling, set to 00b */
2164                 break;
2165         case PKT_TX_TUNNEL_VXLAN:
2166         case PKT_TX_TUNNEL_GTP:
2167         case PKT_TX_TUNNEL_GENEVE:
2168                 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2169                 break;
2170         case PKT_TX_TUNNEL_GRE:
2171                 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2172                 break;
2173         default:
2174                 PMD_TX_LOG(ERR, "Tunnel type not supported");
2175                 return;
2176         }
2177
2178         /* L4TUNLEN: L4 Tunneling Length, in Words
2179          *
2180          * We depend on app to set rte_mbuf.l2_len correctly.
2181          * For IP in GRE it should be set to the length of the GRE
2182          * header;
2183          * For MAC in GRE or MAC in UDP it should be set to the length
2184          * of the GRE or UDP headers plus the inner MAC up to including
2185          * its last Ethertype.
2186          * If MPLS labels exists, it should include them as well.
2187          */
2188         *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2189                 ICE_TXD_CTX_QW0_NATLEN_S;
2190
2191         if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
2192             (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
2193             (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2194                 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2195 }
2196
2197 static inline void
2198 ice_txd_enable_checksum(uint64_t ol_flags,
2199                         uint32_t *td_cmd,
2200                         uint32_t *td_offset,
2201                         union ice_tx_offload tx_offload)
2202 {
2203         /* Set MACLEN */
2204         if (ol_flags & PKT_TX_TUNNEL_MASK)
2205                 *td_offset |= (tx_offload.outer_l2_len >> 1)
2206                         << ICE_TX_DESC_LEN_MACLEN_S;
2207         else
2208                 *td_offset |= (tx_offload.l2_len >> 1)
2209                         << ICE_TX_DESC_LEN_MACLEN_S;
2210
2211         /* Enable L3 checksum offloads */
2212         if (ol_flags & PKT_TX_IP_CKSUM) {
2213                 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2214                 *td_offset |= (tx_offload.l3_len >> 2) <<
2215                               ICE_TX_DESC_LEN_IPLEN_S;
2216         } else if (ol_flags & PKT_TX_IPV4) {
2217                 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2218                 *td_offset |= (tx_offload.l3_len >> 2) <<
2219                               ICE_TX_DESC_LEN_IPLEN_S;
2220         } else if (ol_flags & PKT_TX_IPV6) {
2221                 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2222                 *td_offset |= (tx_offload.l3_len >> 2) <<
2223                               ICE_TX_DESC_LEN_IPLEN_S;
2224         }
2225
2226         if (ol_flags & PKT_TX_TCP_SEG) {
2227                 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2228                 *td_offset |= (tx_offload.l4_len >> 2) <<
2229                               ICE_TX_DESC_LEN_L4_LEN_S;
2230                 return;
2231         }
2232
2233         /* Enable L4 checksum offloads */
2234         switch (ol_flags & PKT_TX_L4_MASK) {
2235         case PKT_TX_TCP_CKSUM:
2236                 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2237                 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2238                               ICE_TX_DESC_LEN_L4_LEN_S;
2239                 break;
2240         case PKT_TX_SCTP_CKSUM:
2241                 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2242                 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2243                               ICE_TX_DESC_LEN_L4_LEN_S;
2244                 break;
2245         case PKT_TX_UDP_CKSUM:
2246                 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2247                 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2248                               ICE_TX_DESC_LEN_L4_LEN_S;
2249                 break;
2250         default:
2251                 break;
2252         }
2253 }
2254
2255 static inline int
2256 ice_xmit_cleanup(struct ice_tx_queue *txq)
2257 {
2258         struct ice_tx_entry *sw_ring = txq->sw_ring;
2259         volatile struct ice_tx_desc *txd = txq->tx_ring;
2260         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2261         uint16_t nb_tx_desc = txq->nb_tx_desc;
2262         uint16_t desc_to_clean_to;
2263         uint16_t nb_tx_to_clean;
2264
2265         /* Determine the last descriptor needing to be cleaned */
2266         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2267         if (desc_to_clean_to >= nb_tx_desc)
2268                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2269
2270         /* Check to make sure the last descriptor to clean is done */
2271         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2272         if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2273             rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2274                 PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
2275                                 "(port=%d queue=%d) value=0x%"PRIx64"\n",
2276                                 desc_to_clean_to,
2277                                 txq->port_id, txq->queue_id,
2278                                 txd[desc_to_clean_to].cmd_type_offset_bsz);
2279                 /* Failed to clean any descriptors */
2280                 return -1;
2281         }
2282
2283         /* Figure out how many descriptors will be cleaned */
2284         if (last_desc_cleaned > desc_to_clean_to)
2285                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2286                                             desc_to_clean_to);
2287         else
2288                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2289                                             last_desc_cleaned);
2290
2291         /* The last descriptor to clean is done, so that means all the
2292          * descriptors from the last descriptor that was cleaned
2293          * up to the last descriptor with the RS bit set
2294          * are done. Only reset the threshold descriptor.
2295          */
2296         txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2297
2298         /* Update the txq to reflect the last descriptor that was cleaned */
2299         txq->last_desc_cleaned = desc_to_clean_to;
2300         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2301
2302         return 0;
2303 }
2304
2305 /* Construct the tx flags */
2306 static inline uint64_t
2307 ice_build_ctob(uint32_t td_cmd,
2308                uint32_t td_offset,
2309                uint16_t size,
2310                uint32_t td_tag)
2311 {
2312         return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2313                                 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2314                                 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2315                                 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2316                                 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2317 }
2318
2319 /* Check if the context descriptor is needed for TX offloading */
2320 static inline uint16_t
2321 ice_calc_context_desc(uint64_t flags)
2322 {
2323         static uint64_t mask = PKT_TX_TCP_SEG |
2324                 PKT_TX_QINQ |
2325                 PKT_TX_OUTER_IP_CKSUM |
2326                 PKT_TX_TUNNEL_MASK;
2327
2328         return (flags & mask) ? 1 : 0;
2329 }
2330
2331 /* set ice TSO context descriptor */
2332 static inline uint64_t
2333 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2334 {
2335         uint64_t ctx_desc = 0;
2336         uint32_t cd_cmd, hdr_len, cd_tso_len;
2337
2338         if (!tx_offload.l4_len) {
2339                 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2340                 return ctx_desc;
2341         }
2342
2343         hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2344         hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2345                    tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2346
2347         cd_cmd = ICE_TX_CTX_DESC_TSO;
2348         cd_tso_len = mbuf->pkt_len - hdr_len;
2349         ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2350                     ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2351                     ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
2352
2353         return ctx_desc;
2354 }
2355
2356 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2357 #define ICE_MAX_DATA_PER_TXD \
2358         (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2359 /* Calculate the number of TX descriptors needed for each pkt */
2360 static inline uint16_t
2361 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2362 {
2363         struct rte_mbuf *txd = tx_pkt;
2364         uint16_t count = 0;
2365
2366         while (txd != NULL) {
2367                 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
2368                 txd = txd->next;
2369         }
2370
2371         return count;
2372 }
2373
2374 uint16_t
2375 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2376 {
2377         struct ice_tx_queue *txq;
2378         volatile struct ice_tx_desc *tx_ring;
2379         volatile struct ice_tx_desc *txd;
2380         struct ice_tx_entry *sw_ring;
2381         struct ice_tx_entry *txe, *txn;
2382         struct rte_mbuf *tx_pkt;
2383         struct rte_mbuf *m_seg;
2384         uint32_t cd_tunneling_params;
2385         uint16_t tx_id;
2386         uint16_t nb_tx;
2387         uint16_t nb_used;
2388         uint16_t nb_ctx;
2389         uint32_t td_cmd = 0;
2390         uint32_t td_offset = 0;
2391         uint32_t td_tag = 0;
2392         uint16_t tx_last;
2393         uint16_t slen;
2394         uint64_t buf_dma_addr;
2395         uint64_t ol_flags;
2396         union ice_tx_offload tx_offload = {0};
2397
2398         txq = tx_queue;
2399         sw_ring = txq->sw_ring;
2400         tx_ring = txq->tx_ring;
2401         tx_id = txq->tx_tail;
2402         txe = &sw_ring[tx_id];
2403
2404         /* Check if the descriptor ring needs to be cleaned. */
2405         if (txq->nb_tx_free < txq->tx_free_thresh)
2406                 (void)ice_xmit_cleanup(txq);
2407
2408         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2409                 tx_pkt = *tx_pkts++;
2410
2411                 td_cmd = 0;
2412                 td_tag = 0;
2413                 td_offset = 0;
2414                 ol_flags = tx_pkt->ol_flags;
2415                 tx_offload.l2_len = tx_pkt->l2_len;
2416                 tx_offload.l3_len = tx_pkt->l3_len;
2417                 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2418                 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2419                 tx_offload.l4_len = tx_pkt->l4_len;
2420                 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2421                 /* Calculate the number of context descriptors needed. */
2422                 nb_ctx = ice_calc_context_desc(ol_flags);
2423
2424                 /* The number of descriptors that must be allocated for
2425                  * a packet equals to the number of the segments of that
2426                  * packet plus the number of context descriptor if needed.
2427                  * Recalculate the needed tx descs when TSO enabled in case
2428                  * the mbuf data size exceeds max data size that hw allows
2429                  * per tx desc.
2430                  */
2431                 if (ol_flags & PKT_TX_TCP_SEG)
2432                         nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2433                                              nb_ctx);
2434                 else
2435                         nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2436                 tx_last = (uint16_t)(tx_id + nb_used - 1);
2437
2438                 /* Circular ring */
2439                 if (tx_last >= txq->nb_tx_desc)
2440                         tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2441
2442                 if (nb_used > txq->nb_tx_free) {
2443                         if (ice_xmit_cleanup(txq) != 0) {
2444                                 if (nb_tx == 0)
2445                                         return 0;
2446                                 goto end_of_tx;
2447                         }
2448                         if (unlikely(nb_used > txq->tx_rs_thresh)) {
2449                                 while (nb_used > txq->nb_tx_free) {
2450                                         if (ice_xmit_cleanup(txq) != 0) {
2451                                                 if (nb_tx == 0)
2452                                                         return 0;
2453                                                 goto end_of_tx;
2454                                         }
2455                                 }
2456                         }
2457                 }
2458
2459                 /* Descriptor based VLAN insertion */
2460                 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2461                         td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2462                         td_tag = tx_pkt->vlan_tci;
2463                 }
2464
2465                 /* Fill in tunneling parameters if necessary */
2466                 cd_tunneling_params = 0;
2467                 if (ol_flags & PKT_TX_TUNNEL_MASK)
2468                         ice_parse_tunneling_params(ol_flags, tx_offload,
2469                                                    &cd_tunneling_params);
2470
2471                 /* Enable checksum offloading */
2472                 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2473                         ice_txd_enable_checksum(ol_flags, &td_cmd,
2474                                                 &td_offset, tx_offload);
2475
2476                 if (nb_ctx) {
2477                         /* Setup TX context descriptor if required */
2478                         volatile struct ice_tx_ctx_desc *ctx_txd =
2479                                 (volatile struct ice_tx_ctx_desc *)
2480                                         &tx_ring[tx_id];
2481                         uint16_t cd_l2tag2 = 0;
2482                         uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2483
2484                         txn = &sw_ring[txe->next_id];
2485                         RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2486                         if (txe->mbuf) {
2487                                 rte_pktmbuf_free_seg(txe->mbuf);
2488                                 txe->mbuf = NULL;
2489                         }
2490
2491                         if (ol_flags & PKT_TX_TCP_SEG)
2492                                 cd_type_cmd_tso_mss |=
2493                                         ice_set_tso_ctx(tx_pkt, tx_offload);
2494
2495                         ctx_txd->tunneling_params =
2496                                 rte_cpu_to_le_32(cd_tunneling_params);
2497
2498                         /* TX context descriptor based double VLAN insert */
2499                         if (ol_flags & PKT_TX_QINQ) {
2500                                 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2501                                 cd_type_cmd_tso_mss |=
2502                                         ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2503                                          ICE_TXD_CTX_QW1_CMD_S);
2504                         }
2505                         ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2506                         ctx_txd->qw1 =
2507                                 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2508
2509                         txe->last_id = tx_last;
2510                         tx_id = txe->next_id;
2511                         txe = txn;
2512                 }
2513                 m_seg = tx_pkt;
2514
2515                 do {
2516                         txd = &tx_ring[tx_id];
2517                         txn = &sw_ring[txe->next_id];
2518
2519                         if (txe->mbuf)
2520                                 rte_pktmbuf_free_seg(txe->mbuf);
2521                         txe->mbuf = m_seg;
2522
2523                         /* Setup TX Descriptor */
2524                         slen = m_seg->data_len;
2525                         buf_dma_addr = rte_mbuf_data_iova(m_seg);
2526
2527                         while ((ol_flags & PKT_TX_TCP_SEG) &&
2528                                 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2529                                 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2530                                 txd->cmd_type_offset_bsz =
2531                                 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2532                                 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2533                                 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2534                                 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2535                                  ICE_TXD_QW1_TX_BUF_SZ_S) |
2536                                 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2537
2538                                 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2539                                 slen -= ICE_MAX_DATA_PER_TXD;
2540
2541                                 txe->last_id = tx_last;
2542                                 tx_id = txe->next_id;
2543                                 txe = txn;
2544                                 txd = &tx_ring[tx_id];
2545                                 txn = &sw_ring[txe->next_id];
2546                         }
2547
2548                         txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2549                         txd->cmd_type_offset_bsz =
2550                                 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2551                                 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2552                                 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2553                                 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2554                                 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2555
2556                         txe->last_id = tx_last;
2557                         tx_id = txe->next_id;
2558                         txe = txn;
2559                         m_seg = m_seg->next;
2560                 } while (m_seg);
2561
2562                 /* fill the last descriptor with End of Packet (EOP) bit */
2563                 td_cmd |= ICE_TX_DESC_CMD_EOP;
2564                 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2565                 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2566
2567                 /* set RS bit on the last descriptor of one packet */
2568                 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2569                         PMD_TX_FREE_LOG(DEBUG,
2570                                         "Setting RS bit on TXD id="
2571                                         "%4u (port=%d queue=%d)",
2572                                         tx_last, txq->port_id, txq->queue_id);
2573
2574                         td_cmd |= ICE_TX_DESC_CMD_RS;
2575
2576                         /* Update txq RS bit counters */
2577                         txq->nb_tx_used = 0;
2578                 }
2579                 txd->cmd_type_offset_bsz |=
2580                         rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2581                                          ICE_TXD_QW1_CMD_S);
2582         }
2583 end_of_tx:
2584         /* update Tail register */
2585         ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2586         txq->tx_tail = tx_id;
2587
2588         return nb_tx;
2589 }
2590
2591 static __rte_always_inline int
2592 ice_tx_free_bufs(struct ice_tx_queue *txq)
2593 {
2594         struct ice_tx_entry *txep;
2595         uint16_t i;
2596
2597         if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2598              rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2599             rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2600                 return 0;
2601
2602         txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2603
2604         for (i = 0; i < txq->tx_rs_thresh; i++)
2605                 rte_prefetch0((txep + i)->mbuf);
2606
2607         if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2608                 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2609                         rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2610                         txep->mbuf = NULL;
2611                 }
2612         } else {
2613                 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2614                         rte_pktmbuf_free_seg(txep->mbuf);
2615                         txep->mbuf = NULL;
2616                 }
2617         }
2618
2619         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2620         txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2621         if (txq->tx_next_dd >= txq->nb_tx_desc)
2622                 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2623
2624         return txq->tx_rs_thresh;
2625 }
2626
2627 static int
2628 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2629                         uint32_t free_cnt)
2630 {
2631         struct ice_tx_entry *swr_ring = txq->sw_ring;
2632         uint16_t i, tx_last, tx_id;
2633         uint16_t nb_tx_free_last;
2634         uint16_t nb_tx_to_clean;
2635         uint32_t pkt_cnt;
2636
2637         /* Start free mbuf from the next of tx_tail */
2638         tx_last = txq->tx_tail;
2639         tx_id  = swr_ring[tx_last].next_id;
2640
2641         if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2642                 return 0;
2643
2644         nb_tx_to_clean = txq->nb_tx_free;
2645         nb_tx_free_last = txq->nb_tx_free;
2646         if (!free_cnt)
2647                 free_cnt = txq->nb_tx_desc;
2648
2649         /* Loop through swr_ring to count the amount of
2650          * freeable mubfs and packets.
2651          */
2652         for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2653                 for (i = 0; i < nb_tx_to_clean &&
2654                         pkt_cnt < free_cnt &&
2655                         tx_id != tx_last; i++) {
2656                         if (swr_ring[tx_id].mbuf != NULL) {
2657                                 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2658                                 swr_ring[tx_id].mbuf = NULL;
2659
2660                                 /*
2661                                  * last segment in the packet,
2662                                  * increment packet count
2663                                  */
2664                                 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2665                         }
2666
2667                         tx_id = swr_ring[tx_id].next_id;
2668                 }
2669
2670                 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2671                         txq->nb_tx_free || tx_id == tx_last)
2672                         break;
2673
2674                 if (pkt_cnt < free_cnt) {
2675                         if (ice_xmit_cleanup(txq))
2676                                 break;
2677
2678                         nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2679                         nb_tx_free_last = txq->nb_tx_free;
2680                 }
2681         }
2682
2683         return (int)pkt_cnt;
2684 }
2685
2686 #ifdef RTE_ARCH_X86
2687 static int
2688 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2689                         uint32_t free_cnt __rte_unused)
2690 {
2691         return -ENOTSUP;
2692 }
2693 #endif
2694
2695 static int
2696 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2697                         uint32_t free_cnt)
2698 {
2699         int i, n, cnt;
2700
2701         if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2702                 free_cnt = txq->nb_tx_desc;
2703
2704         cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2705
2706         for (i = 0; i < cnt; i += n) {
2707                 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2708                         break;
2709
2710                 n = ice_tx_free_bufs(txq);
2711
2712                 if (n == 0)
2713                         break;
2714         }
2715
2716         return i;
2717 }
2718
2719 int
2720 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
2721 {
2722         struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
2723         struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
2724         struct ice_adapter *ad =
2725                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2726
2727 #ifdef RTE_ARCH_X86
2728         if (ad->tx_vec_allowed)
2729                 return ice_tx_done_cleanup_vec(q, free_cnt);
2730 #endif
2731         if (ad->tx_simple_allowed)
2732                 return ice_tx_done_cleanup_simple(q, free_cnt);
2733         else
2734                 return ice_tx_done_cleanup_full(q, free_cnt);
2735 }
2736
2737 /* Populate 4 descriptors with data from 4 mbufs */
2738 static inline void
2739 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2740 {
2741         uint64_t dma_addr;
2742         uint32_t i;
2743
2744         for (i = 0; i < 4; i++, txdp++, pkts++) {
2745                 dma_addr = rte_mbuf_data_iova(*pkts);
2746                 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2747                 txdp->cmd_type_offset_bsz =
2748                         ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2749                                        (*pkts)->data_len, 0);
2750         }
2751 }
2752
2753 /* Populate 1 descriptor with data from 1 mbuf */
2754 static inline void
2755 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
2756 {
2757         uint64_t dma_addr;
2758
2759         dma_addr = rte_mbuf_data_iova(*pkts);
2760         txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
2761         txdp->cmd_type_offset_bsz =
2762                 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
2763                                (*pkts)->data_len, 0);
2764 }
2765
2766 static inline void
2767 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
2768                     uint16_t nb_pkts)
2769 {
2770         volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
2771         struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
2772         const int N_PER_LOOP = 4;
2773         const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
2774         int mainpart, leftover;
2775         int i, j;
2776
2777         /**
2778          * Process most of the packets in chunks of N pkts.  Any
2779          * leftover packets will get processed one at a time.
2780          */
2781         mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
2782         leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
2783         for (i = 0; i < mainpart; i += N_PER_LOOP) {
2784                 /* Copy N mbuf pointers to the S/W ring */
2785                 for (j = 0; j < N_PER_LOOP; ++j)
2786                         (txep + i + j)->mbuf = *(pkts + i + j);
2787                 tx4(txdp + i, pkts + i);
2788         }
2789
2790         if (unlikely(leftover > 0)) {
2791                 for (i = 0; i < leftover; ++i) {
2792                         (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
2793                         tx1(txdp + mainpart + i, pkts + mainpart + i);
2794                 }
2795         }
2796 }
2797
2798 static inline uint16_t
2799 tx_xmit_pkts(struct ice_tx_queue *txq,
2800              struct rte_mbuf **tx_pkts,
2801              uint16_t nb_pkts)
2802 {
2803         volatile struct ice_tx_desc *txr = txq->tx_ring;
2804         uint16_t n = 0;
2805
2806         /**
2807          * Begin scanning the H/W ring for done descriptors when the number
2808          * of available descriptors drops below tx_free_thresh. For each done
2809          * descriptor, free the associated buffer.
2810          */
2811         if (txq->nb_tx_free < txq->tx_free_thresh)
2812                 ice_tx_free_bufs(txq);
2813
2814         /* Use available descriptor only */
2815         nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
2816         if (unlikely(!nb_pkts))
2817                 return 0;
2818
2819         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
2820         if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
2821                 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
2822                 ice_tx_fill_hw_ring(txq, tx_pkts, n);
2823                 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2824                         rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2825                                          ICE_TXD_QW1_CMD_S);
2826                 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2827                 txq->tx_tail = 0;
2828         }
2829
2830         /* Fill hardware descriptor ring with mbuf data */
2831         ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
2832         txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
2833
2834         /* Determin if RS bit needs to be set */
2835         if (txq->tx_tail > txq->tx_next_rs) {
2836                 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
2837                         rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
2838                                          ICE_TXD_QW1_CMD_S);
2839                 txq->tx_next_rs =
2840                         (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
2841                 if (txq->tx_next_rs >= txq->nb_tx_desc)
2842                         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
2843         }
2844
2845         if (txq->tx_tail >= txq->nb_tx_desc)
2846                 txq->tx_tail = 0;
2847
2848         /* Update the tx tail register */
2849         ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
2850
2851         return nb_pkts;
2852 }
2853
2854 static uint16_t
2855 ice_xmit_pkts_simple(void *tx_queue,
2856                      struct rte_mbuf **tx_pkts,
2857                      uint16_t nb_pkts)
2858 {
2859         uint16_t nb_tx = 0;
2860
2861         if (likely(nb_pkts <= ICE_TX_MAX_BURST))
2862                 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2863                                     tx_pkts, nb_pkts);
2864
2865         while (nb_pkts) {
2866                 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
2867                                                       ICE_TX_MAX_BURST);
2868
2869                 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
2870                                    &tx_pkts[nb_tx], num);
2871                 nb_tx = (uint16_t)(nb_tx + ret);
2872                 nb_pkts = (uint16_t)(nb_pkts - ret);
2873                 if (ret < num)
2874                         break;
2875         }
2876
2877         return nb_tx;
2878 }
2879
2880 void __rte_cold
2881 ice_set_rx_function(struct rte_eth_dev *dev)
2882 {
2883         PMD_INIT_FUNC_TRACE();
2884         struct ice_adapter *ad =
2885                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2886 #ifdef RTE_ARCH_X86
2887         struct ice_rx_queue *rxq;
2888         int i;
2889         bool use_avx2 = false;
2890
2891         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2892                 if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
2893                         ad->rx_vec_allowed = true;
2894                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2895                                 rxq = dev->data->rx_queues[i];
2896                                 if (rxq && ice_rxq_vec_setup(rxq)) {
2897                                         ad->rx_vec_allowed = false;
2898                                         break;
2899                                 }
2900                         }
2901
2902                         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
2903                         rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
2904                                 use_avx2 = true;
2905
2906                 } else {
2907                         ad->rx_vec_allowed = false;
2908                 }
2909         }
2910
2911         if (ad->rx_vec_allowed) {
2912                 if (dev->data->scattered_rx) {
2913                         PMD_DRV_LOG(DEBUG,
2914                                         "Using %sVector Scattered Rx (port %d).",
2915                                         use_avx2 ? "avx2 " : "",
2916                                         dev->data->port_id);
2917                         dev->rx_pkt_burst = use_avx2 ?
2918                                         ice_recv_scattered_pkts_vec_avx2 :
2919                                         ice_recv_scattered_pkts_vec;
2920                 } else {
2921                         PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
2922                                         use_avx2 ? "avx2 " : "",
2923                                         dev->data->port_id);
2924                         dev->rx_pkt_burst = use_avx2 ?
2925                                                 ice_recv_pkts_vec_avx2 :
2926                                                 ice_recv_pkts_vec;
2927                 }
2928                 return;
2929         }
2930
2931 #endif
2932
2933         if (dev->data->scattered_rx) {
2934                 /* Set the non-LRO scattered function */
2935                 PMD_INIT_LOG(DEBUG,
2936                              "Using a Scattered function on port %d.",
2937                              dev->data->port_id);
2938                 dev->rx_pkt_burst = ice_recv_scattered_pkts;
2939         } else if (ad->rx_bulk_alloc_allowed) {
2940                 PMD_INIT_LOG(DEBUG,
2941                              "Rx Burst Bulk Alloc Preconditions are "
2942                              "satisfied. Rx Burst Bulk Alloc function "
2943                              "will be used on port %d.",
2944                              dev->data->port_id);
2945                 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
2946         } else {
2947                 PMD_INIT_LOG(DEBUG,
2948                              "Rx Burst Bulk Alloc Preconditions are not "
2949                              "satisfied, Normal Rx will be used on port %d.",
2950                              dev->data->port_id);
2951                 dev->rx_pkt_burst = ice_recv_pkts;
2952         }
2953 }
2954
2955 static const struct {
2956         eth_rx_burst_t pkt_burst;
2957         const char *info;
2958 } ice_rx_burst_infos[] = {
2959         { ice_recv_scattered_pkts,          "Scalar Scattered" },
2960         { ice_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc" },
2961         { ice_recv_pkts,                    "Scalar" },
2962 #ifdef RTE_ARCH_X86
2963         { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
2964         { ice_recv_pkts_vec_avx2,           "Vector AVX2" },
2965         { ice_recv_scattered_pkts_vec,      "Vector SSE Scattered" },
2966         { ice_recv_pkts_vec,                "Vector SSE" },
2967 #endif
2968 };
2969
2970 int
2971 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
2972                       struct rte_eth_burst_mode *mode)
2973 {
2974         eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
2975         int ret = -EINVAL;
2976         unsigned int i;
2977
2978         for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
2979                 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
2980                         snprintf(mode->info, sizeof(mode->info), "%s",
2981                                  ice_rx_burst_infos[i].info);
2982                         ret = 0;
2983                         break;
2984                 }
2985         }
2986
2987         return ret;
2988 }
2989
2990 void __rte_cold
2991 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
2992 {
2993         struct ice_adapter *ad =
2994                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2995
2996         /* Use a simple Tx queue if possible (only fast free is allowed) */
2997         ad->tx_simple_allowed =
2998                 (txq->offloads ==
2999                 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3000                 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3001
3002         if (ad->tx_simple_allowed)
3003                 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3004                              txq->queue_id);
3005         else
3006                 PMD_INIT_LOG(DEBUG,
3007                              "Simple Tx can NOT be enabled on Tx queue %u.",
3008                              txq->queue_id);
3009 }
3010
3011 /*********************************************************************
3012  *
3013  *  TX prep functions
3014  *
3015  **********************************************************************/
3016 /* The default values of TSO MSS */
3017 #define ICE_MIN_TSO_MSS            64
3018 #define ICE_MAX_TSO_MSS            9728
3019 #define ICE_MAX_TSO_FRAME_SIZE     262144
3020 uint16_t
3021 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3022               uint16_t nb_pkts)
3023 {
3024         int i, ret;
3025         uint64_t ol_flags;
3026         struct rte_mbuf *m;
3027
3028         for (i = 0; i < nb_pkts; i++) {
3029                 m = tx_pkts[i];
3030                 ol_flags = m->ol_flags;
3031
3032                 if (ol_flags & PKT_TX_TCP_SEG &&
3033                     (m->tso_segsz < ICE_MIN_TSO_MSS ||
3034                      m->tso_segsz > ICE_MAX_TSO_MSS ||
3035                      m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3036                         /**
3037                          * MSS outside the range are considered malicious
3038                          */
3039                         rte_errno = EINVAL;
3040                         return i;
3041                 }
3042
3043 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
3044                 ret = rte_validate_tx_offload(m);
3045                 if (ret != 0) {
3046                         rte_errno = -ret;
3047                         return i;
3048                 }
3049 #endif
3050                 ret = rte_net_intel_cksum_prepare(m);
3051                 if (ret != 0) {
3052                         rte_errno = -ret;
3053                         return i;
3054                 }
3055         }
3056         return i;
3057 }
3058
3059 void __rte_cold
3060 ice_set_tx_function(struct rte_eth_dev *dev)
3061 {
3062         struct ice_adapter *ad =
3063                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3064 #ifdef RTE_ARCH_X86
3065         struct ice_tx_queue *txq;
3066         int i;
3067         bool use_avx2 = false;
3068
3069         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3070                 if (!ice_tx_vec_dev_check(dev)) {
3071                         ad->tx_vec_allowed = true;
3072                         for (i = 0; i < dev->data->nb_tx_queues; i++) {
3073                                 txq = dev->data->tx_queues[i];
3074                                 if (txq && ice_txq_vec_setup(txq)) {
3075                                         ad->tx_vec_allowed = false;
3076                                         break;
3077                                 }
3078                         }
3079
3080                         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3081                         rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
3082                                 use_avx2 = true;
3083
3084                 } else {
3085                         ad->tx_vec_allowed = false;
3086                 }
3087         }
3088
3089         if (ad->tx_vec_allowed) {
3090                 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3091                             use_avx2 ? "avx2 " : "",
3092                             dev->data->port_id);
3093                 dev->tx_pkt_burst = use_avx2 ?
3094                                     ice_xmit_pkts_vec_avx2 :
3095                                     ice_xmit_pkts_vec;
3096                 dev->tx_pkt_prepare = NULL;
3097
3098                 return;
3099         }
3100 #endif
3101
3102         if (ad->tx_simple_allowed) {
3103                 PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
3104                 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3105                 dev->tx_pkt_prepare = NULL;
3106         } else {
3107                 PMD_INIT_LOG(DEBUG, "Normal tx finally be used.");
3108                 dev->tx_pkt_burst = ice_xmit_pkts;
3109                 dev->tx_pkt_prepare = ice_prep_pkts;
3110         }
3111 }
3112
3113 static const struct {
3114         eth_tx_burst_t pkt_burst;
3115         const char *info;
3116 } ice_tx_burst_infos[] = {
3117         { ice_xmit_pkts_simple,   "Scalar Simple" },
3118         { ice_xmit_pkts,          "Scalar" },
3119 #ifdef RTE_ARCH_X86
3120         { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3121         { ice_xmit_pkts_vec,      "Vector SSE" },
3122 #endif
3123 };
3124
3125 int
3126 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3127                       struct rte_eth_burst_mode *mode)
3128 {
3129         eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3130         int ret = -EINVAL;
3131         unsigned int i;
3132
3133         for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3134                 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3135                         snprintf(mode->info, sizeof(mode->info), "%s",
3136                                  ice_tx_burst_infos[i].info);
3137                         ret = 0;
3138                         break;
3139                 }
3140         }
3141
3142         return ret;
3143 }
3144
3145 /* For each value it means, datasheet of hardware can tell more details
3146  *
3147  * @note: fix ice_dev_supported_ptypes_get() if any change here.
3148  */
3149 static inline uint32_t
3150 ice_get_default_pkt_type(uint16_t ptype)
3151 {
3152         static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3153                 __rte_cache_aligned = {
3154                 /* L2 types */
3155                 /* [0] reserved */
3156                 [1] = RTE_PTYPE_L2_ETHER,
3157                 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3158                 /* [3] - [5] reserved */
3159                 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3160                 /* [7] - [10] reserved */
3161                 [11] = RTE_PTYPE_L2_ETHER_ARP,
3162                 /* [12] - [21] reserved */
3163
3164                 /* Non tunneled IPv4 */
3165                 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3166                        RTE_PTYPE_L4_FRAG,
3167                 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3168                        RTE_PTYPE_L4_NONFRAG,
3169                 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3170                        RTE_PTYPE_L4_UDP,
3171                 /* [25] reserved */
3172                 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3173                        RTE_PTYPE_L4_TCP,
3174                 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3175                        RTE_PTYPE_L4_SCTP,
3176                 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3177                        RTE_PTYPE_L4_ICMP,
3178
3179                 /* IPv4 --> IPv4 */
3180                 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3181                        RTE_PTYPE_TUNNEL_IP |
3182                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3183                        RTE_PTYPE_INNER_L4_FRAG,
3184                 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3185                        RTE_PTYPE_TUNNEL_IP |
3186                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3187                        RTE_PTYPE_INNER_L4_NONFRAG,
3188                 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3189                        RTE_PTYPE_TUNNEL_IP |
3190                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3191                        RTE_PTYPE_INNER_L4_UDP,
3192                 /* [32] reserved */
3193                 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3194                        RTE_PTYPE_TUNNEL_IP |
3195                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3196                        RTE_PTYPE_INNER_L4_TCP,
3197                 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3198                        RTE_PTYPE_TUNNEL_IP |
3199                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3200                        RTE_PTYPE_INNER_L4_SCTP,
3201                 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3202                        RTE_PTYPE_TUNNEL_IP |
3203                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3204                        RTE_PTYPE_INNER_L4_ICMP,
3205
3206                 /* IPv4 --> IPv6 */
3207                 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3208                        RTE_PTYPE_TUNNEL_IP |
3209                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3210                        RTE_PTYPE_INNER_L4_FRAG,
3211                 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3212                        RTE_PTYPE_TUNNEL_IP |
3213                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3214                        RTE_PTYPE_INNER_L4_NONFRAG,
3215                 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3216                        RTE_PTYPE_TUNNEL_IP |
3217                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3218                        RTE_PTYPE_INNER_L4_UDP,
3219                 /* [39] reserved */
3220                 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3221                        RTE_PTYPE_TUNNEL_IP |
3222                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3223                        RTE_PTYPE_INNER_L4_TCP,
3224                 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3225                        RTE_PTYPE_TUNNEL_IP |
3226                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3227                        RTE_PTYPE_INNER_L4_SCTP,
3228                 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3229                        RTE_PTYPE_TUNNEL_IP |
3230                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3231                        RTE_PTYPE_INNER_L4_ICMP,
3232
3233                 /* IPv4 --> GRE/Teredo/VXLAN */
3234                 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3235                        RTE_PTYPE_TUNNEL_GRENAT,
3236
3237                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3238                 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3239                        RTE_PTYPE_TUNNEL_GRENAT |
3240                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3241                        RTE_PTYPE_INNER_L4_FRAG,
3242                 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3243                        RTE_PTYPE_TUNNEL_GRENAT |
3244                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3245                        RTE_PTYPE_INNER_L4_NONFRAG,
3246                 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3247                        RTE_PTYPE_TUNNEL_GRENAT |
3248                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3249                        RTE_PTYPE_INNER_L4_UDP,
3250                 /* [47] reserved */
3251                 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3252                        RTE_PTYPE_TUNNEL_GRENAT |
3253                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3254                        RTE_PTYPE_INNER_L4_TCP,
3255                 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3256                        RTE_PTYPE_TUNNEL_GRENAT |
3257                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3258                        RTE_PTYPE_INNER_L4_SCTP,
3259                 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3260                        RTE_PTYPE_TUNNEL_GRENAT |
3261                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3262                        RTE_PTYPE_INNER_L4_ICMP,
3263
3264                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3265                 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3266                        RTE_PTYPE_TUNNEL_GRENAT |
3267                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3268                        RTE_PTYPE_INNER_L4_FRAG,
3269                 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3270                        RTE_PTYPE_TUNNEL_GRENAT |
3271                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3272                        RTE_PTYPE_INNER_L4_NONFRAG,
3273                 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3274                        RTE_PTYPE_TUNNEL_GRENAT |
3275                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3276                        RTE_PTYPE_INNER_L4_UDP,
3277                 /* [54] reserved */
3278                 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3279                        RTE_PTYPE_TUNNEL_GRENAT |
3280                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3281                        RTE_PTYPE_INNER_L4_TCP,
3282                 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3283                        RTE_PTYPE_TUNNEL_GRENAT |
3284                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3285                        RTE_PTYPE_INNER_L4_SCTP,
3286                 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3287                        RTE_PTYPE_TUNNEL_GRENAT |
3288                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3289                        RTE_PTYPE_INNER_L4_ICMP,
3290
3291                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3292                 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3293                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3294
3295                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3296                 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3297                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3298                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3299                        RTE_PTYPE_INNER_L4_FRAG,
3300                 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3301                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3302                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3303                        RTE_PTYPE_INNER_L4_NONFRAG,
3304                 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3305                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3306                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3307                        RTE_PTYPE_INNER_L4_UDP,
3308                 /* [62] reserved */
3309                 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3310                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3311                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3312                        RTE_PTYPE_INNER_L4_TCP,
3313                 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3314                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3315                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3316                        RTE_PTYPE_INNER_L4_SCTP,
3317                 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3318                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3319                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3320                        RTE_PTYPE_INNER_L4_ICMP,
3321
3322                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3323                 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3324                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3325                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3326                        RTE_PTYPE_INNER_L4_FRAG,
3327                 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3328                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3329                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3330                        RTE_PTYPE_INNER_L4_NONFRAG,
3331                 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3332                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3333                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3334                        RTE_PTYPE_INNER_L4_UDP,
3335                 /* [69] reserved */
3336                 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3337                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3338                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3339                        RTE_PTYPE_INNER_L4_TCP,
3340                 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3341                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3342                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3343                        RTE_PTYPE_INNER_L4_SCTP,
3344                 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3345                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3346                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3347                        RTE_PTYPE_INNER_L4_ICMP,
3348                 /* [73] - [87] reserved */
3349
3350                 /* Non tunneled IPv6 */
3351                 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3352                        RTE_PTYPE_L4_FRAG,
3353                 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3354                        RTE_PTYPE_L4_NONFRAG,
3355                 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3356                        RTE_PTYPE_L4_UDP,
3357                 /* [91] reserved */
3358                 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3359                        RTE_PTYPE_L4_TCP,
3360                 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3361                        RTE_PTYPE_L4_SCTP,
3362                 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3363                        RTE_PTYPE_L4_ICMP,
3364
3365                 /* IPv6 --> IPv4 */
3366                 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3367                        RTE_PTYPE_TUNNEL_IP |
3368                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3369                        RTE_PTYPE_INNER_L4_FRAG,
3370                 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3371                        RTE_PTYPE_TUNNEL_IP |
3372                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3373                        RTE_PTYPE_INNER_L4_NONFRAG,
3374                 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3375                        RTE_PTYPE_TUNNEL_IP |
3376                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3377                        RTE_PTYPE_INNER_L4_UDP,
3378                 /* [98] reserved */
3379                 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3380                        RTE_PTYPE_TUNNEL_IP |
3381                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3382                        RTE_PTYPE_INNER_L4_TCP,
3383                 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3384                         RTE_PTYPE_TUNNEL_IP |
3385                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3386                         RTE_PTYPE_INNER_L4_SCTP,
3387                 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3388                         RTE_PTYPE_TUNNEL_IP |
3389                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3390                         RTE_PTYPE_INNER_L4_ICMP,
3391
3392                 /* IPv6 --> IPv6 */
3393                 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3394                         RTE_PTYPE_TUNNEL_IP |
3395                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3396                         RTE_PTYPE_INNER_L4_FRAG,
3397                 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3398                         RTE_PTYPE_TUNNEL_IP |
3399                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3400                         RTE_PTYPE_INNER_L4_NONFRAG,
3401                 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3402                         RTE_PTYPE_TUNNEL_IP |
3403                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3404                         RTE_PTYPE_INNER_L4_UDP,
3405                 /* [105] reserved */
3406                 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3407                         RTE_PTYPE_TUNNEL_IP |
3408                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3409                         RTE_PTYPE_INNER_L4_TCP,
3410                 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3411                         RTE_PTYPE_TUNNEL_IP |
3412                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3413                         RTE_PTYPE_INNER_L4_SCTP,
3414                 [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3415                         RTE_PTYPE_TUNNEL_IP |
3416                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3417                         RTE_PTYPE_INNER_L4_ICMP,
3418
3419                 /* IPv6 --> GRE/Teredo/VXLAN */
3420                 [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3421                         RTE_PTYPE_TUNNEL_GRENAT,
3422
3423                 /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
3424                 [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3425                         RTE_PTYPE_TUNNEL_GRENAT |
3426                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3427                         RTE_PTYPE_INNER_L4_FRAG,
3428                 [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3429                         RTE_PTYPE_TUNNEL_GRENAT |
3430                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3431                         RTE_PTYPE_INNER_L4_NONFRAG,
3432                 [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3433                         RTE_PTYPE_TUNNEL_GRENAT |
3434                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3435                         RTE_PTYPE_INNER_L4_UDP,
3436                 /* [113] reserved */
3437                 [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3438                         RTE_PTYPE_TUNNEL_GRENAT |
3439                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3440                         RTE_PTYPE_INNER_L4_TCP,
3441                 [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3442                         RTE_PTYPE_TUNNEL_GRENAT |
3443                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3444                         RTE_PTYPE_INNER_L4_SCTP,
3445                 [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3446                         RTE_PTYPE_TUNNEL_GRENAT |
3447                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3448                         RTE_PTYPE_INNER_L4_ICMP,
3449
3450                 /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
3451                 [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3452                         RTE_PTYPE_TUNNEL_GRENAT |
3453                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3454                         RTE_PTYPE_INNER_L4_FRAG,
3455                 [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3456                         RTE_PTYPE_TUNNEL_GRENAT |
3457                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3458                         RTE_PTYPE_INNER_L4_NONFRAG,
3459                 [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3460                         RTE_PTYPE_TUNNEL_GRENAT |
3461                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3462                         RTE_PTYPE_INNER_L4_UDP,
3463                 /* [120] reserved */
3464                 [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3465                         RTE_PTYPE_TUNNEL_GRENAT |
3466                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3467                         RTE_PTYPE_INNER_L4_TCP,
3468                 [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3469                         RTE_PTYPE_TUNNEL_GRENAT |
3470                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3471                         RTE_PTYPE_INNER_L4_SCTP,
3472                 [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3473                         RTE_PTYPE_TUNNEL_GRENAT |
3474                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3475                         RTE_PTYPE_INNER_L4_ICMP,
3476
3477                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
3478                 [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3479                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3480
3481                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3482                 [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3483                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3484                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3485                         RTE_PTYPE_INNER_L4_FRAG,
3486                 [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3487                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3488                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3489                         RTE_PTYPE_INNER_L4_NONFRAG,
3490                 [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3491                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3492                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3493                         RTE_PTYPE_INNER_L4_UDP,
3494                 /* [128] reserved */
3495                 [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3496                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3497                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3498                         RTE_PTYPE_INNER_L4_TCP,
3499                 [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3500                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3501                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3502                         RTE_PTYPE_INNER_L4_SCTP,
3503                 [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3504                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3505                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3506                         RTE_PTYPE_INNER_L4_ICMP,
3507
3508                 /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3509                 [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3510                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3511                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3512                         RTE_PTYPE_INNER_L4_FRAG,
3513                 [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3514                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3515                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3516                         RTE_PTYPE_INNER_L4_NONFRAG,
3517                 [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3518                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3519                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3520                         RTE_PTYPE_INNER_L4_UDP,
3521                 /* [135] reserved */
3522                 [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3523                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3524                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3525                         RTE_PTYPE_INNER_L4_TCP,
3526                 [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3527                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3528                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3529                         RTE_PTYPE_INNER_L4_SCTP,
3530                 [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3531                         RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3532                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3533                         RTE_PTYPE_INNER_L4_ICMP,
3534                 /* [139] - [299] reserved */
3535
3536                 /* PPPoE */
3537                 [300] = RTE_PTYPE_L2_ETHER_PPPOE,
3538                 [301] = RTE_PTYPE_L2_ETHER_PPPOE,
3539
3540                 /* PPPoE --> IPv4 */
3541                 [302] = RTE_PTYPE_L2_ETHER_PPPOE |
3542                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3543                         RTE_PTYPE_L4_FRAG,
3544                 [303] = RTE_PTYPE_L2_ETHER_PPPOE |
3545                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3546                         RTE_PTYPE_L4_NONFRAG,
3547                 [304] = RTE_PTYPE_L2_ETHER_PPPOE |
3548                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3549                         RTE_PTYPE_L4_UDP,
3550                 [305] = RTE_PTYPE_L2_ETHER_PPPOE |
3551                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3552                         RTE_PTYPE_L4_TCP,
3553                 [306] = RTE_PTYPE_L2_ETHER_PPPOE |
3554                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3555                         RTE_PTYPE_L4_SCTP,
3556                 [307] = RTE_PTYPE_L2_ETHER_PPPOE |
3557                         RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3558                         RTE_PTYPE_L4_ICMP,
3559
3560                 /* PPPoE --> IPv6 */
3561                 [308] = RTE_PTYPE_L2_ETHER_PPPOE |
3562                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3563                         RTE_PTYPE_L4_FRAG,
3564                 [309] = RTE_PTYPE_L2_ETHER_PPPOE |
3565                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3566                         RTE_PTYPE_L4_NONFRAG,
3567                 [310] = RTE_PTYPE_L2_ETHER_PPPOE |
3568                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3569                         RTE_PTYPE_L4_UDP,
3570                 [311] = RTE_PTYPE_L2_ETHER_PPPOE |
3571                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3572                         RTE_PTYPE_L4_TCP,
3573                 [312] = RTE_PTYPE_L2_ETHER_PPPOE |
3574                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3575                         RTE_PTYPE_L4_SCTP,
3576                 [313] = RTE_PTYPE_L2_ETHER_PPPOE |
3577                         RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3578                         RTE_PTYPE_L4_ICMP,
3579                 /* [314] - [324] reserved */
3580
3581                 /* IPv4/IPv6 --> GTPC/GTPU */
3582                 [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3583                         RTE_PTYPE_TUNNEL_GTPC,
3584                 [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3585                         RTE_PTYPE_TUNNEL_GTPC,
3586                 [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3587                         RTE_PTYPE_TUNNEL_GTPC,
3588                 [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3589                         RTE_PTYPE_TUNNEL_GTPC,
3590                 [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3591                         RTE_PTYPE_TUNNEL_GTPU,
3592                 [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3593                         RTE_PTYPE_TUNNEL_GTPU,
3594
3595                 /* IPv4 --> GTPU --> IPv4 */
3596                 [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3597                         RTE_PTYPE_TUNNEL_GTPU |
3598                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3599                         RTE_PTYPE_INNER_L4_FRAG,
3600                 [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3601                         RTE_PTYPE_TUNNEL_GTPU |
3602                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3603                         RTE_PTYPE_INNER_L4_NONFRAG,
3604                 [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3605                         RTE_PTYPE_TUNNEL_GTPU |
3606                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3607                         RTE_PTYPE_INNER_L4_UDP,
3608                 [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3609                         RTE_PTYPE_TUNNEL_GTPU |
3610                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3611                         RTE_PTYPE_INNER_L4_TCP,
3612                 [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3613                         RTE_PTYPE_TUNNEL_GTPU |
3614                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3615                         RTE_PTYPE_INNER_L4_ICMP,
3616
3617                 /* IPv6 --> GTPU --> IPv4 */
3618                 [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3619                         RTE_PTYPE_TUNNEL_GTPU |
3620                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3621                         RTE_PTYPE_INNER_L4_FRAG,
3622                 [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3623                         RTE_PTYPE_TUNNEL_GTPU |
3624                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3625                         RTE_PTYPE_INNER_L4_NONFRAG,
3626                 [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3627                         RTE_PTYPE_TUNNEL_GTPU |
3628                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3629                         RTE_PTYPE_INNER_L4_UDP,
3630                 [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3631                         RTE_PTYPE_TUNNEL_GTPU |
3632                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3633                         RTE_PTYPE_INNER_L4_TCP,
3634                 [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3635                         RTE_PTYPE_TUNNEL_GTPU |
3636                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3637                         RTE_PTYPE_INNER_L4_ICMP,
3638
3639                 /* IPv4 --> GTPU --> IPv6 */
3640                 [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3641                         RTE_PTYPE_TUNNEL_GTPU |
3642                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3643                         RTE_PTYPE_INNER_L4_FRAG,
3644                 [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3645                         RTE_PTYPE_TUNNEL_GTPU |
3646                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3647                         RTE_PTYPE_INNER_L4_NONFRAG,
3648                 [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3649                         RTE_PTYPE_TUNNEL_GTPU |
3650                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3651                         RTE_PTYPE_INNER_L4_UDP,
3652                 [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3653                         RTE_PTYPE_TUNNEL_GTPU |
3654                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3655                         RTE_PTYPE_INNER_L4_TCP,
3656                 [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3657                         RTE_PTYPE_TUNNEL_GTPU |
3658                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3659                         RTE_PTYPE_INNER_L4_ICMP,
3660
3661                 /* IPv6 --> GTPU --> IPv6 */
3662                 [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3663                         RTE_PTYPE_TUNNEL_GTPU |
3664                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3665                         RTE_PTYPE_INNER_L4_FRAG,
3666                 [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3667                         RTE_PTYPE_TUNNEL_GTPU |
3668                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3669                         RTE_PTYPE_INNER_L4_NONFRAG,
3670                 [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3671                         RTE_PTYPE_TUNNEL_GTPU |
3672                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3673                         RTE_PTYPE_INNER_L4_UDP,
3674                 [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3675                         RTE_PTYPE_TUNNEL_GTPU |
3676                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3677                         RTE_PTYPE_INNER_L4_TCP,
3678                 [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3679                         RTE_PTYPE_TUNNEL_GTPU |
3680                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3681                         RTE_PTYPE_INNER_L4_ICMP,
3682                 /* All others reserved */
3683         };
3684
3685         return type_table[ptype];
3686 }
3687
3688 void __rte_cold
3689 ice_set_default_ptype_table(struct rte_eth_dev *dev)
3690 {
3691         struct ice_adapter *ad =
3692                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3693         int i;
3694
3695         for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
3696                 ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
3697 }
3698
3699 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
3700 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
3701                         (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
3702 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
3703 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
3704
3705 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S   4
3706 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M   \
3707         (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
3708 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S      5
3709 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M      \
3710         (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
3711
3712 /*
3713  * check the programming status descriptor in rx queue.
3714  * done after Programming Flow Director is programmed on
3715  * tx queue
3716  */
3717 static inline int
3718 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
3719 {
3720         volatile union ice_32byte_rx_desc *rxdp;
3721         uint64_t qword1;
3722         uint32_t rx_status;
3723         uint32_t error;
3724         uint32_t id;
3725         int ret = -EAGAIN;
3726
3727         rxdp = (volatile union ice_32byte_rx_desc *)
3728                 (&rxq->rx_ring[rxq->rx_tail]);
3729         qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
3730         rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
3731                         >> ICE_RXD_QW1_STATUS_S;
3732
3733         if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
3734                 ret = 0;
3735                 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
3736                         ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
3737                 id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
3738                         ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
3739                 if (error) {
3740                         if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
3741                                 PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
3742                         else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
3743                                 PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
3744                         ret = -EINVAL;
3745                         goto err;
3746                 }
3747                 error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
3748                         ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
3749                 if (error) {
3750                         PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
3751                         ret = -EINVAL;
3752                 }
3753 err:
3754                 rxdp->wb.qword1.status_error_len = 0;
3755                 rxq->rx_tail++;
3756                 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
3757                         rxq->rx_tail = 0;
3758                 if (rxq->rx_tail == 0)
3759                         ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
3760                 else
3761                         ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
3762         }
3763
3764         return ret;
3765 }
3766
3767 #define ICE_FDIR_MAX_WAIT_US 10000
3768
3769 int
3770 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
3771 {
3772         struct ice_tx_queue *txq = pf->fdir.txq;
3773         struct ice_rx_queue *rxq = pf->fdir.rxq;
3774         volatile struct ice_fltr_desc *fdirdp;
3775         volatile struct ice_tx_desc *txdp;
3776         uint32_t td_cmd;
3777         uint16_t i;
3778
3779         fdirdp = (volatile struct ice_fltr_desc *)
3780                 (&txq->tx_ring[txq->tx_tail]);
3781         fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
3782         fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
3783
3784         txdp = &txq->tx_ring[txq->tx_tail + 1];
3785         txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
3786         td_cmd = ICE_TX_DESC_CMD_EOP |
3787                 ICE_TX_DESC_CMD_RS  |
3788                 ICE_TX_DESC_CMD_DUMMY;
3789
3790         txdp->cmd_type_offset_bsz =
3791                 ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
3792
3793         txq->tx_tail += 2;
3794         if (txq->tx_tail >= txq->nb_tx_desc)
3795                 txq->tx_tail = 0;
3796         /* Update the tx tail register */
3797         ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
3798         for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
3799                 if ((txdp->cmd_type_offset_bsz &
3800                      rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
3801                     rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
3802                         break;
3803                 rte_delay_us(1);
3804         }
3805         if (i >= ICE_FDIR_MAX_WAIT_US) {
3806                 PMD_DRV_LOG(ERR,
3807                             "Failed to program FDIR filter: time out to get DD on tx queue.");
3808                 return -ETIMEDOUT;
3809         }
3810
3811         for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
3812                 int ret;
3813
3814                 ret = ice_check_fdir_programming_status(rxq);
3815                 if (ret == -EAGAIN)
3816                         rte_delay_us(1);
3817                 else
3818                         return ret;
3819         }
3820
3821         PMD_DRV_LOG(ERR,
3822                     "Failed to program FDIR filter: programming status reported.");
3823         return -ETIMEDOUT;
3824
3825
3826 }