dpdk.git: drivers/net/ice/ice_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include <ethdev_driver.h>
6 #include <rte_net.h>
7 #include <rte_vect.h>
8
9 #include "rte_pmd_ice.h"
10 #include "ice_rxtx.h"
11 #include "ice_rxtx_vec_common.h"
12
13 #define ICE_TX_CKSUM_OFFLOAD_MASK (              \
14                 PKT_TX_IP_CKSUM |                \
15                 PKT_TX_L4_MASK |                 \
16                 PKT_TX_TCP_SEG |                 \
17                 PKT_TX_OUTER_IP_CKSUM)
18
19 /* Offset of mbuf dynamic field for protocol extraction data */
20 int rte_net_ice_dynfield_proto_xtr_metadata_offs = -1;
21
22 /* Mask of mbuf dynamic flags for protocol extraction type */
23 uint64_t rte_net_ice_dynflag_proto_xtr_vlan_mask;
24 uint64_t rte_net_ice_dynflag_proto_xtr_ipv4_mask;
25 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_mask;
26 uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
27 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
28 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
29
30 static int
31 ice_monitor_callback(const uint64_t value,
32                 const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
33 {
34         const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
35         /*
36          * we expect the DD bit to be set to 1 if this descriptor was already
37          * written to.
38          */
39         return (value & m) == m ? -1 : 0;
40 }
41
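/* Fill the power monitor condition with the address of the next Rx
 * descriptor's status word, so a core can sleep until the DD bit is set.
 */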
42 int
43 ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
44 {
45         volatile union ice_rx_flex_desc *rxdp;
46         struct ice_rx_queue *rxq = rx_queue;
47         uint16_t desc;
48
49         desc = rxq->rx_tail;
50         rxdp = &rxq->rx_ring[desc];
51         /* watch for changes in status bit */
52         pmc->addr = &rxdp->wb.status_error0;
53
54         /* comparison callback */
55         pmc->fn = ice_monitor_callback;
56
57         /* register is 16-bit */
58         pmc->size = sizeof(uint16_t);
59
60         return 0;
61 }
62
63
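/* Map a protocol extraction type to its flexible descriptor RXDID;
 * unknown types fall back to ICE_RXDID_COMMS_OVS.
 */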
64 static inline uint8_t
65 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
66 {
67         static uint8_t rxdid_map[] = {
68                 [PROTO_XTR_NONE]      = ICE_RXDID_COMMS_OVS,
69                 [PROTO_XTR_VLAN]      = ICE_RXDID_COMMS_AUX_VLAN,
70                 [PROTO_XTR_IPV4]      = ICE_RXDID_COMMS_AUX_IPV4,
71                 [PROTO_XTR_IPV6]      = ICE_RXDID_COMMS_AUX_IPV6,
72                 [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
73                 [PROTO_XTR_TCP]       = ICE_RXDID_COMMS_AUX_TCP,
74                 [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
75         };
76
77         return xtr_type < RTE_DIM(rxdid_map) ?
78                                 rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
79 }
80
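/* Parse a generic comms flex descriptor: copy the RSS hash and FDIR ID
 * into the mbuf when the descriptor marks them as valid.
 */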
81 static inline void
82 ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
83                                        struct rte_mbuf *mb,
84                                        volatile union ice_rx_flex_desc *rxdp)
85 {
86         volatile struct ice_32b_rx_flex_desc_comms *desc =
87                         (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
88         uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
89
90         if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
91                 mb->ol_flags |= PKT_RX_RSS_HASH;
92                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
93         }
94
95 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
96         if (desc->flow_id != 0xFFFFFFFF) {
97                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
98                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
99         }
100 #endif
101 }
102
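/* Parse an OVS comms flex descriptor: copy the FDIR ID and, with 32B
 * descriptors, the RSS hash into the mbuf.
 */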
103 static inline void
104 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
105                                    struct rte_mbuf *mb,
106                                    volatile union ice_rx_flex_desc *rxdp)
107 {
108         volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
109                         (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
110 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
111         uint16_t stat_err;
112 #endif
113
114         if (desc->flow_id != 0xFFFFFFFF) {
115                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
116                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
117         }
118
119 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
120         stat_err = rte_le_to_cpu_16(desc->status_error0);
121         if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
122                 mb->ol_flags |= PKT_RX_RSS_HASH;
123                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
124         }
125 #endif
126 }
127
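/* Parse an AUX (v1) comms flex descriptor: RSS hash, FDIR ID and protocol
 * extraction metadata whose validity is signalled by the STATUS1 XTRMD bits.
 */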
128 static inline void
129 ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
130                                       struct rte_mbuf *mb,
131                                       volatile union ice_rx_flex_desc *rxdp)
132 {
133         volatile struct ice_32b_rx_flex_desc_comms *desc =
134                         (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
135         uint16_t stat_err;
136
137         stat_err = rte_le_to_cpu_16(desc->status_error0);
138         if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
139                 mb->ol_flags |= PKT_RX_RSS_HASH;
140                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
141         }
142
143 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
144         if (desc->flow_id != 0xFFFFFFFF) {
145                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
146                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
147         }
148
149         if (rxq->xtr_ol_flag) {
150                 uint32_t metadata = 0;
151
152                 stat_err = rte_le_to_cpu_16(desc->status_error1);
153
154                 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
155                         metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
156
157                 if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
158                         metadata |=
159                                 rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
160
161                 if (metadata) {
162                         mb->ol_flags |= rxq->xtr_ol_flag;
163
164                         *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
165                 }
166         }
167 #endif
168 }
169
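/* Parse an AUX (v2) comms flex descriptor: like v1, but aux metadata is
 * taken as valid when it differs from the 0xFFFF sentinel instead of
 * relying on STATUS1 bits.
 */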
170 static inline void
171 ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
172                                       struct rte_mbuf *mb,
173                                       volatile union ice_rx_flex_desc *rxdp)
174 {
175         volatile struct ice_32b_rx_flex_desc_comms *desc =
176                         (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
177         uint16_t stat_err;
178
179         stat_err = rte_le_to_cpu_16(desc->status_error0);
180         if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
181                 mb->ol_flags |= PKT_RX_RSS_HASH;
182                 mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
183         }
184
185 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
186         if (desc->flow_id != 0xFFFFFFFF) {
187                 mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
188                 mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
189         }
190
191         if (rxq->xtr_ol_flag) {
192                 uint32_t metadata = 0;
193
194                 if (desc->flex_ts.flex.aux0 != 0xFFFF)
195                         metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
196                 else if (desc->flex_ts.flex.aux1 != 0xFFFF)
197                         metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
198
199                 if (metadata) {
200                         mb->ol_flags |= rxq->xtr_ol_flag;
201
202                         *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
203                 }
204         }
205 #endif
206 }
207
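/* Bind the per-queue descriptor parsing handler and protocol extraction
 * ol_flag that match the selected RXDID.
 */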
208 void
209 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
210 {
211         switch (rxdid) {
212         case ICE_RXDID_COMMS_AUX_VLAN:
213                 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
214                 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
215                 break;
216
217         case ICE_RXDID_COMMS_AUX_IPV4:
218                 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
219                 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
220                 break;
221
222         case ICE_RXDID_COMMS_AUX_IPV6:
223                 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
224                 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
225                 break;
226
227         case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
228                 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
229                 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
230                 break;
231
232         case ICE_RXDID_COMMS_AUX_TCP:
233                 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
234                 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
235                 break;
236
237         case ICE_RXDID_COMMS_AUX_IP_OFFSET:
238                 rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
239                 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
240                 break;
241
242         case ICE_RXDID_COMMS_GENERIC:
243                 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
244                 break;
245
246         case ICE_RXDID_COMMS_OVS:
247                 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
248                 break;
249
250         default:
251                 /* update this according to the RXDID for PROTO_XTR_NONE */
252                 rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
253                 break;
254         }
255
256         if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
257                 rxq->xtr_ol_flag = 0;
258 }
259
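/* Program the HW Rx queue context: buffer sizes, maximum packet length,
 * RXDID selection and the flexible descriptor register, then init the tail.
 */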
260 static enum ice_status
261 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
262 {
263         struct ice_vsi *vsi = rxq->vsi;
264         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
265         struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
266         struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
267         struct ice_rlan_ctx rx_ctx;
268         enum ice_status err;
269         uint16_t buf_size;
270         struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
271         uint32_t rxdid = ICE_RXDID_COMMS_OVS;
272         uint32_t regval;
273         struct ice_adapter *ad = rxq->vsi->adapter;
274
275         /* Set buffer size as the head split is disabled. */
276         buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
277                               RTE_PKTMBUF_HEADROOM);
278         rxq->rx_hdr_len = 0;
279         rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
280         rxq->max_pkt_len = RTE_MIN((uint32_t)
281                                    ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
282                                    dev_data->dev_conf.rxmode.max_rx_pkt_len);
283
284         if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
285                 if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
286                     rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
287                         PMD_DRV_LOG(ERR, "maximum packet length must "
288                                     "be larger than %u and smaller than %u, "
289                                     "as jumbo frame is enabled",
290                                     (uint32_t)ICE_ETH_MAX_LEN,
291                                     (uint32_t)ICE_FRAME_SIZE_MAX);
292                         return -EINVAL;
293                 }
294         } else {
295                 if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
296                     rxq->max_pkt_len > ICE_ETH_MAX_LEN) {
297                         PMD_DRV_LOG(ERR, "maximum packet length must be "
298                                     "larger than %u and smaller than %u, "
299                                     "as jumbo frame is disabled",
300                                     (uint32_t)RTE_ETHER_MIN_LEN,
301                                     (uint32_t)ICE_ETH_MAX_LEN);
302                         return -EINVAL;
303                 }
304         }
305
306         if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
307                 /* Register mbuf field and flag for Rx timestamp */
308                 err = rte_mbuf_dyn_rx_timestamp_register(
309                                 &ice_timestamp_dynfield_offset,
310                                 &ice_timestamp_dynflag);
311                 if (err) {
312                         PMD_DRV_LOG(ERR,
313                                 "Cannot register mbuf field/flag for timestamp");
314                         return -EINVAL;
315                 }
316         }
317
318         memset(&rx_ctx, 0, sizeof(rx_ctx));
319
320         rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
321         rx_ctx.qlen = rxq->nb_rx_desc;
322         rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
323         rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
324         rx_ctx.dtype = 0; /* No Header Split mode */
325 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
326         rx_ctx.dsize = 1; /* 32B descriptors */
327 #endif
328         rx_ctx.rxmax = rxq->max_pkt_len;
329         /* TPH: Transaction Layer Packet (TLP) processing hints */
330         rx_ctx.tphrdesc_ena = 1;
331         rx_ctx.tphwdesc_ena = 1;
332         rx_ctx.tphdata_ena = 1;
333         rx_ctx.tphhead_ena = 1;
334         /* Low Receive Queue Threshold, defined in units of 64 descriptors.
335          * When the number of free descriptors goes below the lrxqthresh,
336          * an immediate interrupt is triggered.
337          */
338         rx_ctx.lrxqthresh = 2;
339         /* default to 32 byte descriptors, VLAN tag extracted to L2TAG2 (1st) */
340         rx_ctx.l2tsel = 1;
341         rx_ctx.showiv = 0;
342         rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
343
344         rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
345
346         PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
347                     rxq->port_id, rxq->queue_id, rxdid);
348
349         if (!(pf->supported_rxdid & BIT(rxdid))) {
350                 PMD_DRV_LOG(ERR, "the loaded package currently doesn't support RXDID (%u)",
351                             rxdid);
352                 return -EINVAL;
353         }
354
355         ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
356
357         /* Enable Flexible Descriptors in the queue context which
358          * allows this driver to select a specific receive descriptor format
359          */
360         regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
361                 QRXFLXP_CNTXT_RXDID_IDX_M;
362
363         /* increasing context priority to pick up profile ID;
364          * default is 0x01; setting to 0x03 to ensure profile
365          * is programmed if the previous context is of the same priority
366          */
367         regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
368                 QRXFLXP_CNTXT_RXDID_PRIO_M;
369
370         if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
371                 regval |= QRXFLXP_CNTXT_TS_M;
372
373         ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
374
375         err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
376         if (err) {
377                 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
378                             rxq->queue_id);
379                 return -EINVAL;
380         }
381         err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
382         if (err) {
383                 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
384                             rxq->queue_id);
385                 return -EINVAL;
386         }
387
388         buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
389                               RTE_PKTMBUF_HEADROOM);
390
391         /* Check if scattered RX needs to be used. */
392         if (rxq->max_pkt_len > buf_size)
393                 dev_data->scattered_rx = 1;
394
395         rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
396
397         /* Init the Rx tail register*/
398         ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
399
400         return 0;
401 }
402
403 /* Allocate mbufs for all descriptors in rx queue */
404 static int
405 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
406 {
407         struct ice_rx_entry *rxe = rxq->sw_ring;
408         uint64_t dma_addr;
409         uint16_t i;
410
411         for (i = 0; i < rxq->nb_rx_desc; i++) {
412                 volatile union ice_rx_flex_desc *rxd;
413                 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
414
415                 if (unlikely(!mbuf)) {
416                         PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
417                         return -ENOMEM;
418                 }
419
420                 rte_mbuf_refcnt_set(mbuf, 1);
421                 mbuf->next = NULL;
422                 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
423                 mbuf->nb_segs = 1;
424                 mbuf->port = rxq->port_id;
425
426                 dma_addr =
427                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
428
429                 rxd = &rxq->rx_ring[i];
430                 rxd->read.pkt_addr = dma_addr;
431                 rxd->read.hdr_addr = 0;
432 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
433                 rxd->read.rsvd1 = 0;
434                 rxd->read.rsvd2 = 0;
435 #endif
436                 rxe[i].mbuf = mbuf;
437         }
438
439         return 0;
440 }
441
442 /* Free all mbufs for descriptors in rx queue */
443 static void
444 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
445 {
446         uint16_t i;
447
448         if (!rxq || !rxq->sw_ring) {
449                 PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
450                 return;
451         }
452
453         for (i = 0; i < rxq->nb_rx_desc; i++) {
454                 if (rxq->sw_ring[i].mbuf) {
455                         rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
456                         rxq->sw_ring[i].mbuf = NULL;
457                 }
458         }
459         if (rxq->rx_nb_avail == 0)
460                 return;
461         for (i = 0; i < rxq->rx_nb_avail; i++)
462                 rte_pktmbuf_free_seg(rxq->rx_stage[rxq->rx_next_avail + i]);
463
464         rxq->rx_nb_avail = 0;
465 }
466
467 /* turn on or off rx queue
468  * @q_idx: queue index in pf scope
469  * @on: turn on or off the queue
470  */
471 static int
472 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
473 {
474         uint32_t reg;
475         uint16_t j;
476
477         /* QRX_CTRL = QRX_ENA */
478         reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
479
480         if (on) {
481                 if (reg & QRX_CTRL_QENA_STAT_M)
482                         return 0; /* Already on, skip */
483                 reg |= QRX_CTRL_QENA_REQ_M;
484         } else {
485                 if (!(reg & QRX_CTRL_QENA_STAT_M))
486                         return 0; /* Already off, skip */
487                 reg &= ~QRX_CTRL_QENA_REQ_M;
488         }
489
490         /* Write the register */
491         ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
492         /* Check the result. It is said that QENA_STAT
493          * follows QENA_REQ by not more than 10 us.
494          * TODO: need to change the wait counter later
495          */
496         for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
497                 rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
498                 reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
499                 if (on) {
500                         if ((reg & QRX_CTRL_QENA_REQ_M) &&
501                             (reg & QRX_CTRL_QENA_STAT_M))
502                                 break;
503                 } else {
504                         if (!(reg & QRX_CTRL_QENA_REQ_M) &&
505                             !(reg & QRX_CTRL_QENA_STAT_M))
506                                 break;
507                 }
508         }
509
510         /* Check if it is timeout */
511         if (j >= ICE_CHK_Q_ENA_COUNT) {
512                 PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
513                             (on ? "enable" : "disable"), q_idx);
514                 return -ETIMEDOUT;
515         }
516
517         return 0;
518 }
519
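/* Check whether the queue configuration satisfies the preconditions of the
 * bulk allocation Rx burst path.
 */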
520 static inline int
521 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
522 {
523         int ret = 0;
524
525         if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
526                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
527                              "rxq->rx_free_thresh=%d, "
528                              "ICE_RX_MAX_BURST=%d",
529                              rxq->rx_free_thresh, ICE_RX_MAX_BURST);
530                 ret = -EINVAL;
531         } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
532                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
533                              "rxq->rx_free_thresh=%d, "
534                              "rxq->nb_rx_desc=%d",
535                              rxq->rx_free_thresh, rxq->nb_rx_desc);
536                 ret = -EINVAL;
537         } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
538                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
539                              "rxq->nb_rx_desc=%d, "
540                              "rxq->rx_free_thresh=%d",
541                              rxq->nb_rx_desc, rxq->rx_free_thresh);
542                 ret = -EINVAL;
543         }
544
545         return ret;
546 }
547
548 /* reset fields in ice_rx_queue back to default */
549 static void
550 ice_reset_rx_queue(struct ice_rx_queue *rxq)
551 {
552         unsigned int i;
553         uint16_t len;
554
555         if (!rxq) {
556                 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
557                 return;
558         }
559
560         len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
561
562         for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
563                 ((volatile char *)rxq->rx_ring)[i] = 0;
564
565         memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
566         for (i = 0; i < ICE_RX_MAX_BURST; ++i)
567                 rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
568
569         rxq->rx_nb_avail = 0;
570         rxq->rx_next_avail = 0;
571         rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
572
573         rxq->rx_tail = 0;
574         rxq->nb_rx_hold = 0;
575         rxq->pkt_first_seg = NULL;
576         rxq->pkt_last_seg = NULL;
577
578         rxq->rxrearm_start = 0;
579         rxq->rxrearm_nb = 0;
580 }
581
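/* Device op: program the HW queue, fill it with mbufs, bump the tail
 * register and enable the Rx queue.
 */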
582 int
583 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
584 {
585         struct ice_rx_queue *rxq;
586         int err;
587         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
588
589         PMD_INIT_FUNC_TRACE();
590
591         if (rx_queue_id >= dev->data->nb_rx_queues) {
592                 PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
593                             rx_queue_id, dev->data->nb_rx_queues);
594                 return -EINVAL;
595         }
596
597         rxq = dev->data->rx_queues[rx_queue_id];
598         if (!rxq || !rxq->q_set) {
599                 PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
600                             rx_queue_id);
601                 return -EINVAL;
602         }
603
604         err = ice_program_hw_rx_queue(rxq);
605         if (err) {
606                 PMD_DRV_LOG(ERR, "failed to program RX queue %u",
607                             rx_queue_id);
608                 return -EIO;
609         }
610
611         err = ice_alloc_rx_queue_mbufs(rxq);
612         if (err) {
613                 PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
614                 return -ENOMEM;
615         }
616
617         /* Init the RX tail register. */
618         ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
619
620         err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
621         if (err) {
622                 PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
623                             rx_queue_id);
624
625                 rxq->rx_rel_mbufs(rxq);
626                 ice_reset_rx_queue(rxq);
627                 return -EINVAL;
628         }
629
630         dev->data->rx_queue_state[rx_queue_id] =
631                 RTE_ETH_QUEUE_STATE_STARTED;
632
633         return 0;
634 }
635
636 int
637 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
638 {
639         struct ice_rx_queue *rxq;
640         int err;
641         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
642
643         if (rx_queue_id < dev->data->nb_rx_queues) {
644                 rxq = dev->data->rx_queues[rx_queue_id];
645
646                 err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
647                 if (err) {
648                         PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
649                                     rx_queue_id);
650                         return -EINVAL;
651                 }
652                 rxq->rx_rel_mbufs(rxq);
653                 ice_reset_rx_queue(rxq);
654                 dev->data->rx_queue_state[rx_queue_id] =
655                         RTE_ETH_QUEUE_STATE_STOPPED;
656         }
657
658         return 0;
659 }
660
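/* Device op: build the Tx queue context, add the LAN Tx queue to the
 * scheduler via ice_ena_vsi_txq() and mark the queue as started.
 */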
661 int
662 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
663 {
664         struct ice_tx_queue *txq;
665         int err;
666         struct ice_vsi *vsi;
667         struct ice_hw *hw;
668         struct ice_aqc_add_tx_qgrp *txq_elem;
669         struct ice_tlan_ctx tx_ctx;
670         int buf_len;
671
672         PMD_INIT_FUNC_TRACE();
673
674         if (tx_queue_id >= dev->data->nb_tx_queues) {
675                 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
676                             tx_queue_id, dev->data->nb_tx_queues);
677                 return -EINVAL;
678         }
679
680         txq = dev->data->tx_queues[tx_queue_id];
681         if (!txq || !txq->q_set) {
682                 PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
683                             tx_queue_id);
684                 return -EINVAL;
685         }
686
687         buf_len = ice_struct_size(txq_elem, txqs, 1);
688         txq_elem = ice_malloc(hw, buf_len);
689         if (!txq_elem)
690                 return -ENOMEM;
691
692         vsi = txq->vsi;
693         hw = ICE_VSI_TO_HW(vsi);
694
695         memset(&tx_ctx, 0, sizeof(tx_ctx));
696         txq_elem->num_txqs = 1;
697         txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
698
699         tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
700         tx_ctx.qlen = txq->nb_tx_desc;
701         tx_ctx.pf_num = hw->pf_id;
702         tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
703         tx_ctx.src_vsi = vsi->vsi_id;
704         tx_ctx.port_num = hw->port_info->lport;
705         tx_ctx.tso_ena = 1; /* tso enable */
706         tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
707         tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
708         tx_ctx.tsyn_ena = 1;
709
710         ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
711                     ice_tlan_ctx_info);
712
713         txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
714
715         /* Init the Tx tail register*/
716         ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
717
718         /* Fix me, we assume TC always 0 here */
719         err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
720                         txq_elem, buf_len, NULL);
721         if (err) {
722                 PMD_DRV_LOG(ERR, "Failed to add lan txq");
723                 rte_free(txq_elem);
724                 return -EIO;
725         }
726         /* store the schedule node id */
727         txq->q_teid = txq_elem->txqs[0].q_teid;
728
729         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
730
731         rte_free(txq_elem);
732         return 0;
733 }
734
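/* Program the HW Rx queue context for the flow director queue: legacy
 * RXDID, fixed 1024 byte buffers and no protocol extraction.
 */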
735 static enum ice_status
736 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
737 {
738         struct ice_vsi *vsi = rxq->vsi;
739         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
740         uint32_t rxdid = ICE_RXDID_LEGACY_1;
741         struct ice_rlan_ctx rx_ctx;
742         enum ice_status err;
743         uint32_t regval;
744
745         rxq->rx_hdr_len = 0;
746         rxq->rx_buf_len = 1024;
747
748         memset(&rx_ctx, 0, sizeof(rx_ctx));
749
750         rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
751         rx_ctx.qlen = rxq->nb_rx_desc;
752         rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
753         rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
754         rx_ctx.dtype = 0; /* No Header Split mode */
755         rx_ctx.dsize = 1; /* 32B descriptors */
756         rx_ctx.rxmax = ICE_ETH_MAX_LEN;
757         /* TPH: Transaction Layer Packet (TLP) processing hints */
758         rx_ctx.tphrdesc_ena = 1;
759         rx_ctx.tphwdesc_ena = 1;
760         rx_ctx.tphdata_ena = 1;
761         rx_ctx.tphhead_ena = 1;
762         /* Low Receive Queue Threshold, defined in units of 64 descriptors.
763          * When the number of free descriptors goes below the lrxqthresh,
764          * an immediate interrupt is triggered.
765          */
766         rx_ctx.lrxqthresh = 2;
767         /* default to 32 byte descriptors, VLAN tag extracted to L2TAG2 (1st) */
768         rx_ctx.l2tsel = 1;
769         rx_ctx.showiv = 0;
770         rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
771
772         /* Enable Flexible Descriptors in the queue context which
773          * allows this driver to select a specific receive descriptor format
774          */
775         regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
776                 QRXFLXP_CNTXT_RXDID_IDX_M;
777
778         /* increasing context priority to pick up profile ID;
779          * default is 0x01; setting to 0x03 to ensure profile
780          * is programmed if the previous context is of the same priority
781          */
782         regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
783                 QRXFLXP_CNTXT_RXDID_PRIO_M;
784
785         ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
786
787         err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
788         if (err) {
789                 PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
790                             rxq->queue_id);
791                 return -EINVAL;
792         }
793         err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
794         if (err) {
795                 PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
796                             rxq->queue_id);
797                 return -EINVAL;
798         }
799
800         rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
801
802         /* Init the Rx tail register*/
803         ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
804
805         return 0;
806 }
807
808 int
809 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
810 {
811         struct ice_rx_queue *rxq;
812         int err;
813         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
814         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
815
816         PMD_INIT_FUNC_TRACE();
817
818         rxq = pf->fdir.rxq;
819         if (!rxq || !rxq->q_set) {
820                 PMD_DRV_LOG(ERR, "FDIR RX queue %u is not available or not set up",
821                             rx_queue_id);
822                 return -EINVAL;
823         }
824
825         err = ice_fdir_program_hw_rx_queue(rxq);
826         if (err) {
827                 PMD_DRV_LOG(ERR, "failed to program FDIR RX queue %u",
828                             rx_queue_id);
829                 return -EIO;
830         }
831
832         /* Init the RX tail register. */
833         ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
834
835         err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
836         if (err) {
837                 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
838                             rx_queue_id);
839
840                 ice_reset_rx_queue(rxq);
841                 return -EINVAL;
842         }
843
844         return 0;
845 }
846
847 int
848 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
849 {
850         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
851         struct ice_tx_queue *txq;
852         int err;
853         struct ice_vsi *vsi;
854         struct ice_hw *hw;
855         struct ice_aqc_add_tx_qgrp *txq_elem;
856         struct ice_tlan_ctx tx_ctx;
857         int buf_len;
858
859         PMD_INIT_FUNC_TRACE();
860
861         txq = pf->fdir.txq;
862         if (!txq || !txq->q_set) {
863                 PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or not set up",
864                             tx_queue_id);
865                 return -EINVAL;
866         }
867
868         buf_len = ice_struct_size(txq_elem, txqs, 1);
869         txq_elem = ice_malloc(hw, buf_len);
870         if (!txq_elem)
871                 return -ENOMEM;
872
873         vsi = txq->vsi;
874         hw = ICE_VSI_TO_HW(vsi);
875
876         memset(&tx_ctx, 0, sizeof(tx_ctx));
877         txq_elem->num_txqs = 1;
878         txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
879
880         tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
881         tx_ctx.qlen = txq->nb_tx_desc;
882         tx_ctx.pf_num = hw->pf_id;
883         tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
884         tx_ctx.src_vsi = vsi->vsi_id;
885         tx_ctx.port_num = hw->port_info->lport;
886         tx_ctx.tso_ena = 1; /* tso enable */
887         tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
888         tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
889
890         ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
891                     ice_tlan_ctx_info);
892
893         txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
894
895         /* Init the Tx tail register*/
896         ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
897
898         /* Fix me, we assume TC always 0 here */
899         err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
900                               txq_elem, buf_len, NULL);
901         if (err) {
902                 PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
903                 rte_free(txq_elem);
904                 return -EIO;
905         }
906         /* store the schedule node id */
907         txq->q_teid = txq_elem->txqs[0].q_teid;
908
909         rte_free(txq_elem);
910         return 0;
911 }
912
913 /* Free all mbufs for descriptors in tx queue */
914 static void
915 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
916 {
917         uint16_t i;
918
919         if (!txq || !txq->sw_ring) {
920                 PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
921                 return;
922         }
923
924         for (i = 0; i < txq->nb_tx_desc; i++) {
925                 if (txq->sw_ring[i].mbuf) {
926                         rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
927                         txq->sw_ring[i].mbuf = NULL;
928                 }
929         }
930 }
931
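/* Reset fields in ice_tx_queue back to default: zero the ring, mark all
 * descriptors as done and re-link the software ring entries.
 */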
932 static void
933 ice_reset_tx_queue(struct ice_tx_queue *txq)
934 {
935         struct ice_tx_entry *txe;
936         uint16_t i, prev, size;
937
938         if (!txq) {
939                 PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
940                 return;
941         }
942
943         txe = txq->sw_ring;
944         size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
945         for (i = 0; i < size; i++)
946                 ((volatile char *)txq->tx_ring)[i] = 0;
947
948         prev = (uint16_t)(txq->nb_tx_desc - 1);
949         for (i = 0; i < txq->nb_tx_desc; i++) {
950                 volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
951
952                 txd->cmd_type_offset_bsz =
953                         rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
954                 txe[i].mbuf =  NULL;
955                 txe[i].last_id = i;
956                 txe[prev].next_id = i;
957                 prev = i;
958         }
959
960         txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
961         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
962
963         txq->tx_tail = 0;
964         txq->nb_tx_used = 0;
965
966         txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
967         txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
968 }
969
970 int
971 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
972 {
973         struct ice_tx_queue *txq;
974         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
975         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
976         struct ice_vsi *vsi = pf->main_vsi;
977         enum ice_status status;
978         uint16_t q_ids[1];
979         uint32_t q_teids[1];
980         uint16_t q_handle = tx_queue_id;
981
982         if (tx_queue_id >= dev->data->nb_tx_queues) {
983                 PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
984                             tx_queue_id, dev->data->nb_tx_queues);
985                 return -EINVAL;
986         }
987
988         txq = dev->data->tx_queues[tx_queue_id];
989         if (!txq) {
990                 PMD_DRV_LOG(ERR, "TX queue %u is not available",
991                             tx_queue_id);
992                 return -EINVAL;
993         }
994
995         q_ids[0] = txq->reg_idx;
996         q_teids[0] = txq->q_teid;
997
998         /* Fix me, we assume TC always 0 here */
999         status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1000                                 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1001         if (status != ICE_SUCCESS) {
1002                 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1003                 return -EINVAL;
1004         }
1005
1006         txq->tx_rel_mbufs(txq);
1007         ice_reset_tx_queue(txq);
1008         dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1009
1010         return 0;
1011 }
1012
1013 int
1014 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1015 {
1016         struct ice_rx_queue *rxq;
1017         int err;
1018         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1019         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1020
1021         rxq = pf->fdir.rxq;
1022
1023         err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
1024         if (err) {
1025                 PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
1026                             rx_queue_id);
1027                 return -EINVAL;
1028         }
1029         rxq->rx_rel_mbufs(rxq);
1030
1031         return 0;
1032 }
1033
1034 int
1035 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1036 {
1037         struct ice_tx_queue *txq;
1038         struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1039         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1040         struct ice_vsi *vsi = pf->main_vsi;
1041         enum ice_status status;
1042         uint16_t q_ids[1];
1043         uint32_t q_teids[1];
1044         uint16_t q_handle = tx_queue_id;
1045
1046         txq = pf->fdir.txq;
1047         if (!txq) {
1048                 PMD_DRV_LOG(ERR, "TX queue %u is not available",
1049                             tx_queue_id);
1050                 return -EINVAL;
1051         }
1052         vsi = txq->vsi;
1053
1054         q_ids[0] = txq->reg_idx;
1055         q_teids[0] = txq->q_teid;
1056
1057         /* Fix me, we assume TC always 0 here */
1058         status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1059                                  q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1060         if (status != ICE_SUCCESS) {
1061                 PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1062                 return -EINVAL;
1063         }
1064
1065         txq->tx_rel_mbufs(txq);
1066
1067         return 0;
1068 }
1069
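/* Device op: allocate and initialize an Rx queue (DMA descriptor ring and
 * software ring) and check whether the bulk allocation path can be used.
 */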
1070 int
1071 ice_rx_queue_setup(struct rte_eth_dev *dev,
1072                    uint16_t queue_idx,
1073                    uint16_t nb_desc,
1074                    unsigned int socket_id,
1075                    const struct rte_eth_rxconf *rx_conf,
1076                    struct rte_mempool *mp)
1077 {
1078         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1079         struct ice_adapter *ad =
1080                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1081         struct ice_vsi *vsi = pf->main_vsi;
1082         struct ice_rx_queue *rxq;
1083         const struct rte_memzone *rz;
1084         uint32_t ring_size;
1085         uint16_t len;
1086         int use_def_burst_func = 1;
1087         uint64_t offloads;
1088
1089         if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1090             nb_desc > ICE_MAX_RING_DESC ||
1091             nb_desc < ICE_MIN_RING_DESC) {
1092                 PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1093                              "invalid", nb_desc);
1094                 return -EINVAL;
1095         }
1096
1097         offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1098
1099         /* Free memory if needed */
1100         if (dev->data->rx_queues[queue_idx]) {
1101                 ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1102                 dev->data->rx_queues[queue_idx] = NULL;
1103         }
1104
1105         /* Allocate the rx queue data structure */
1106         rxq = rte_zmalloc_socket(NULL,
1107                                  sizeof(struct ice_rx_queue),
1108                                  RTE_CACHE_LINE_SIZE,
1109                                  socket_id);
1110         if (!rxq) {
1111                 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1112                              "rx queue data structure");
1113                 return -ENOMEM;
1114         }
1115         rxq->mp = mp;
1116         rxq->nb_rx_desc = nb_desc;
1117         rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1118         rxq->queue_id = queue_idx;
1119         rxq->offloads = offloads;
1120
1121         rxq->reg_idx = vsi->base_queue + queue_idx;
1122         rxq->port_id = dev->data->port_id;
1123         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1124                 rxq->crc_len = RTE_ETHER_CRC_LEN;
1125         else
1126                 rxq->crc_len = 0;
1127
1128         rxq->drop_en = rx_conf->rx_drop_en;
1129         rxq->vsi = vsi;
1130         rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1131         rxq->proto_xtr = pf->proto_xtr != NULL ?
1132                          pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
1133
1134         /* Allocate the maximum number of RX ring hardware descriptors. */
1135         len = ICE_MAX_RING_DESC;
1136
1137         /**
1138          * Allocating a little more memory because vectorized/bulk_alloc Rx
1139          * functions don't check boundaries each time.
1140          */
1141         len += ICE_RX_MAX_BURST;
1142
1143         /* Reserve DMA memory for the maximum number of RX ring hardware descriptors. */
1144         ring_size = sizeof(union ice_rx_flex_desc) * len;
1145         ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1146         rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1147                                       ring_size, ICE_RING_BASE_ALIGN,
1148                                       socket_id);
1149         if (!rz) {
1150                 ice_rx_queue_release(rxq);
1151                 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1152                 return -ENOMEM;
1153         }
1154
1155         rxq->mz = rz;
1156         /* Zero all the descriptors in the ring. */
1157         memset(rz->addr, 0, ring_size);
1158
1159         rxq->rx_ring_dma = rz->iova;
1160         rxq->rx_ring = rz->addr;
1161
1162         /* always reserve more for bulk alloc */
1163         len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1164
1165         /* Allocate the software ring. */
1166         rxq->sw_ring = rte_zmalloc_socket(NULL,
1167                                           sizeof(struct ice_rx_entry) * len,
1168                                           RTE_CACHE_LINE_SIZE,
1169                                           socket_id);
1170         if (!rxq->sw_ring) {
1171                 ice_rx_queue_release(rxq);
1172                 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1173                 return -ENOMEM;
1174         }
1175
1176         ice_reset_rx_queue(rxq);
1177         rxq->q_set = true;
1178         dev->data->rx_queues[queue_idx] = rxq;
1179         rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1180
1181         use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
1182
1183         if (!use_def_burst_func) {
1184                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1185                              "satisfied. Rx Burst Bulk Alloc function will be "
1186                              "used on port=%d, queue=%d.",
1187                              rxq->port_id, rxq->queue_id);
1188         } else {
1189                 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1190                              "not satisfied, Scattered Rx is requested "
1191                              "on port=%d, queue=%d.",
1192                              rxq->port_id, rxq->queue_id);
1193                 ad->rx_bulk_alloc_allowed = false;
1194         }
1195
1196         return 0;
1197 }
1198
1199 void
1200 ice_rx_queue_release(void *rxq)
1201 {
1202         struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1203
1204         if (!q) {
1205                 PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1206                 return;
1207         }
1208
1209         q->rx_rel_mbufs(q);
1210         rte_free(q->sw_ring);
1211         rte_memzone_free(q->mz);
1212         rte_free(q);
1213 }
1214
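/* Device op: validate the threshold constraints, then allocate and
 * initialize a Tx queue (DMA descriptor ring and software ring).
 */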
1215 int
1216 ice_tx_queue_setup(struct rte_eth_dev *dev,
1217                    uint16_t queue_idx,
1218                    uint16_t nb_desc,
1219                    unsigned int socket_id,
1220                    const struct rte_eth_txconf *tx_conf)
1221 {
1222         struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1223         struct ice_vsi *vsi = pf->main_vsi;
1224         struct ice_tx_queue *txq;
1225         const struct rte_memzone *tz;
1226         uint32_t ring_size;
1227         uint16_t tx_rs_thresh, tx_free_thresh;
1228         uint64_t offloads;
1229
1230         offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1231
1232         if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1233             nb_desc > ICE_MAX_RING_DESC ||
1234             nb_desc < ICE_MIN_RING_DESC) {
1235                 PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1236                              "invalid", nb_desc);
1237                 return -EINVAL;
1238         }
1239
1240         /**
1241          * The following two parameters control the setting of the RS bit on
1242          * transmit descriptors. TX descriptors will have their RS bit set
1243          * after txq->tx_rs_thresh descriptors have been used. The TX
1244          * descriptor ring will be cleaned after txq->tx_free_thresh
1245          * descriptors are used or if the number of descriptors required to
1246          * transmit a packet is greater than the number of free TX descriptors.
1247          *
1248          * The following constraints must be satisfied:
1249          *  - tx_rs_thresh must be greater than 0.
1250          *  - tx_rs_thresh must be less than the size of the ring minus 2.
1251          *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
1252          *  - tx_rs_thresh must be a divisor of the ring size.
1253          *  - tx_free_thresh must be greater than 0.
1254          *  - tx_free_thresh must be less than the size of the ring minus 3.
1255          *  - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1256          *
1257          * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1258          * race condition, hence the maximum threshold constraints. When set
1259          * to zero use default values.
1260          */
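        /* Worked example with illustrative values (not necessarily the
         * driver defaults): nb_desc = 512, tx_rs_thresh = 32 and
         * tx_free_thresh = 32 satisfy all constraints above (32 > 0,
         * 32 < 510, 32 <= 32, 32 divides 512, 32 < 509, 32 + 32 <= 512);
         * the RS bit is then requested about every 32 descriptors and,
         * roughly speaking, cleanup of completed descriptors is attempted
         * once fewer than 32 descriptors remain free.
         */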
1261         tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1262                                     tx_conf->tx_free_thresh :
1263                                     ICE_DEFAULT_TX_FREE_THRESH);
1264         /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
1265         tx_rs_thresh =
1266                 (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1267                         nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1268         if (tx_conf->tx_rs_thresh)
1269                 tx_rs_thresh = tx_conf->tx_rs_thresh;
1270         if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1271                 PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1272                                 "exceed nb_desc. (tx_rs_thresh=%u "
1273                                 "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1274                                 (unsigned int)tx_rs_thresh,
1275                                 (unsigned int)tx_free_thresh,
1276                                 (unsigned int)nb_desc,
1277                                 (int)dev->data->port_id,
1278                                 (int)queue_idx);
1279                 return -EINVAL;
1280         }
1281         if (tx_rs_thresh >= (nb_desc - 2)) {
1282                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1283                              "number of TX descriptors minus 2. "
1284                              "(tx_rs_thresh=%u port=%d queue=%d)",
1285                              (unsigned int)tx_rs_thresh,
1286                              (int)dev->data->port_id,
1287                              (int)queue_idx);
1288                 return -EINVAL;
1289         }
1290         if (tx_free_thresh >= (nb_desc - 3)) {
1291                 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
1293                              "number of TX descriptors minus 3. "
1294                              "(tx_free_thresh=%u port=%d queue=%d)",
1295                              (unsigned int)tx_free_thresh,
1296                              (int)dev->data->port_id,
1297                              (int)queue_idx);
1298                 return -EINVAL;
1299         }
1300         if (tx_rs_thresh > tx_free_thresh) {
1301                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1302                              "equal to tx_free_thresh. (tx_free_thresh=%u"
1303                              " tx_rs_thresh=%u port=%d queue=%d)",
1304                              (unsigned int)tx_free_thresh,
1305                              (unsigned int)tx_rs_thresh,
1306                              (int)dev->data->port_id,
1307                              (int)queue_idx);
1308                 return -EINVAL;
1309         }
1310         if ((nb_desc % tx_rs_thresh) != 0) {
1311                 PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1312                              "number of TX descriptors. (tx_rs_thresh=%u"
1313                              " port=%d queue=%d)",
1314                              (unsigned int)tx_rs_thresh,
1315                              (int)dev->data->port_id,
1316                              (int)queue_idx);
1317                 return -EINVAL;
1318         }
1319         if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1320                 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1321                              "tx_rs_thresh is greater than 1. "
1322                              "(tx_rs_thresh=%u port=%d queue=%d)",
1323                              (unsigned int)tx_rs_thresh,
1324                              (int)dev->data->port_id,
1325                              (int)queue_idx);
1326                 return -EINVAL;
1327         }
1328
1329         /* Free memory if needed. */
1330         if (dev->data->tx_queues[queue_idx]) {
1331                 ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1332                 dev->data->tx_queues[queue_idx] = NULL;
1333         }
1334
1335         /* Allocate the TX queue data structure. */
1336         txq = rte_zmalloc_socket(NULL,
1337                                  sizeof(struct ice_tx_queue),
1338                                  RTE_CACHE_LINE_SIZE,
1339                                  socket_id);
1340         if (!txq) {
1341                 PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1342                              "tx queue structure");
1343                 return -ENOMEM;
1344         }
1345
1346         /* Allocate TX hardware ring descriptors. */
1347         ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1348         ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1349         tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1350                                       ring_size, ICE_RING_BASE_ALIGN,
1351                                       socket_id);
1352         if (!tz) {
1353                 ice_tx_queue_release(txq);
1354                 PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1355                 return -ENOMEM;
1356         }
1357
1358         txq->mz = tz;
1359         txq->nb_tx_desc = nb_desc;
1360         txq->tx_rs_thresh = tx_rs_thresh;
1361         txq->tx_free_thresh = tx_free_thresh;
1362         txq->pthresh = tx_conf->tx_thresh.pthresh;
1363         txq->hthresh = tx_conf->tx_thresh.hthresh;
1364         txq->wthresh = tx_conf->tx_thresh.wthresh;
1365         txq->queue_id = queue_idx;
1366
1367         txq->reg_idx = vsi->base_queue + queue_idx;
1368         txq->port_id = dev->data->port_id;
1369         txq->offloads = offloads;
1370         txq->vsi = vsi;
1371         txq->tx_deferred_start = tx_conf->tx_deferred_start;
1372
1373         txq->tx_ring_dma = tz->iova;
1374         txq->tx_ring = tz->addr;
1375
1376         /* Allocate software ring */
1377         txq->sw_ring =
1378                 rte_zmalloc_socket(NULL,
1379                                    sizeof(struct ice_tx_entry) * nb_desc,
1380                                    RTE_CACHE_LINE_SIZE,
1381                                    socket_id);
1382         if (!txq->sw_ring) {
1383                 ice_tx_queue_release(txq);
1384                 PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1385                 return -ENOMEM;
1386         }
1387
1388         ice_reset_tx_queue(txq);
1389         txq->q_set = true;
1390         dev->data->tx_queues[queue_idx] = txq;
1391         txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1392         ice_set_tx_function_flag(dev, txq);
1393
1394         return 0;
1395 }
1396
1397 void
1398 ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1399 {
1400         ice_rx_queue_release(dev->data->rx_queues[qid]);
1401 }
1402
1403 void
1404 ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1405 {
1406         ice_tx_queue_release(dev->data->tx_queues[qid]);
1407 }
1408
1409 void
1410 ice_tx_queue_release(void *txq)
1411 {
1412         struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1413
1414         if (!q) {
1415                 PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1416                 return;
1417         }
1418
1419         q->tx_rel_mbufs(q);
1420         rte_free(q->sw_ring);
1421         rte_memzone_free(q->mz);
1422         rte_free(q);
1423 }
1424
1425 void
1426 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1427                  struct rte_eth_rxq_info *qinfo)
1428 {
1429         struct ice_rx_queue *rxq;
1430
1431         rxq = dev->data->rx_queues[queue_id];
1432
1433         qinfo->mp = rxq->mp;
1434         qinfo->scattered_rx = dev->data->scattered_rx;
1435         qinfo->nb_desc = rxq->nb_rx_desc;
1436
1437         qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1438         qinfo->conf.rx_drop_en = rxq->drop_en;
1439         qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1440 }
1441
1442 void
1443 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1444                  struct rte_eth_txq_info *qinfo)
1445 {
1446         struct ice_tx_queue *txq;
1447
1448         txq = dev->data->tx_queues[queue_id];
1449
1450         qinfo->nb_desc = txq->nb_tx_desc;
1451
1452         qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1453         qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1454         qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1455
1456         qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1457         qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1458         qinfo->conf.offloads = txq->offloads;
1459         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1460 }
1461
1462 uint32_t
1463 ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1464 {
1465 #define ICE_RXQ_SCAN_INTERVAL 4
1466         volatile union ice_rx_flex_desc *rxdp;
1467         struct ice_rx_queue *rxq;
1468         uint16_t desc = 0;
1469
1470         rxq = dev->data->rx_queues[rx_queue_id];
1471         rxdp = &rxq->rx_ring[rxq->rx_tail];
1472         while ((desc < rxq->nb_rx_desc) &&
1473                rte_le_to_cpu_16(rxdp->wb.status_error0) &
1474                (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1475                 /**
1476                  * Check the DD bit of only one Rx descriptor out of every
1477                  * group of 4, to avoid polling the ring too frequently and
1478                  * degrading performance too much.
1479                  */
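                /*
                 * Note: because only every ICE_RXQ_SCAN_INTERVAL-th descriptor
                 * is sampled, the count returned below is an estimate quantized
                 * to multiples of ICE_RXQ_SCAN_INTERVAL.
                 */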
1480                 desc += ICE_RXQ_SCAN_INTERVAL;
1481                 rxdp += ICE_RXQ_SCAN_INTERVAL;
1482                 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1483                         rxdp = &(rxq->rx_ring[rxq->rx_tail +
1484                                  desc - rxq->nb_rx_desc]);
1485         }
1486
1487         return desc;
1488 }
1489
1490 #define ICE_RX_FLEX_ERR0_BITS   \
1491         ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) |        \
1492          (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |   \
1493          (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |   \
1494          (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |  \
1495          (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \
1496          (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1497
1498 /* Rx L3/L4 checksum */
1499 static inline uint64_t
1500 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1501 {
1502         uint64_t flags = 0;
1503
1504         /* check whether HW has parsed the packet and its checksum status is valid */
1505         if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1506                 return 0;
1507
1508         if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1509                 flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
1510                 return flags;
1511         }
1512
1513         if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1514                 flags |= PKT_RX_IP_CKSUM_BAD;
1515         else
1516                 flags |= PKT_RX_IP_CKSUM_GOOD;
1517
1518         if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1519                 flags |= PKT_RX_L4_CKSUM_BAD;
1520         else
1521                 flags |= PKT_RX_L4_CKSUM_GOOD;
1522
1523         if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1524                 flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
1525
1526         if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
1527                 flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
1528         else
1529                 flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
1530
1531         return flags;
1532 }
1533
1534 static inline void
1535 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1536 {
1537         if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1538             (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1539                 mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
1540                 mb->vlan_tci =
1541                         rte_le_to_cpu_16(rxdp->wb.l2tag1);
1542                 PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1543                            rte_le_to_cpu_16(rxdp->wb.l2tag1));
1544         } else {
1545                 mb->vlan_tci = 0;
1546         }
1547
1548 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1549         if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1550             (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1551                 mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
1552                                 PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
1553                 mb->vlan_tci_outer = mb->vlan_tci;
1554                 mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1555                 PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1556                            rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1557                            rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1558         } else {
1559                 mb->vlan_tci_outer = 0;
1560         }
1561 #endif
1562         PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1563                    mb->vlan_tci, mb->vlan_tci_outer);
1564 }
1565
1566 #define ICE_LOOK_AHEAD 8
1567 #if (ICE_LOOK_AHEAD != 8)
1568 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1569 #endif
1570 static inline int
1571 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1572 {
1573         volatile union ice_rx_flex_desc *rxdp;
1574         struct ice_rx_entry *rxep;
1575         struct rte_mbuf *mb;
1576         uint16_t stat_err0;
1577         uint16_t pkt_len;
1578         int32_t s[ICE_LOOK_AHEAD], nb_dd;
1579         int32_t i, j, nb_rx = 0;
1580         uint64_t pkt_flags = 0;
1581         uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1582         struct ice_vsi *vsi = rxq->vsi;
1583         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1584         uint64_t ts_ns;
1585         struct ice_adapter *ad = rxq->vsi->adapter;
1586
1587         rxdp = &rxq->rx_ring[rxq->rx_tail];
1588         rxep = &rxq->sw_ring[rxq->rx_tail];
1589
1590         stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1591
1592         /* Make sure there is at least 1 packet to receive */
1593         if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1594                 return 0;
1595
1596         /**
1597          * Scan LOOK_AHEAD descriptors at a time to determine which
1598          * descriptors reference packets that are ready to be received.
1599          */
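        /*
         * The loop below examines up to ICE_RX_MAX_BURST descriptors in groups
         * of ICE_LOOK_AHEAD (8) and stops at the first group in which fewer
         * than ICE_LOOK_AHEAD DD bits are set.
         */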
1600         for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1601              rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1602                 /* Read desc statuses backwards to avoid race condition */
1603                 for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1604                         s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1605
1606                 rte_smp_rmb();
1607
1608                 /* Compute how many status bits were set */
1609                 for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1610                         nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1611
1612                 nb_rx += nb_dd;
1613
1614                 /* Translate descriptor info to mbuf parameters */
1615                 for (j = 0; j < nb_dd; j++) {
1616                         mb = rxep[j].mbuf;
1617                         pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1618                                    ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1619                         mb->data_len = pkt_len;
1620                         mb->pkt_len = pkt_len;
1621                         mb->ol_flags = 0;
1622                         stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1623                         pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1624                         mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1625                                 rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1626                         ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1627                         rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
1628
1629                         if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
1630                                 ts_ns = ice_tstamp_convert_32b_64b(hw,
1631                                         rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
1632                                 if (ice_timestamp_dynflag > 0) {
1633                                         *RTE_MBUF_DYNFIELD(mb,
1634                                                 ice_timestamp_dynfield_offset,
1635                                                 rte_mbuf_timestamp_t *) = ts_ns;
1636                                         mb->ol_flags |= ice_timestamp_dynflag;
1637                                 }
1638                         }
1639
1640                         if (ad->ptp_ena && ((mb->packet_type &
1641                             RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
1642                                 rxq->time_high =
1643                                    rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
1644                                 mb->timesync = rxq->queue_id;
1645                                 pkt_flags |= PKT_RX_IEEE1588_PTP;
1646                         }
1647
1648                         mb->ol_flags |= pkt_flags;
1649                 }
1650
1651                 for (j = 0; j < ICE_LOOK_AHEAD; j++)
1652                         rxq->rx_stage[i + j] = rxep[j].mbuf;
1653
1654                 if (nb_dd != ICE_LOOK_AHEAD)
1655                         break;
1656         }
1657
1658         /* Clear software ring entries */
1659         for (i = 0; i < nb_rx; i++)
1660                 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1661
1662         PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1663                    "port_id=%u, queue_id=%u, nb_rx=%d",
1664                    rxq->port_id, rxq->queue_id, nb_rx);
1665
1666         return nb_rx;
1667 }
1668
1669 static inline uint16_t
1670 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1671                        struct rte_mbuf **rx_pkts,
1672                        uint16_t nb_pkts)
1673 {
1674         uint16_t i;
1675         struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1676
1677         nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1678
1679         for (i = 0; i < nb_pkts; i++)
1680                 rx_pkts[i] = stage[i];
1681
1682         rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1683         rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1684
1685         return nb_pkts;
1686 }
1687
1688 static inline int
1689 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1690 {
1691         volatile union ice_rx_flex_desc *rxdp;
1692         struct ice_rx_entry *rxep;
1693         struct rte_mbuf *mb;
1694         uint16_t alloc_idx, i;
1695         uint64_t dma_addr;
1696         int diag;
1697
1698         /* Allocate buffers in bulk */
1699         alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1700                                (rxq->rx_free_thresh - 1));
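        /*
         * Illustrative example (values assumed): with rx_free_thresh = 32 and
         * rx_free_trigger = 31, alloc_idx is 0, so software ring entries
         * 0..31 are refilled by the single bulk allocation below.
         */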
1701         rxep = &rxq->sw_ring[alloc_idx];
1702         diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1703                                     rxq->rx_free_thresh);
1704         if (unlikely(diag != 0)) {
1705                 PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1706                 return -ENOMEM;
1707         }
1708
1709         rxdp = &rxq->rx_ring[alloc_idx];
1710         for (i = 0; i < rxq->rx_free_thresh; i++) {
1711                 if (likely(i < (rxq->rx_free_thresh - 1)))
1712                         /* Prefetch next mbuf */
1713                         rte_prefetch0(rxep[i + 1].mbuf);
1714
1715                 mb = rxep[i].mbuf;
1716                 rte_mbuf_refcnt_set(mb, 1);
1717                 mb->next = NULL;
1718                 mb->data_off = RTE_PKTMBUF_HEADROOM;
1719                 mb->nb_segs = 1;
1720                 mb->port = rxq->port_id;
1721                 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1722                 rxdp[i].read.hdr_addr = 0;
1723                 rxdp[i].read.pkt_addr = dma_addr;
1724         }
1725
1726         /* Update Rx tail register */
1727         ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1728
1729         rxq->rx_free_trigger =
1730                 (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1731         if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1732                 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1733
1734         return 0;
1735 }
1736
1737 static inline uint16_t
1738 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1739 {
1740         struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1741         uint16_t nb_rx = 0;
1742
1743         if (!nb_pkts)
1744                 return 0;
1745
1746         if (rxq->rx_nb_avail)
1747                 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1748
1749         nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1750         rxq->rx_next_avail = 0;
1751         rxq->rx_nb_avail = nb_rx;
1752         rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1753
1754         if (rxq->rx_tail > rxq->rx_free_trigger) {
1755                 if (ice_rx_alloc_bufs(rxq) != 0) {
1756                         uint16_t i, j;
1757
1758                         rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
1759                                 rxq->rx_free_thresh;
1760                         PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1761                                    "port_id=%u, queue_id=%u",
1762                                    rxq->port_id, rxq->queue_id);
1763                         rxq->rx_nb_avail = 0;
1764                         rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1765                         for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1766                                 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1767
1768                         return 0;
1769                 }
1770         }
1771
1772         if (rxq->rx_tail >= rxq->nb_rx_desc)
1773                 rxq->rx_tail = 0;
1774
1775         if (rxq->rx_nb_avail)
1776                 return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1777
1778         return 0;
1779 }
1780
1781 static uint16_t
1782 ice_recv_pkts_bulk_alloc(void *rx_queue,
1783                          struct rte_mbuf **rx_pkts,
1784                          uint16_t nb_pkts)
1785 {
1786         uint16_t nb_rx = 0;
1787         uint16_t n;
1788         uint16_t count;
1789
1790         if (unlikely(nb_pkts == 0))
1791                 return nb_rx;
1792
1793         if (likely(nb_pkts <= ICE_RX_MAX_BURST))
1794                 return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1795
1796         while (nb_pkts) {
1797                 n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
1798                 count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1799                 nb_rx = (uint16_t)(nb_rx + count);
1800                 nb_pkts = (uint16_t)(nb_pkts - count);
1801                 if (count < n)
1802                         break;
1803         }
1804
1805         return nb_rx;
1806 }
1807
1808 static uint16_t
1809 ice_recv_scattered_pkts(void *rx_queue,
1810                         struct rte_mbuf **rx_pkts,
1811                         uint16_t nb_pkts)
1812 {
1813         struct ice_rx_queue *rxq = rx_queue;
1814         volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
1815         volatile union ice_rx_flex_desc *rxdp;
1816         union ice_rx_flex_desc rxd;
1817         struct ice_rx_entry *sw_ring = rxq->sw_ring;
1818         struct ice_rx_entry *rxe;
1819         struct rte_mbuf *first_seg = rxq->pkt_first_seg;
1820         struct rte_mbuf *last_seg = rxq->pkt_last_seg;
1821         struct rte_mbuf *nmb; /* new allocated mbuf */
1822         struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
1823         uint16_t rx_id = rxq->rx_tail;
1824         uint16_t nb_rx = 0;
1825         uint16_t nb_hold = 0;
1826         uint16_t rx_packet_len;
1827         uint16_t rx_stat_err0;
1828         uint64_t dma_addr;
1829         uint64_t pkt_flags;
1830         uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1831         struct ice_vsi *vsi = rxq->vsi;
1832         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1833         uint64_t ts_ns;
1834         struct ice_adapter *ad = rxq->vsi->adapter;
1835
1836         while (nb_rx < nb_pkts) {
1837                 rxdp = &rx_ring[rx_id];
1838                 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1839
1840                 /* Check the DD bit first */
1841                 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1842                         break;
1843
1844                 /* allocate mbuf */
1845                 nmb = rte_mbuf_raw_alloc(rxq->mp);
1846                 if (unlikely(!nmb)) {
1847                         rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
1848                         break;
1849                 }
1850                 rxd = *rxdp; /* copy descriptor in ring to temp variable */
1851
1852                 nb_hold++;
1853                 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
1854                 rx_id++;
1855                 if (unlikely(rx_id == rxq->nb_rx_desc))
1856                         rx_id = 0;
1857
1858                 /* Prefetch next mbuf */
1859                 rte_prefetch0(sw_ring[rx_id].mbuf);
1860
1861                 /**
1862                  * When the next RX descriptor is on a cache line boundary,
1863                  * prefetch the next 4 RX descriptors and the next 8 pointers
1864                  * to mbufs.
1865                  */
1866                 if ((rx_id & 0x3) == 0) {
1867                         rte_prefetch0(&rx_ring[rx_id]);
1868                         rte_prefetch0(&sw_ring[rx_id]);
1869                 }
1870
1871                 rxm = rxe->mbuf;
1872                 rxe->mbuf = nmb;
1873                 dma_addr =
1874                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1875
1876                 /* Set data buffer address and data length of the mbuf */
1877                 rxdp->read.hdr_addr = 0;
1878                 rxdp->read.pkt_addr = dma_addr;
1879                 rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
1880                                 ICE_RX_FLX_DESC_PKT_LEN_M;
1881                 rxm->data_len = rx_packet_len;
1882                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1883
1884                 /**
1885                  * If this is the first buffer of the received packet, set the
1886                  * pointer to the first mbuf of the packet and initialize its
1887                  * context. Otherwise, update the total length and the number
1888                  * of segments of the current scattered packet, and update the
1889                  * pointer to the last mbuf of the current packet.
1890                  */
1891                 if (!first_seg) {
1892                         first_seg = rxm;
1893                         first_seg->nb_segs = 1;
1894                         first_seg->pkt_len = rx_packet_len;
1895                 } else {
1896                         first_seg->pkt_len =
1897                                 (uint16_t)(first_seg->pkt_len +
1898                                            rx_packet_len);
1899                         first_seg->nb_segs++;
1900                         last_seg->next = rxm;
1901                 }
1902
1903                 /**
1904                  * If this is not the last buffer of the received packet,
1905                  * update the pointer to the last mbuf of the current scattered
1906                  * packet and continue to parse the RX ring.
1907                  */
1908                 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
1909                         last_seg = rxm;
1910                         continue;
1911                 }
1912
1913                 /**
1914                  * This is the last buffer of the received packet. If the CRC
1915                  * is not stripped by the hardware:
1916                  *  - Subtract the CRC length from the total packet length.
1917                  *  - If the last buffer contains only the CRC or part of it,
1918                  *  free the mbuf associated with the last buffer. If part of
1919                  *  the CRC is also contained in the previous mbuf, subtract
1920                  *  the length of that CRC part from the data length of the
1921                  *  previous mbuf.
1922                  */
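                /*
                 * Worked example: with a 4-byte CRC kept by HW and
                 * rx_packet_len == 2, the last mbuf holds only CRC bytes, so it
                 * is freed and the remaining 2 CRC bytes are trimmed from the
                 * previous segment's data_len.
                 */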
1923                 rxm->next = NULL;
1924                 if (unlikely(rxq->crc_len > 0)) {
1925                         first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1926                         if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
1927                                 rte_pktmbuf_free_seg(rxm);
1928                                 first_seg->nb_segs--;
1929                                 last_seg->data_len =
1930                                         (uint16_t)(last_seg->data_len -
1931                                         (RTE_ETHER_CRC_LEN - rx_packet_len));
1932                                 last_seg->next = NULL;
1933                         } else
1934                                 rxm->data_len = (uint16_t)(rx_packet_len -
1935                                                            RTE_ETHER_CRC_LEN);
1936                 }
1937
1938                 first_seg->port = rxq->port_id;
1939                 first_seg->ol_flags = 0;
1940                 first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1941                         rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
1942                 ice_rxd_to_vlan_tci(first_seg, &rxd);
1943                 rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
1944                 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
1945
1946                 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
1947                         ts_ns = ice_tstamp_convert_32b_64b(hw,
1948                                 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
1949                         if (ice_timestamp_dynflag > 0) {
1950                                 *RTE_MBUF_DYNFIELD(first_seg,
1951                                         ice_timestamp_dynfield_offset,
1952                                         rte_mbuf_timestamp_t *) = ts_ns;
1953                                 first_seg->ol_flags |= ice_timestamp_dynflag;
1954                         }
1955                 }
1956
1957                 if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
1958                     == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
1959                         rxq->time_high =
1960                            rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
1961                         first_seg->timesync = rxq->queue_id;
1962                         pkt_flags |= PKT_RX_IEEE1588_PTP;
1963                 }
1964
1965                 first_seg->ol_flags |= pkt_flags;
1966                 /* Prefetch data of first segment, if configured to do so. */
1967                 rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
1968                                           first_seg->data_off));
1969                 rx_pkts[nb_rx++] = first_seg;
1970                 first_seg = NULL;
1971         }
1972
1973         /* Record index of the next RX descriptor to probe. */
1974         rxq->rx_tail = rx_id;
1975         rxq->pkt_first_seg = first_seg;
1976         rxq->pkt_last_seg = last_seg;
1977
1978         /**
1979          * If the number of free RX descriptors is greater than the RX free
1980          * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1981          * register. Update the RDT with the value of the last processed RX
1982          * descriptor minus 1, to guarantee that the RDT register is never
1983          * equal to the RDH register, which creates a "full" ring situation
1984          * from the hardware point of view.
1985          */
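        /*
         * For example, with nb_rx_desc = 1024 and rx_id wrapped to 0, the tail
         * is written as 1023, so at least one descriptor always remains owned
         * by software.
         */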
1986         nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1987         if (nb_hold > rxq->rx_free_thresh) {
1988                 rx_id = (uint16_t)(rx_id == 0 ?
1989                                    (rxq->nb_rx_desc - 1) : (rx_id - 1));
1990                 /* write TAIL register */
1991                 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
1992                 nb_hold = 0;
1993         }
1994         rxq->nb_rx_hold = nb_hold;
1995
1996         /* return received packet in the burst */
1997         return nb_rx;
1998 }
1999
2000 const uint32_t *
2001 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
2002 {
2003         struct ice_adapter *ad =
2004                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2005         const uint32_t *ptypes;
2006
2007         static const uint32_t ptypes_os[] = {
2008                 /* refers to ice_get_default_pkt_type() */
2009                 RTE_PTYPE_L2_ETHER,
2010                 RTE_PTYPE_L2_ETHER_TIMESYNC,
2011                 RTE_PTYPE_L2_ETHER_LLDP,
2012                 RTE_PTYPE_L2_ETHER_ARP,
2013                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2014                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2015                 RTE_PTYPE_L4_FRAG,
2016                 RTE_PTYPE_L4_ICMP,
2017                 RTE_PTYPE_L4_NONFRAG,
2018                 RTE_PTYPE_L4_SCTP,
2019                 RTE_PTYPE_L4_TCP,
2020                 RTE_PTYPE_L4_UDP,
2021                 RTE_PTYPE_TUNNEL_GRENAT,
2022                 RTE_PTYPE_TUNNEL_IP,
2023                 RTE_PTYPE_INNER_L2_ETHER,
2024                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2025                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2026                 RTE_PTYPE_INNER_L4_FRAG,
2027                 RTE_PTYPE_INNER_L4_ICMP,
2028                 RTE_PTYPE_INNER_L4_NONFRAG,
2029                 RTE_PTYPE_INNER_L4_SCTP,
2030                 RTE_PTYPE_INNER_L4_TCP,
2031                 RTE_PTYPE_INNER_L4_UDP,
2032                 RTE_PTYPE_UNKNOWN
2033         };
2034
2035         static const uint32_t ptypes_comms[] = {
2036                 /* refers to ice_get_default_pkt_type() */
2037                 RTE_PTYPE_L2_ETHER,
2038                 RTE_PTYPE_L2_ETHER_TIMESYNC,
2039                 RTE_PTYPE_L2_ETHER_LLDP,
2040                 RTE_PTYPE_L2_ETHER_ARP,
2041                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2042                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2043                 RTE_PTYPE_L4_FRAG,
2044                 RTE_PTYPE_L4_ICMP,
2045                 RTE_PTYPE_L4_NONFRAG,
2046                 RTE_PTYPE_L4_SCTP,
2047                 RTE_PTYPE_L4_TCP,
2048                 RTE_PTYPE_L4_UDP,
2049                 RTE_PTYPE_TUNNEL_GRENAT,
2050                 RTE_PTYPE_TUNNEL_IP,
2051                 RTE_PTYPE_INNER_L2_ETHER,
2052                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2053                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2054                 RTE_PTYPE_INNER_L4_FRAG,
2055                 RTE_PTYPE_INNER_L4_ICMP,
2056                 RTE_PTYPE_INNER_L4_NONFRAG,
2057                 RTE_PTYPE_INNER_L4_SCTP,
2058                 RTE_PTYPE_INNER_L4_TCP,
2059                 RTE_PTYPE_INNER_L4_UDP,
2060                 RTE_PTYPE_TUNNEL_GTPC,
2061                 RTE_PTYPE_TUNNEL_GTPU,
2062                 RTE_PTYPE_L2_ETHER_PPPOE,
2063                 RTE_PTYPE_UNKNOWN
2064         };
2065
2066         if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
2067                 ptypes = ptypes_comms;
2068         else
2069                 ptypes = ptypes_os;
2070
2071         if (dev->rx_pkt_burst == ice_recv_pkts ||
2072             dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
2073             dev->rx_pkt_burst == ice_recv_scattered_pkts)
2074                 return ptypes;
2075
2076 #ifdef RTE_ARCH_X86
2077         if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
2078             dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
2079 #ifdef CC_AVX512_SUPPORT
2080             dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
2081             dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
2082             dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
2083             dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
2084 #endif
2085             dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
2086             dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
2087             dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
2088             dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
2089                 return ptypes;
2090 #endif
2091
2092         return NULL;
2093 }
2094
2095 int
2096 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
2097 {
2098         volatile union ice_rx_flex_desc *rxdp;
2099         struct ice_rx_queue *rxq = rx_queue;
2100         uint32_t desc;
2101
2102         if (unlikely(offset >= rxq->nb_rx_desc))
2103                 return -EINVAL;
2104
2105         if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2106                 return RTE_ETH_RX_DESC_UNAVAIL;
2107
2108         desc = rxq->rx_tail + offset;
2109         if (desc >= rxq->nb_rx_desc)
2110                 desc -= rxq->nb_rx_desc;
2111
2112         rxdp = &rxq->rx_ring[desc];
2113         if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
2114             (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
2115                 return RTE_ETH_RX_DESC_DONE;
2116
2117         return RTE_ETH_RX_DESC_AVAIL;
2118 }
2119
2120 int
2121 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
2122 {
2123         struct ice_tx_queue *txq = tx_queue;
2124         volatile uint64_t *status;
2125         uint64_t mask, expect;
2126         uint32_t desc;
2127
2128         if (unlikely(offset >= txq->nb_tx_desc))
2129                 return -EINVAL;
2130
2131         desc = txq->tx_tail + offset;
2132         /* go to next desc that has the RS bit */
2133         desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
2134                 txq->tx_rs_thresh;
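        /*
         * For example, with tx_rs_thresh = 32, tx_tail = 40 and offset = 10,
         * desc = 50 rounds up to 64, the next multiple of tx_rs_thresh, which
         * is where the RS bit is expected to be set.
         */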
2135         if (desc >= txq->nb_tx_desc) {
2136                 desc -= txq->nb_tx_desc;
2137                 if (desc >= txq->nb_tx_desc)
2138                         desc -= txq->nb_tx_desc;
2139         }
2140
2141         status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2142         mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2143         expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2144                                   ICE_TXD_QW1_DTYPE_S);
2145         if ((*status & mask) == expect)
2146                 return RTE_ETH_TX_DESC_DONE;
2147
2148         return RTE_ETH_TX_DESC_FULL;
2149 }
2150
2151 void
2152 ice_free_queues(struct rte_eth_dev *dev)
2153 {
2154         uint16_t i;
2155
2156         PMD_INIT_FUNC_TRACE();
2157
2158         for (i = 0; i < dev->data->nb_rx_queues; i++) {
2159                 if (!dev->data->rx_queues[i])
2160                         continue;
2161                 ice_rx_queue_release(dev->data->rx_queues[i]);
2162                 dev->data->rx_queues[i] = NULL;
2163         }
2164         dev->data->nb_rx_queues = 0;
2165
2166         for (i = 0; i < dev->data->nb_tx_queues; i++) {
2167                 if (!dev->data->tx_queues[i])
2168                         continue;
2169                 ice_tx_queue_release(dev->data->tx_queues[i]);
2170                 dev->data->tx_queues[i] = NULL;
2171         }
2172         dev->data->nb_tx_queues = 0;
2173 }
2174
2175 #define ICE_FDIR_NUM_TX_DESC  ICE_MIN_RING_DESC
2176 #define ICE_FDIR_NUM_RX_DESC  ICE_MIN_RING_DESC
2177
2178 int
2179 ice_fdir_setup_tx_resources(struct ice_pf *pf)
2180 {
2181         struct ice_tx_queue *txq;
2182         const struct rte_memzone *tz = NULL;
2183         uint32_t ring_size;
2184         struct rte_eth_dev *dev;
2185
2186         if (!pf) {
2187                 PMD_DRV_LOG(ERR, "PF is not available");
2188                 return -EINVAL;
2189         }
2190
2191         dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2192
2193         /* Allocate the TX queue data structure. */
2194         txq = rte_zmalloc_socket("ice fdir tx queue",
2195                                  sizeof(struct ice_tx_queue),
2196                                  RTE_CACHE_LINE_SIZE,
2197                                  SOCKET_ID_ANY);
2198         if (!txq) {
2199                 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2200                             "tx queue structure.");
2201                 return -ENOMEM;
2202         }
2203
2204         /* Allocate TX hardware ring descriptors. */
2205         ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2206         ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2207
2208         tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2209                                       ICE_FDIR_QUEUE_ID, ring_size,
2210                                       ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2211         if (!tz) {
2212                 ice_tx_queue_release(txq);
2213                 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2214                 return -ENOMEM;
2215         }
2216
2217         txq->mz = tz;
2218         txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2219         txq->queue_id = ICE_FDIR_QUEUE_ID;
2220         txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2221         txq->vsi = pf->fdir.fdir_vsi;
2222
2223         txq->tx_ring_dma = tz->iova;
2224         txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2225         /*
2226          * There is no need to allocate a software ring or reset it for the
2227          * FDIR program queue; just mark the queue as configured.
2228          */
2229         txq->q_set = true;
2230         pf->fdir.txq = txq;
2231
2232         txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2233
2234         return ICE_SUCCESS;
2235 }
2236
2237 int
2238 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2239 {
2240         struct ice_rx_queue *rxq;
2241         const struct rte_memzone *rz = NULL;
2242         uint32_t ring_size;
2243         struct rte_eth_dev *dev;
2244
2245         if (!pf) {
2246                 PMD_DRV_LOG(ERR, "PF is not available");
2247                 return -EINVAL;
2248         }
2249
2250         dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2251
2252         /* Allocate the RX queue data structure. */
2253         rxq = rte_zmalloc_socket("ice fdir rx queue",
2254                                  sizeof(struct ice_rx_queue),
2255                                  RTE_CACHE_LINE_SIZE,
2256                                  SOCKET_ID_ANY);
2257         if (!rxq) {
2258                 PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2259                             "rx queue structure.");
2260                 return -ENOMEM;
2261         }
2262
2263         /* Allocate RX hardware ring descriptors. */
2264         ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2265         ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2266
2267         rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2268                                       ICE_FDIR_QUEUE_ID, ring_size,
2269                                       ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2270         if (!rz) {
2271                 ice_rx_queue_release(rxq);
2272                 PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2273                 return -ENOMEM;
2274         }
2275
2276         rxq->mz = rz;
2277         rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2278         rxq->queue_id = ICE_FDIR_QUEUE_ID;
2279         rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2280         rxq->vsi = pf->fdir.fdir_vsi;
2281
2282         rxq->rx_ring_dma = rz->iova;
2283         memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2284                sizeof(union ice_32byte_rx_desc));
2285         rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2286
2287         /*
2288          * There is no need to allocate a software ring or reset it for the
2289          * FDIR Rx queue; just mark the queue as configured.
2290          */
2291         rxq->q_set = true;
2292         pf->fdir.rxq = rxq;
2293
2294         rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2295
2296         return ICE_SUCCESS;
2297 }
2298
2299 uint16_t
2300 ice_recv_pkts(void *rx_queue,
2301               struct rte_mbuf **rx_pkts,
2302               uint16_t nb_pkts)
2303 {
2304         struct ice_rx_queue *rxq = rx_queue;
2305         volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2306         volatile union ice_rx_flex_desc *rxdp;
2307         union ice_rx_flex_desc rxd;
2308         struct ice_rx_entry *sw_ring = rxq->sw_ring;
2309         struct ice_rx_entry *rxe;
2310         struct rte_mbuf *nmb; /* new allocated mbuf */
2311         struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2312         uint16_t rx_id = rxq->rx_tail;
2313         uint16_t nb_rx = 0;
2314         uint16_t nb_hold = 0;
2315         uint16_t rx_packet_len;
2316         uint16_t rx_stat_err0;
2317         uint64_t dma_addr;
2318         uint64_t pkt_flags;
2319         uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2320         struct ice_vsi *vsi = rxq->vsi;
2321         struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2322         uint64_t ts_ns;
2323         struct ice_adapter *ad = rxq->vsi->adapter;
2324
2325         while (nb_rx < nb_pkts) {
2326                 rxdp = &rx_ring[rx_id];
2327                 rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2328
2329                 /* Check the DD bit first */
2330                 if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2331                         break;
2332
2333                 /* allocate mbuf */
2334                 nmb = rte_mbuf_raw_alloc(rxq->mp);
2335                 if (unlikely(!nmb)) {
2336                         rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2337                         break;
2338                 }
2339                 rxd = *rxdp; /* copy descriptor in ring to temp variable */
2340
2341                 nb_hold++;
2342                 rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2343                 rx_id++;
2344                 if (unlikely(rx_id == rxq->nb_rx_desc))
2345                         rx_id = 0;
2346                 rxm = rxe->mbuf;
2347                 rxe->mbuf = nmb;
2348                 dma_addr =
2349                         rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2350
2351                 /**
2352                  * Fill the read format of the descriptor with the physical
2353                  * address of the newly allocated mbuf: nmb
2354                  */
2355                 rxdp->read.hdr_addr = 0;
2356                 rxdp->read.pkt_addr = dma_addr;
2357
2358                 /* calculate rx_packet_len of the received pkt */
2359                 rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2360                                  ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2361
2362                 /* fill the old mbuf using the received descriptor: rxd */
2363                 rxm->data_off = RTE_PKTMBUF_HEADROOM;
2364                 rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2365                 rxm->nb_segs = 1;
2366                 rxm->next = NULL;
2367                 rxm->pkt_len = rx_packet_len;
2368                 rxm->data_len = rx_packet_len;
2369                 rxm->port = rxq->port_id;
2370                 rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2371                         rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2372                 ice_rxd_to_vlan_tci(rxm, &rxd);
2373                 rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
2374                 pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2375
2376                 if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
2377                         ts_ns = ice_tstamp_convert_32b_64b(hw,
2378                                 rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
2379                         if (ice_timestamp_dynflag > 0) {
2380                                 *RTE_MBUF_DYNFIELD(rxm,
2381                                         ice_timestamp_dynfield_offset,
2382                                         rte_mbuf_timestamp_t *) = ts_ns;
2383                                 rxm->ol_flags |= ice_timestamp_dynflag;
2384                         }
2385                 }
2386
2387                 if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
2388                     RTE_PTYPE_L2_ETHER_TIMESYNC)) {
2389                         rxq->time_high =
2390                            rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2391                         rxm->timesync = rxq->queue_id;
2392                         pkt_flags |= PKT_RX_IEEE1588_PTP;
2393                 }
2394
2395                 rxm->ol_flags |= pkt_flags;
2396                 /* copy old mbuf to rx_pkts */
2397                 rx_pkts[nb_rx++] = rxm;
2398         }
2399         rxq->rx_tail = rx_id;
2400         /**
2401          * If the number of free RX descriptors is greater than the RX free
2402          * threshold of the queue, advance the queue's receive tail register.
2403          * Update that register with the value of the last processed RX
2404          * descriptor minus 1.
2405          */
2406         nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2407         if (nb_hold > rxq->rx_free_thresh) {
2408                 rx_id = (uint16_t)(rx_id == 0 ?
2409                                    (rxq->nb_rx_desc - 1) : (rx_id - 1));
2410                 /* write TAIL register */
2411                 ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2412                 nb_hold = 0;
2413         }
2414         rxq->nb_rx_hold = nb_hold;
2415
2416         /* return received packet in the burst */
2417         return nb_rx;
2418 }
2419
2420 static inline void
2421 ice_parse_tunneling_params(uint64_t ol_flags,
2422                             union ice_tx_offload tx_offload,
2423                             uint32_t *cd_tunneling)
2424 {
2425         /* EIPT: External (outer) IP header type */
2426         if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
2427                 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2428         else if (ol_flags & PKT_TX_OUTER_IPV4)
2429                 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2430         else if (ol_flags & PKT_TX_OUTER_IPV6)
2431                 *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2432
2433         /* EIPLEN: External (outer) IP header length, in DWords */
2434         *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2435                 ICE_TXD_CTX_QW0_EIPLEN_S;
2436
2437         /* L4TUNT: L4 Tunneling Type */
2438         switch (ol_flags & PKT_TX_TUNNEL_MASK) {
2439         case PKT_TX_TUNNEL_IPIP:
2440                 /* for non-UDP/non-GRE tunneling, set to 00b */
2441                 break;
2442         case PKT_TX_TUNNEL_VXLAN:
2443         case PKT_TX_TUNNEL_GTP:
2444         case PKT_TX_TUNNEL_GENEVE:
2445                 *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2446                 break;
2447         case PKT_TX_TUNNEL_GRE:
2448                 *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2449                 break;
2450         default:
2451                 PMD_TX_LOG(ERR, "Tunnel type not supported");
2452                 return;
2453         }
2454
2455         /* L4TUNLEN: L4 Tunneling Length, in Words
2456          *
2457          * We depend on the app to set rte_mbuf.l2_len correctly.
2458          * For IP in GRE it should be set to the length of the GRE
2459          * header;
2460          * For MAC in GRE or MAC in UDP it should be set to the length
2461          * of the GRE or UDP headers plus the inner MAC up to and
2462          * including its last Ethertype.
2463          * If MPLS labels exist, they should be included as well.
2464          */
2465         *cd_tunneling |= (tx_offload.l2_len >> 1) <<
2466                 ICE_TXD_CTX_QW0_NATLEN_S;
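        /*
         * Illustrative example (values assumed, not from this driver): for a
         * VXLAN packet, l2_len = 8 (outer UDP) + 8 (VXLAN) + 14 (inner
         * Ethernet) = 30 bytes, i.e. 15 words programmed into NATLEN.
         */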
2467
2468         /**
2469          * Calculate the tunneling UDP checksum.
2470          * Shall be set only if L4TUNT = 01b and EIPT is not zero
2471          */
2472         if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
2473             (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
2474                 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2475 }
2476
2477 static inline void
2478 ice_txd_enable_checksum(uint64_t ol_flags,
2479                         uint32_t *td_cmd,
2480                         uint32_t *td_offset,
2481                         union ice_tx_offload tx_offload)
2482 {
2483         /* Set MACLEN */
2484         if (ol_flags & PKT_TX_TUNNEL_MASK)
2485                 *td_offset |= (tx_offload.outer_l2_len >> 1)
2486                         << ICE_TX_DESC_LEN_MACLEN_S;
2487         else
2488                 *td_offset |= (tx_offload.l2_len >> 1)
2489                         << ICE_TX_DESC_LEN_MACLEN_S;
2490
2491         /* Enable L3 checksum offloads */
2492         if (ol_flags & PKT_TX_IP_CKSUM) {
2493                 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2494                 *td_offset |= (tx_offload.l3_len >> 2) <<
2495                               ICE_TX_DESC_LEN_IPLEN_S;
2496         } else if (ol_flags & PKT_TX_IPV4) {
2497                 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2498                 *td_offset |= (tx_offload.l3_len >> 2) <<
2499                               ICE_TX_DESC_LEN_IPLEN_S;
2500         } else if (ol_flags & PKT_TX_IPV6) {
2501                 *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2502                 *td_offset |= (tx_offload.l3_len >> 2) <<
2503                               ICE_TX_DESC_LEN_IPLEN_S;
2504         }
2505
2506         if (ol_flags & PKT_TX_TCP_SEG) {
2507                 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2508                 *td_offset |= (tx_offload.l4_len >> 2) <<
2509                               ICE_TX_DESC_LEN_L4_LEN_S;
2510                 return;
2511         }
2512
2513         /* Enable L4 checksum offloads */
2514         switch (ol_flags & PKT_TX_L4_MASK) {
2515         case PKT_TX_TCP_CKSUM:
2516                 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2517                 *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2518                               ICE_TX_DESC_LEN_L4_LEN_S;
2519                 break;
2520         case PKT_TX_SCTP_CKSUM:
2521                 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2522                 *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2523                               ICE_TX_DESC_LEN_L4_LEN_S;
2524                 break;
2525         case PKT_TX_UDP_CKSUM:
2526                 *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2527                 *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2528                               ICE_TX_DESC_LEN_L4_LEN_S;
2529                 break;
2530         default:
2531                 break;
2532         }
2533 }
2534
2535 static inline int
2536 ice_xmit_cleanup(struct ice_tx_queue *txq)
2537 {
2538         struct ice_tx_entry *sw_ring = txq->sw_ring;
2539         volatile struct ice_tx_desc *txd = txq->tx_ring;
2540         uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2541         uint16_t nb_tx_desc = txq->nb_tx_desc;
2542         uint16_t desc_to_clean_to;
2543         uint16_t nb_tx_to_clean;
2544
2545         /* Determine the last descriptor needing to be cleaned */
2546         desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2547         if (desc_to_clean_to >= nb_tx_desc)
2548                 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2549
2550         /* Check to make sure the last descriptor to clean is done */
2551         desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2552         if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2553             rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2554                 PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2555                            "(port=%d queue=%d) value=0x%"PRIx64"\n",
2556                            desc_to_clean_to,
2557                            txq->port_id, txq->queue_id,
2558                            txd[desc_to_clean_to].cmd_type_offset_bsz);
2559                 /* Failed to clean any descriptors */
2560                 return -1;
2561         }
2562
2563         /* Figure out how many descriptors will be cleaned */
2564         if (last_desc_cleaned > desc_to_clean_to)
2565                 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2566                                             desc_to_clean_to);
2567         else
2568                 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2569                                             last_desc_cleaned);
2570
2571         /* The last descriptor to clean is done, so that means all the
2572          * descriptors from the last descriptor that was cleaned
2573          * up to the last descriptor with the RS bit set
2574          * are done. Only reset the threshold descriptor.
2575          */
2576         txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2577
2578         /* Update the txq to reflect the last descriptor that was cleaned */
2579         txq->last_desc_cleaned = desc_to_clean_to;
2580         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2581
2582         return 0;
2583 }
2584
2585 /* Construct the tx flags */
2586 static inline uint64_t
2587 ice_build_ctob(uint32_t td_cmd,
2588                uint32_t td_offset,
2589                uint16_t size,
2590                uint32_t td_tag)
2591 {
2592         return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2593                                 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2594                                 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2595                                 ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2596                                 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2597 }
2598
2599 /* Check if the context descriptor is needed for TX offloading */
2600 static inline uint16_t
2601 ice_calc_context_desc(uint64_t flags)
2602 {
2603         static uint64_t mask = PKT_TX_TCP_SEG |
2604                 PKT_TX_QINQ |
2605                 PKT_TX_OUTER_IP_CKSUM |
2606                 PKT_TX_TUNNEL_MASK |
2607                 PKT_TX_IEEE1588_TMST;
2608
2609         return (flags & mask) ? 1 : 0;
2610 }
2611
2612 /* set ice TSO context descriptor */
2613 static inline uint64_t
2614 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2615 {
2616         uint64_t ctx_desc = 0;
2617         uint32_t cd_cmd, hdr_len, cd_tso_len;
2618
2619         if (!tx_offload.l4_len) {
2620                 PMD_TX_LOG(DEBUG, "L4 length set to 0");
2621                 return ctx_desc;
2622         }
2623
2624         hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2625         hdr_len += (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) ?
2626                    tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2627
2628         cd_cmd = ICE_TX_CTX_DESC_TSO;
2629         cd_tso_len = mbuf->pkt_len - hdr_len;
2630         ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2631                     ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2632                     ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
2633
2634         return ctx_desc;
2635 }
2636
2637 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2638 #define ICE_MAX_DATA_PER_TXD \
2639         (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2640 /* Calculate the number of TX descriptors needed for each pkt */
2641 static inline uint16_t
2642 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2643 {
2644         struct rte_mbuf *txd = tx_pkt;
2645         uint16_t count = 0;
2646
2647         while (txd != NULL) {
2648                 count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
2649                 txd = txd->next;
2650         }
2651
2652         return count;
2653 }
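/*
 * Illustrative example: a single 32768-byte segment needs
 * DIV_ROUND_UP(32768, 16383) = 3 data descriptors, since each descriptor can
 * carry at most ICE_MAX_DATA_PER_TXD (16K - 1) bytes.
 */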
2654
2655 uint16_t
2656 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2657 {
2658         struct ice_tx_queue *txq;
2659         volatile struct ice_tx_desc *tx_ring;
2660         volatile struct ice_tx_desc *txd;
2661         struct ice_tx_entry *sw_ring;
2662         struct ice_tx_entry *txe, *txn;
2663         struct rte_mbuf *tx_pkt;
2664         struct rte_mbuf *m_seg;
2665         uint32_t cd_tunneling_params;
2666         uint16_t tx_id;
2667         uint16_t nb_tx;
2668         uint16_t nb_used;
2669         uint16_t nb_ctx;
2670         uint32_t td_cmd = 0;
2671         uint32_t td_offset = 0;
2672         uint32_t td_tag = 0;
2673         uint16_t tx_last;
2674         uint16_t slen;
2675         uint64_t buf_dma_addr;
2676         uint64_t ol_flags;
2677         union ice_tx_offload tx_offload = {0};
2678
2679         txq = tx_queue;
2680         sw_ring = txq->sw_ring;
2681         tx_ring = txq->tx_ring;
2682         tx_id = txq->tx_tail;
2683         txe = &sw_ring[tx_id];
2684
2685         /* Check if the descriptor ring needs to be cleaned. */
2686         if (txq->nb_tx_free < txq->tx_free_thresh)
2687                 (void)ice_xmit_cleanup(txq);
2688
2689         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
2690                 tx_pkt = *tx_pkts++;
2691
2692                 td_cmd = 0;
2693                 td_tag = 0;
2694                 td_offset = 0;
2695                 ol_flags = tx_pkt->ol_flags;
2696                 tx_offload.l2_len = tx_pkt->l2_len;
2697                 tx_offload.l3_len = tx_pkt->l3_len;
2698                 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
2699                 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
2700                 tx_offload.l4_len = tx_pkt->l4_len;
2701                 tx_offload.tso_segsz = tx_pkt->tso_segsz;
2702                 /* Calculate the number of context descriptors needed. */
2703                 nb_ctx = ice_calc_context_desc(ol_flags);
2704
2705                 /* The number of descriptors that must be allocated for
2706                  * a packet equals the number of segments of that packet,
2707                  * plus one context descriptor if needed.
2708                  * When TSO is enabled, recalculate the number of needed TX
2709                  * descriptors in case an mbuf's data size exceeds the max
2710                  * data size that HW allows per TX descriptor.
2711                  */
2712                 if (ol_flags & PKT_TX_TCP_SEG)
2713                         nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
2714                                              nb_ctx);
2715                 else
2716                         nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
2717                 tx_last = (uint16_t)(tx_id + nb_used - 1);
2718
2719                 /* Circular ring */
2720                 if (tx_last >= txq->nb_tx_desc)
2721                         tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
2722
2723                 if (nb_used > txq->nb_tx_free) {
2724                         if (ice_xmit_cleanup(txq) != 0) {
2725                                 if (nb_tx == 0)
2726                                         return 0;
2727                                 goto end_of_tx;
2728                         }
2729                         if (unlikely(nb_used > txq->tx_rs_thresh)) {
2730                                 while (nb_used > txq->nb_tx_free) {
2731                                         if (ice_xmit_cleanup(txq) != 0) {
2732                                                 if (nb_tx == 0)
2733                                                         return 0;
2734                                                 goto end_of_tx;
2735                                         }
2736                                 }
2737                         }
2738                 }
2739
2740                 /* Descriptor based VLAN insertion */
2741                 if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
2742                         td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
2743                         td_tag = tx_pkt->vlan_tci;
2744                 }
2745
2746                 /* Fill in tunneling parameters if necessary */
2747                 cd_tunneling_params = 0;
2748                 if (ol_flags & PKT_TX_TUNNEL_MASK)
2749                         ice_parse_tunneling_params(ol_flags, tx_offload,
2750                                                    &cd_tunneling_params);
2751
2752                 /* Enable checksum offloading */
2753                 if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
2754                         ice_txd_enable_checksum(ol_flags, &td_cmd,
2755                                                 &td_offset, tx_offload);
2756
2757                 if (nb_ctx) {
2758                         /* Setup TX context descriptor if required */
2759                         volatile struct ice_tx_ctx_desc *ctx_txd =
2760                                 (volatile struct ice_tx_ctx_desc *)
2761                                         &tx_ring[tx_id];
2762                         uint16_t cd_l2tag2 = 0;
2763                         uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
2764
2765                         txn = &sw_ring[txe->next_id];
2766                         RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
2767                         if (txe->mbuf) {
2768                                 rte_pktmbuf_free_seg(txe->mbuf);
2769                                 txe->mbuf = NULL;
2770                         }
2771
2772                         if (ol_flags & PKT_TX_TCP_SEG)
2773                                 cd_type_cmd_tso_mss |=
2774                                         ice_set_tso_ctx(tx_pkt, tx_offload);
2775                         else if (ol_flags & PKT_TX_IEEE1588_TMST)
2776                                 cd_type_cmd_tso_mss |=
2777                                         ((uint64_t)ICE_TX_CTX_DESC_TSYN <<
2778                                         ICE_TXD_CTX_QW1_CMD_S);
2779
2780                         ctx_txd->tunneling_params =
2781                                 rte_cpu_to_le_32(cd_tunneling_params);
2782
2783                         /* TX context descriptor based double VLAN insertion (QinQ) */
2784                         if (ol_flags & PKT_TX_QINQ) {
2785                                 cd_l2tag2 = tx_pkt->vlan_tci_outer;
2786                                 cd_type_cmd_tso_mss |=
2787                                         ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
2788                                          ICE_TXD_CTX_QW1_CMD_S);
2789                         }
2790                         ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
2791                         ctx_txd->qw1 =
2792                                 rte_cpu_to_le_64(cd_type_cmd_tso_mss);
2793
2794                         txe->last_id = tx_last;
2795                         tx_id = txe->next_id;
2796                         txe = txn;
2797                 }
2798                 m_seg = tx_pkt;
2799
2800                 do {
2801                         txd = &tx_ring[tx_id];
2802                         txn = &sw_ring[txe->next_id];
2803
2804                         if (txe->mbuf)
2805                                 rte_pktmbuf_free_seg(txe->mbuf);
2806                         txe->mbuf = m_seg;
2807
2808                         /* Setup TX Descriptor */
2809                         slen = m_seg->data_len;
2810                         buf_dma_addr = rte_mbuf_data_iova(m_seg);
2811
2812                         while ((ol_flags & PKT_TX_TCP_SEG) &&
2813                                 unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
2814                                 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2815                                 txd->cmd_type_offset_bsz =
2816                                 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2817                                 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2818                                 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2819                                 ((uint64_t)ICE_MAX_DATA_PER_TXD <<
2820                                  ICE_TXD_QW1_TX_BUF_SZ_S) |
2821                                 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2822
2823                                 buf_dma_addr += ICE_MAX_DATA_PER_TXD;
2824                                 slen -= ICE_MAX_DATA_PER_TXD;
2825
2826                                 txe->last_id = tx_last;
2827                                 tx_id = txe->next_id;
2828                                 txe = txn;
2829                                 txd = &tx_ring[tx_id];
2830                                 txn = &sw_ring[txe->next_id];
2831                         }
2832
2833                         txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
2834                         txd->cmd_type_offset_bsz =
2835                                 rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2836                                 ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2837                                 ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2838                                 ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
2839                                 ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2840
2841                         txe->last_id = tx_last;
2842                         tx_id = txe->next_id;
2843                         txe = txn;
2844                         m_seg = m_seg->next;
2845                 } while (m_seg);
2846
2847                 /* set the End of Packet (EOP) bit on the last descriptor */
2848                 td_cmd |= ICE_TX_DESC_CMD_EOP;
2849                 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
2850                 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
2851
2852                 /* set RS bit on the last descriptor of one packet */
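                /* Illustrative: with tx_rs_thresh = 32 (value assumed), the
                 * RS bit, and thus a DD write-back from hardware, is
                 * requested roughly once every 32 used descriptors instead
                 * of once per packet.
                 */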
2853                 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
2854                         PMD_TX_LOG(DEBUG,
2855                                    "Setting RS bit on TXD id="
2856                                    "%4u (port=%d queue=%d)",
2857                                    tx_last, txq->port_id, txq->queue_id);
2858
2859                         td_cmd |= ICE_TX_DESC_CMD_RS;
2860
2861                         /* Update txq RS bit counters */
2862                         txq->nb_tx_used = 0;
2863                 }
2864                 txd->cmd_type_offset_bsz |=
2865                         rte_cpu_to_le_64(((uint64_t)td_cmd) <<
2866                                          ICE_TXD_QW1_CMD_S);
2867         }
2868 end_of_tx:
2869         /* update Tail register */
2870         ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
2871         txq->tx_tail = tx_id;
2872
2873         return nb_tx;
2874 }
2875
2876 static __rte_always_inline int
2877 ice_tx_free_bufs(struct ice_tx_queue *txq)
2878 {
2879         struct ice_tx_entry *txep;
2880         uint16_t i;
2881
2882         if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
2883              rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
2884             rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
2885                 return 0;
2886
2887         txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
2888
2889         for (i = 0; i < txq->tx_rs_thresh; i++)
2890                 rte_prefetch0((txep + i)->mbuf);
2891
2892         if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
2893                 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2894                         rte_mempool_put(txep->mbuf->pool, txep->mbuf);
2895                         txep->mbuf = NULL;
2896                 }
2897         } else {
2898                 for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
2899                         rte_pktmbuf_free_seg(txep->mbuf);
2900                         txep->mbuf = NULL;
2901                 }
2902         }
2903
2904         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
2905         txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
2906         if (txq->tx_next_dd >= txq->nb_tx_desc)
2907                 txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
2908
2909         return txq->tx_rs_thresh;
2910 }
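
/* Worked example of the bookkeeping above (illustrative values): with
 * nb_tx_desc = 512 and tx_rs_thresh = 32, tx_next_dd advances in steps of 32
 * (31, 63, ..., 511 under the usual initialisation) and wraps back to 31
 * after 511; each successful call frees exactly tx_rs_thresh (32) mbufs
 * starting at sw_ring[tx_next_dd - 31].
 */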
2911
2912 static int
2913 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
2914                         uint32_t free_cnt)
2915 {
2916         struct ice_tx_entry *swr_ring = txq->sw_ring;
2917         uint16_t i, tx_last, tx_id;
2918         uint16_t nb_tx_free_last;
2919         uint16_t nb_tx_to_clean;
2920         uint32_t pkt_cnt;
2921
2922         /* Start freeing mbufs from the entry after tx_tail */
2923         tx_last = txq->tx_tail;
2924         tx_id  = swr_ring[tx_last].next_id;
2925
2926         if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
2927                 return 0;
2928
2929         nb_tx_to_clean = txq->nb_tx_free;
2930         nb_tx_free_last = txq->nb_tx_free;
2931         if (!free_cnt)
2932                 free_cnt = txq->nb_tx_desc;
2933
2934         /* Loop through swr_ring to count the number of
2935          * freeable mbufs and packets.
2936          */
2937         for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2938                 for (i = 0; i < nb_tx_to_clean &&
2939                         pkt_cnt < free_cnt &&
2940                         tx_id != tx_last; i++) {
2941                         if (swr_ring[tx_id].mbuf != NULL) {
2942                                 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2943                                 swr_ring[tx_id].mbuf = NULL;
2944
2945                                 /*
2946                                  * last segment in the packet,
2947                                  * increment packet count
2948                                  */
2949                                 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2950                         }
2951
2952                         tx_id = swr_ring[tx_id].next_id;
2953                 }
2954
2955                 if (txq->tx_rs_thresh > txq->nb_tx_desc -
2956                         txq->nb_tx_free || tx_id == tx_last)
2957                         break;
2958
2959                 if (pkt_cnt < free_cnt) {
2960                         if (ice_xmit_cleanup(txq))
2961                                 break;
2962
2963                         nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2964                         nb_tx_free_last = txq->nb_tx_free;
2965                 }
2966         }
2967
2968         return (int)pkt_cnt;
2969 }
2970
2971 #ifdef RTE_ARCH_X86
2972 static int
2973 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
2974                         uint32_t free_cnt __rte_unused)
2975 {
2976         return -ENOTSUP;
2977 }
2978 #endif
2979
2980 static int
2981 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
2982                         uint32_t free_cnt)
2983 {
2984         int i, n, cnt;
2985
2986         if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2987                 free_cnt = txq->nb_tx_desc;
2988
2989         cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
2990
2991         for (i = 0; i < cnt; i += n) {
2992                 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
2993                         break;
2994
2995                 n = ice_tx_free_bufs(txq);
2996
2997                 if (n == 0)
2998                         break;
2999         }
3000
3001         return i;
3002 }
3003
3004 int
3005 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
3006 {
3007         struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
3008         struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
3009         struct ice_adapter *ad =
3010                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3011
3012 #ifdef RTE_ARCH_X86
3013         if (ad->tx_vec_allowed)
3014                 return ice_tx_done_cleanup_vec(q, free_cnt);
3015 #endif
3016         if (ad->tx_simple_allowed)
3017                 return ice_tx_done_cleanup_simple(q, free_cnt);
3018         else
3019                 return ice_tx_done_cleanup_full(q, free_cnt);
3020 }
3021
3022 /* Populate 4 descriptors with data from 4 mbufs */
3023 static inline void
3024 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3025 {
3026         uint64_t dma_addr;
3027         uint32_t i;
3028
3029         for (i = 0; i < 4; i++, txdp++, pkts++) {
3030                 dma_addr = rte_mbuf_data_iova(*pkts);
3031                 txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3032                 txdp->cmd_type_offset_bsz =
3033                         ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3034                                        (*pkts)->data_len, 0);
3035         }
3036 }
3037
3038 /* Populate 1 descriptor with data from 1 mbuf */
3039 static inline void
3040 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3041 {
3042         uint64_t dma_addr;
3043
3044         dma_addr = rte_mbuf_data_iova(*pkts);
3045         txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3046         txdp->cmd_type_offset_bsz =
3047                 ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3048                                (*pkts)->data_len, 0);
3049 }
3050
3051 static inline void
3052 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
3053                     uint16_t nb_pkts)
3054 {
3055         volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
3056         struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
3057         const int N_PER_LOOP = 4;
3058         const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
3059         int mainpart, leftover;
3060         int i, j;
3061
3062         /**
3063          * Process most of the packets in chunks of N pkts.  Any
3064          * leftover packets will get processed one at a time.
3065          */
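        /* Worked example: with nb_pkts = 10 and N_PER_LOOP = 4,
         * mainpart = 10 & ~3 = 8 and leftover = 10 & 3 = 2, so two tx4()
         * calls cover packets 0-7 and two tx1() calls cover packets 8-9.
         */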
3066         mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
3067         leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
3068         for (i = 0; i < mainpart; i += N_PER_LOOP) {
3069                 /* Copy N mbuf pointers to the S/W ring */
3070                 for (j = 0; j < N_PER_LOOP; ++j)
3071                         (txep + i + j)->mbuf = *(pkts + i + j);
3072                 tx4(txdp + i, pkts + i);
3073         }
3074
3075         if (unlikely(leftover > 0)) {
3076                 for (i = 0; i < leftover; ++i) {
3077                         (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
3078                         tx1(txdp + mainpart + i, pkts + mainpart + i);
3079                 }
3080         }
3081 }
3082
3083 static inline uint16_t
3084 tx_xmit_pkts(struct ice_tx_queue *txq,
3085              struct rte_mbuf **tx_pkts,
3086              uint16_t nb_pkts)
3087 {
3088         volatile struct ice_tx_desc *txr = txq->tx_ring;
3089         uint16_t n = 0;
3090
3091         /**
3092          * Begin scanning the H/W ring for done descriptors when the number
3093          * of available descriptors drops below tx_free_thresh. For each done
3094          * descriptor, free the associated buffer.
3095          */
3096         if (txq->nb_tx_free < txq->tx_free_thresh)
3097                 ice_tx_free_bufs(txq);
3098
3099         /* Use only the available descriptors */
3100         nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
3101         if (unlikely(!nb_pkts))
3102                 return 0;
3103
3104         txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
3105         if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
3106                 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
3107                 ice_tx_fill_hw_ring(txq, tx_pkts, n);
3108                 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3109                         rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3110                                          ICE_TXD_QW1_CMD_S);
3111                 txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3112                 txq->tx_tail = 0;
3113         }
3114
3115         /* Fill hardware descriptor ring with mbuf data */
3116         ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
3117         txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
3118
3119         /* Determine if the RS bit needs to be set */
3120         if (txq->tx_tail > txq->tx_next_rs) {
3121                 txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3122                         rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3123                                          ICE_TXD_QW1_CMD_S);
3124                 txq->tx_next_rs =
3125                         (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
3126                 if (txq->tx_next_rs >= txq->nb_tx_desc)
3127                         txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3128         }
3129
3130         if (txq->tx_tail >= txq->nb_tx_desc)
3131                 txq->tx_tail = 0;
3132
3133         /* Update the tx tail register */
3134         ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
3135
3136         return nb_pkts;
3137 }
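
/* Worked example of the wrap-around handling above (illustrative values):
 * with nb_tx_desc = 512, tx_tail = 500 and nb_pkts = 20 (all of them fitting
 * in nb_tx_free), n = 12 descriptors are filled at indexes 500..511, the RS
 * bit is forced on txr[tx_next_rs] and tx_tail wraps to 0; the remaining
 * 8 packets then land at indexes 0..7 and tx_tail ends up at 8.
 */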
3138
3139 static uint16_t
3140 ice_xmit_pkts_simple(void *tx_queue,
3141                      struct rte_mbuf **tx_pkts,
3142                      uint16_t nb_pkts)
3143 {
3144         uint16_t nb_tx = 0;
3145
3146         if (likely(nb_pkts <= ICE_TX_MAX_BURST))
3147                 return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3148                                     tx_pkts, nb_pkts);
3149
3150         while (nb_pkts) {
3151                 uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
3152                                                       ICE_TX_MAX_BURST);
3153
3154                 ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3155                                    &tx_pkts[nb_tx], num);
3156                 nb_tx = (uint16_t)(nb_tx + ret);
3157                 nb_pkts = (uint16_t)(nb_pkts - ret);
3158                 if (ret < num)
3159                         break;
3160         }
3161
3162         return nb_tx;
3163 }
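
/* Illustrative burst splitting: assuming ICE_TX_MAX_BURST is 32, a request
 * for 100 packets is handed to tx_xmit_pkts() in chunks of 32, 32, 32 and 4;
 * the loop stops early as soon as a chunk is not transmitted in full
 * (i.e. the ring ran out of free descriptors).
 */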
3164
3165 void __rte_cold
3166 ice_set_rx_function(struct rte_eth_dev *dev)
3167 {
3168         PMD_INIT_FUNC_TRACE();
3169         struct ice_adapter *ad =
3170                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3171 #ifdef RTE_ARCH_X86
3172         struct ice_rx_queue *rxq;
3173         int i;
3174         int rx_check_ret = -1;
3175
3176         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3177                 ad->rx_use_avx512 = false;
3178                 ad->rx_use_avx2 = false;
3179                 rx_check_ret = ice_rx_vec_dev_check(dev);
3180                 if (ad->ptp_ena)
3181                         rx_check_ret = -1;
3182                 if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
3183                     rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3184                         ad->rx_vec_allowed = true;
3185                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
3186                                 rxq = dev->data->rx_queues[i];
3187                                 if (rxq && ice_rxq_vec_setup(rxq)) {
3188                                         ad->rx_vec_allowed = false;
3189                                         break;
3190                                 }
3191                         }
3192
3193                         if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3194                         rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3195                         rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3196 #ifdef CC_AVX512_SUPPORT
3197                                 ad->rx_use_avx512 = true;
3198 #else
3199                         PMD_DRV_LOG(NOTICE,
3200                                 "AVX512 is not supported in build env");
3201 #endif
3202                         if (!ad->rx_use_avx512 &&
3203                         (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3204                         rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3205                         rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3206                                 ad->rx_use_avx2 = true;
3207
3208                 } else {
3209                         ad->rx_vec_allowed = false;
3210                 }
3211         }
3212
3213         if (ad->rx_vec_allowed) {
3214                 if (dev->data->scattered_rx) {
3215                         if (ad->rx_use_avx512) {
3216 #ifdef CC_AVX512_SUPPORT
3217                                 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3218                                         PMD_DRV_LOG(NOTICE,
3219                                                 "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
3220                                                 dev->data->port_id);
3221                                         dev->rx_pkt_burst =
3222                                                 ice_recv_scattered_pkts_vec_avx512_offload;
3223                                 } else {
3224                                         PMD_DRV_LOG(NOTICE,
3225                                                 "Using AVX512 Vector Scattered Rx (port %d).",
3226                                                 dev->data->port_id);
3227                                         dev->rx_pkt_burst =
3228                                                 ice_recv_scattered_pkts_vec_avx512;
3229                                 }
3230 #endif
3231                         } else if (ad->rx_use_avx2) {
3232                                 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3233                                         PMD_DRV_LOG(NOTICE,
3234                                                     "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
3235                                                     dev->data->port_id);
3236                                         dev->rx_pkt_burst =
3237                                                 ice_recv_scattered_pkts_vec_avx2_offload;
3238                                 } else {
3239                                         PMD_DRV_LOG(NOTICE,
3240                                                     "Using AVX2 Vector Scattered Rx (port %d).",
3241                                                     dev->data->port_id);
3242                                         dev->rx_pkt_burst =
3243                                                 ice_recv_scattered_pkts_vec_avx2;
3244                                 }
3245                         } else {
3246                                 PMD_DRV_LOG(DEBUG,
3247                                         "Using Vector Scattered Rx (port %d).",
3248                                         dev->data->port_id);
3249                                 dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
3250                         }
3251                 } else {
3252                         if (ad->rx_use_avx512) {
3253 #ifdef CC_AVX512_SUPPORT
3254                                 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3255                                         PMD_DRV_LOG(NOTICE,
3256                                                 "Using AVX512 OFFLOAD Vector Rx (port %d).",
3257                                                 dev->data->port_id);
3258                                         dev->rx_pkt_burst =
3259                                                 ice_recv_pkts_vec_avx512_offload;
3260                                 } else {
3261                                         PMD_DRV_LOG(NOTICE,
3262                                                 "Using AVX512 Vector Rx (port %d).",
3263                                                 dev->data->port_id);
3264                                         dev->rx_pkt_burst =
3265                                                 ice_recv_pkts_vec_avx512;
3266                                 }
3267 #endif
3268                         } else if (ad->rx_use_avx2) {
3269                                 if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3270                                         PMD_DRV_LOG(NOTICE,
3271                                                     "Using AVX2 OFFLOAD Vector Rx (port %d).",
3272                                                     dev->data->port_id);
3273                                         dev->rx_pkt_burst =
3274                                                 ice_recv_pkts_vec_avx2_offload;
3275                                 } else {
3276                                         PMD_DRV_LOG(NOTICE,
3277                                                     "Using AVX2 Vector Rx (port %d).",
3278                                                     dev->data->port_id);
3279                                         dev->rx_pkt_burst =
3280                                                 ice_recv_pkts_vec_avx2;
3281                                 }
3282                         } else {
3283                                 PMD_DRV_LOG(DEBUG,
3284                                         "Using Vector Rx (port %d).",
3285                                         dev->data->port_id);
3286                                 dev->rx_pkt_burst = ice_recv_pkts_vec;
3287                         }
3288                 }
3289                 return;
3290         }
3291
3292 #endif
3293
3294         if (dev->data->scattered_rx) {
3295                 /* Set the non-LRO scattered Rx function */
3296                 PMD_INIT_LOG(DEBUG,
3297                              "Using a Scattered function on port %d.",
3298                              dev->data->port_id);
3299                 dev->rx_pkt_burst = ice_recv_scattered_pkts;
3300         } else if (ad->rx_bulk_alloc_allowed) {
3301                 PMD_INIT_LOG(DEBUG,
3302                              "Rx Burst Bulk Alloc Preconditions are "
3303                              "satisfied. Rx Burst Bulk Alloc function "
3304                              "will be used on port %d.",
3305                              dev->data->port_id);
3306                 dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3307         } else {
3308                 PMD_INIT_LOG(DEBUG,
3309                              "Rx Burst Bulk Alloc Preconditions are not "
3310                              "satisfied, Normal Rx will be used on port %d.",
3311                              dev->data->port_id);
3312                 dev->rx_pkt_burst = ice_recv_pkts;
3313         }
3314 }
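
/* Example of the selection above (assumed environment): on an x86 CPU with
 * AVX512F/AVX512BW, a build with CC_AVX512_SUPPORT, an EAL max SIMD bitwidth
 * of at least 512, PTP disabled, bulk allocation allowed and every queue
 * passing ice_rxq_vec_setup(), a non-scattered port ends up with
 * dev->rx_pkt_burst = ice_recv_pkts_vec_avx512, or the _offload variant when
 * ice_rx_vec_dev_check() reports ICE_VECTOR_OFFLOAD_PATH.
 */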
3315
3316 static const struct {
3317         eth_rx_burst_t pkt_burst;
3318         const char *info;
3319 } ice_rx_burst_infos[] = {
3320         { ice_recv_scattered_pkts,          "Scalar Scattered" },
3321         { ice_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc" },
3322         { ice_recv_pkts,                    "Scalar" },
3323 #ifdef RTE_ARCH_X86
3324 #ifdef CC_AVX512_SUPPORT
3325         { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3326         { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
3327         { ice_recv_pkts_vec_avx512,           "Vector AVX512" },
3328         { ice_recv_pkts_vec_avx512_offload,   "Offload Vector AVX512" },
3329 #endif
3330         { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3331         { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
3332         { ice_recv_pkts_vec_avx2,           "Vector AVX2" },
3333         { ice_recv_pkts_vec_avx2_offload,   "Offload Vector AVX2" },
3334         { ice_recv_scattered_pkts_vec,      "Vector SSE Scattered" },
3335         { ice_recv_pkts_vec,                "Vector SSE" },
3336 #endif
3337 };
3338
3339 int
3340 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3341                       struct rte_eth_burst_mode *mode)
3342 {
3343         eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3344         int ret = -EINVAL;
3345         unsigned int i;
3346
3347         for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3348                 if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3349                         snprintf(mode->info, sizeof(mode->info), "%s",
3350                                  ice_rx_burst_infos[i].info);
3351                         ret = 0;
3352                         break;
3353                 }
3354         }
3355
3356         return ret;
3357 }
3358
3359 void __rte_cold
3360 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3361 {
3362         struct ice_adapter *ad =
3363                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3364
3365         /* Use a simple Tx queue if possible (the only Tx offload allowed is mbuf fast free) */
3366         ad->tx_simple_allowed =
3367                 (txq->offloads ==
3368                 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
3369                 txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3370
3371         if (ad->tx_simple_allowed)
3372                 PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3373                              txq->queue_id);
3374         else
3375                 PMD_INIT_LOG(DEBUG,
3376                              "Simple Tx can NOT be enabled on Tx queue %u.",
3377                              txq->queue_id);
3378 }
3379
3380 /*********************************************************************
3381  *
3382  *  TX prep functions
3383  *
3384  **********************************************************************/
3385 /* The valid range of TSO MSS and the maximum TSO frame size */
3386 #define ICE_MIN_TSO_MSS            64
3387 #define ICE_MAX_TSO_MSS            9728
3388 #define ICE_MAX_TSO_FRAME_SIZE     262144
3389 uint16_t
3390 ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
3391               uint16_t nb_pkts)
3392 {
3393         int i, ret;
3394         uint64_t ol_flags;
3395         struct rte_mbuf *m;
3396
3397         for (i = 0; i < nb_pkts; i++) {
3398                 m = tx_pkts[i];
3399                 ol_flags = m->ol_flags;
3400
3401                 if (ol_flags & PKT_TX_TCP_SEG &&
3402                     (m->tso_segsz < ICE_MIN_TSO_MSS ||
3403                      m->tso_segsz > ICE_MAX_TSO_MSS ||
3404                      m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3405                         /**
3406                          * An MSS outside this range is considered malicious
3407                          */
3408                         rte_errno = EINVAL;
3409                         return i;
3410                 }
3411
3412 #ifdef RTE_ETHDEV_DEBUG_TX
3413                 ret = rte_validate_tx_offload(m);
3414                 if (ret != 0) {
3415                         rte_errno = -ret;
3416                         return i;
3417                 }
3418 #endif
3419                 ret = rte_net_intel_cksum_prepare(m);
3420                 if (ret != 0) {
3421                         rte_errno = -ret;
3422                         return i;
3423                 }
3424         }
3425         return i;
3426 }
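
/* Typical usage (illustrative, application side): when ice_prep_pkts() is
 * installed as dev->tx_pkt_prepare (see ice_set_tx_function() below), an
 * application would normally run its burst through the ethdev prepare API
 * before transmitting, e.g.:
 *
 *     uint16_t nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
 *     if (nb_ok < nb)
 *             handle_bad_pkt(pkts[nb_ok], rte_errno);
 *     rte_eth_tx_burst(port_id, queue_id, pkts, nb_ok);
 *
 * where handle_bad_pkt() is a hypothetical application helper.
 */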
3427
3428 void __rte_cold
3429 ice_set_tx_function(struct rte_eth_dev *dev)
3430 {
3431         struct ice_adapter *ad =
3432                 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3433 #ifdef RTE_ARCH_X86
3434         struct ice_tx_queue *txq;
3435         int i;
3436         int tx_check_ret = -1;
3437
3438         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3439                 ad->tx_use_avx2 = false;
3440                 ad->tx_use_avx512 = false;
3441                 tx_check_ret = ice_tx_vec_dev_check(dev);
3442                 if (tx_check_ret >= 0 &&
3443                     rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3444                         ad->tx_vec_allowed = true;
3445
3446                         if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3447                         rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3448                         rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3449 #ifdef CC_AVX512_SUPPORT
3450                                 ad->tx_use_avx512 = true;
3451 #else
3452                         PMD_DRV_LOG(NOTICE,
3453                                 "AVX512 is not supported in build env");
3454 #endif
3455                         if (!ad->tx_use_avx512 &&
3456                                 (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3457                                 rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3458                                 rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3459                                 ad->tx_use_avx2 = true;
3460
3461                         if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
3462                                 tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
3463                                 ad->tx_vec_allowed = false;
3464
3465                         if (ad->tx_vec_allowed) {
3466                                 for (i = 0; i < dev->data->nb_tx_queues; i++) {
3467                                         txq = dev->data->tx_queues[i];
3468                                         if (txq && ice_txq_vec_setup(txq)) {
3469                                                 ad->tx_vec_allowed = false;
3470                                                 break;
3471                                         }
3472                                 }
3473                         }
3474                 } else {
3475                         ad->tx_vec_allowed = false;
3476                 }
3477         }
3478
3479         if (ad->tx_vec_allowed) {
3480                 dev->tx_pkt_prepare = NULL;
3481                 if (ad->tx_use_avx512) {
3482 #ifdef CC_AVX512_SUPPORT
3483                         if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3484                                 PMD_DRV_LOG(NOTICE,
3485                                             "Using AVX512 OFFLOAD Vector Tx (port %d).",
3486                                             dev->data->port_id);
3487                                 dev->tx_pkt_burst =
3488                                         ice_xmit_pkts_vec_avx512_offload;
3489                                 dev->tx_pkt_prepare = ice_prep_pkts;
3490                         } else {
3491                                 PMD_DRV_LOG(NOTICE,
3492                                             "Using AVX512 Vector Tx (port %d).",
3493                                             dev->data->port_id);
3494                                 dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3495                         }
3496 #endif
3497                 } else {
3498                         if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3499                                 PMD_DRV_LOG(NOTICE,
3500                                             "Using AVX2 OFFLOAD Vector Tx (port %d).",
3501                                             dev->data->port_id);
3502                                 dev->tx_pkt_burst =
3503                                         ice_xmit_pkts_vec_avx2_offload;
3504                                 dev->tx_pkt_prepare = ice_prep_pkts;
3505                         } else {
3506                                 PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3507                                             ad->tx_use_avx2 ? "avx2 " : "",
3508                                             dev->data->port_id);
3509                                 dev->tx_pkt_burst = ad->tx_use_avx2 ?
3510                                                     ice_xmit_pkts_vec_avx2 :
3511                                                     ice_xmit_pkts_vec;
3512                         }
3513                 }
3514
3515                 return;
3516         }
3517 #endif
3518
3519         if (ad->tx_simple_allowed) {
3520                 PMD_INIT_LOG(DEBUG, "Simple Tx will be used.");
3521                 dev->tx_pkt_burst = ice_xmit_pkts_simple;
3522                 dev->tx_pkt_prepare = NULL;
3523         } else {
3524                 PMD_INIT_LOG(DEBUG, "Normal Tx will be used.");
3525                 dev->tx_pkt_burst = ice_xmit_pkts;
3526                 dev->tx_pkt_prepare = ice_prep_pkts;
3527         }
3528 }
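
/* Example of the selection above: when the vector path is not taken, no Tx
 * offload other than DEV_TX_OFFLOAD_MBUF_FAST_FREE is enabled and
 * tx_rs_thresh >= ICE_TX_MAX_BURST (see ice_set_tx_function_flag()), the
 * simple path is chosen: ice_xmit_pkts_simple with no prepare callback;
 * otherwise ice_xmit_pkts is paired with ice_prep_pkts.
 */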
3529
3530 static const struct {
3531         eth_tx_burst_t pkt_burst;
3532         const char *info;
3533 } ice_tx_burst_infos[] = {
3534         { ice_xmit_pkts_simple,   "Scalar Simple" },
3535         { ice_xmit_pkts,          "Scalar" },
3536 #ifdef RTE_ARCH_X86
3537 #ifdef CC_AVX512_SUPPORT
3538         { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
3539         { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
3540 #endif
3541         { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
3542         { ice_xmit_pkts_vec,      "Vector SSE" },
3543 #endif
3544 };
3545
3546 int
3547 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3548                       struct rte_eth_burst_mode *mode)
3549 {
3550         eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
3551         int ret = -EINVAL;
3552         unsigned int i;
3553
3554         for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
3555                 if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
3556                         snprintf(mode->info, sizeof(mode->info), "%s",
3557                                  ice_tx_burst_infos[i].info);
3558                         ret = 0;
3559                         break;
3560                 }
3561         }
3562
3563         return ret;
3564 }
3565
3566 /* For the meaning of each value, the hardware datasheet gives more details.
3567  *
3568  * @note: update ice_dev_supported_ptypes_get() if any change is made here.
3569  */
3570 static inline uint32_t
3571 ice_get_default_pkt_type(uint16_t ptype)
3572 {
3573         static const uint32_t type_table[ICE_MAX_PKT_TYPE]
3574                 __rte_cache_aligned = {
3575                 /* L2 types */
3576                 /* [0] reserved */
3577                 [1] = RTE_PTYPE_L2_ETHER,
3578                 [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
3579                 /* [3] - [5] reserved */
3580                 [6] = RTE_PTYPE_L2_ETHER_LLDP,
3581                 /* [7] - [10] reserved */
3582                 [11] = RTE_PTYPE_L2_ETHER_ARP,
3583                 /* [12] - [21] reserved */
3584
3585                 /* Non tunneled IPv4 */
3586                 [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3587                        RTE_PTYPE_L4_FRAG,
3588                 [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3589                        RTE_PTYPE_L4_NONFRAG,
3590                 [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3591                        RTE_PTYPE_L4_UDP,
3592                 /* [25] reserved */
3593                 [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3594                        RTE_PTYPE_L4_TCP,
3595                 [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3596                        RTE_PTYPE_L4_SCTP,
3597                 [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3598                        RTE_PTYPE_L4_ICMP,
3599
3600                 /* IPv4 --> IPv4 */
3601                 [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3602                        RTE_PTYPE_TUNNEL_IP |
3603                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3604                        RTE_PTYPE_INNER_L4_FRAG,
3605                 [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3606                        RTE_PTYPE_TUNNEL_IP |
3607                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3608                        RTE_PTYPE_INNER_L4_NONFRAG,
3609                 [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3610                        RTE_PTYPE_TUNNEL_IP |
3611                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3612                        RTE_PTYPE_INNER_L4_UDP,
3613                 /* [32] reserved */
3614                 [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3615                        RTE_PTYPE_TUNNEL_IP |
3616                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3617                        RTE_PTYPE_INNER_L4_TCP,
3618                 [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3619                        RTE_PTYPE_TUNNEL_IP |
3620                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3621                        RTE_PTYPE_INNER_L4_SCTP,
3622                 [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3623                        RTE_PTYPE_TUNNEL_IP |
3624                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3625                        RTE_PTYPE_INNER_L4_ICMP,
3626
3627                 /* IPv4 --> IPv6 */
3628                 [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3629                        RTE_PTYPE_TUNNEL_IP |
3630                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3631                        RTE_PTYPE_INNER_L4_FRAG,
3632                 [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3633                        RTE_PTYPE_TUNNEL_IP |
3634                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3635                        RTE_PTYPE_INNER_L4_NONFRAG,
3636                 [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3637                        RTE_PTYPE_TUNNEL_IP |
3638                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3639                        RTE_PTYPE_INNER_L4_UDP,
3640                 /* [39] reserved */
3641                 [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3642                        RTE_PTYPE_TUNNEL_IP |
3643                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3644                        RTE_PTYPE_INNER_L4_TCP,
3645                 [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3646                        RTE_PTYPE_TUNNEL_IP |
3647                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3648                        RTE_PTYPE_INNER_L4_SCTP,
3649                 [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3650                        RTE_PTYPE_TUNNEL_IP |
3651                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3652                        RTE_PTYPE_INNER_L4_ICMP,
3653
3654                 /* IPv4 --> GRE/Teredo/VXLAN */
3655                 [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3656                        RTE_PTYPE_TUNNEL_GRENAT,
3657
3658                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
3659                 [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3660                        RTE_PTYPE_TUNNEL_GRENAT |
3661                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3662                        RTE_PTYPE_INNER_L4_FRAG,
3663                 [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3664                        RTE_PTYPE_TUNNEL_GRENAT |
3665                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3666                        RTE_PTYPE_INNER_L4_NONFRAG,
3667                 [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3668                        RTE_PTYPE_TUNNEL_GRENAT |
3669                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3670                        RTE_PTYPE_INNER_L4_UDP,
3671                 /* [47] reserved */
3672                 [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3673                        RTE_PTYPE_TUNNEL_GRENAT |
3674                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3675                        RTE_PTYPE_INNER_L4_TCP,
3676                 [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3677                        RTE_PTYPE_TUNNEL_GRENAT |
3678                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3679                        RTE_PTYPE_INNER_L4_SCTP,
3680                 [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3681                        RTE_PTYPE_TUNNEL_GRENAT |
3682                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3683                        RTE_PTYPE_INNER_L4_ICMP,
3684
3685                 /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
3686                 [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3687                        RTE_PTYPE_TUNNEL_GRENAT |
3688                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3689                        RTE_PTYPE_INNER_L4_FRAG,
3690                 [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3691                        RTE_PTYPE_TUNNEL_GRENAT |
3692                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3693                        RTE_PTYPE_INNER_L4_NONFRAG,
3694                 [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3695                        RTE_PTYPE_TUNNEL_GRENAT |
3696                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3697                        RTE_PTYPE_INNER_L4_UDP,
3698                 /* [54] reserved */
3699                 [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3700                        RTE_PTYPE_TUNNEL_GRENAT |
3701                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3702                        RTE_PTYPE_INNER_L4_TCP,
3703                 [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3704                        RTE_PTYPE_TUNNEL_GRENAT |
3705                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3706                        RTE_PTYPE_INNER_L4_SCTP,
3707                 [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3708                        RTE_PTYPE_TUNNEL_GRENAT |
3709                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3710                        RTE_PTYPE_INNER_L4_ICMP,
3711
3712                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
3713                 [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3714                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
3715
3716                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
3717                 [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3718                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3719                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3720                        RTE_PTYPE_INNER_L4_FRAG,
3721                 [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3722                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3723                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3724                        RTE_PTYPE_INNER_L4_NONFRAG,
3725                 [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3726                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3727                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3728                        RTE_PTYPE_INNER_L4_UDP,
3729                 /* [62] reserved */
3730                 [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3731                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3732                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3733                        RTE_PTYPE_INNER_L4_TCP,
3734                 [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3735                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3736                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3737                        RTE_PTYPE_INNER_L4_SCTP,
3738                 [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3739                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3740                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3741                        RTE_PTYPE_INNER_L4_ICMP,
3742
3743                 /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
3744                 [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3745                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3746                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3747                        RTE_PTYPE_INNER_L4_FRAG,
3748                 [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3749                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3750                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3751                        RTE_PTYPE_INNER_L4_NONFRAG,
3752                 [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3753                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3754                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3755                        RTE_PTYPE_INNER_L4_UDP,
3756                 /* [69] reserved */
3757                 [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3758                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3759                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3760                        RTE_PTYPE_INNER_L4_TCP,
3761                 [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3762                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3763                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3764                        RTE_PTYPE_INNER_L4_SCTP,
3765                 [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
3766                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
3767                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3768                        RTE_PTYPE_INNER_L4_ICMP,
3769                 /* [73] - [87] reserved */
3770
3771                 /* Non tunneled IPv6 */
3772                 [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3773                        RTE_PTYPE_L4_FRAG,
3774                 [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3775                        RTE_PTYPE_L4_NONFRAG,
3776                 [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3777                        RTE_PTYPE_L4_UDP,
3778                 /* [91] reserved */
3779                 [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3780                        RTE_PTYPE_L4_TCP,
3781                 [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3782                        RTE_PTYPE_L4_SCTP,
3783                 [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3784                        RTE_PTYPE_L4_ICMP,
3785
3786                 /* IPv6 --> IPv4 */
3787                 [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3788                        RTE_PTYPE_TUNNEL_IP |
3789                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3790                        RTE_PTYPE_INNER_L4_FRAG,
3791                 [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3792                        RTE_PTYPE_TUNNEL_IP |
3793                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3794                        RTE_PTYPE_INNER_L4_NONFRAG,
3795                 [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3796                        RTE_PTYPE_TUNNEL_IP |
3797                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3798                        RTE_PTYPE_INNER_L4_UDP,
3799                 /* [98] reserved */
3800                 [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3801                        RTE_PTYPE_TUNNEL_IP |
3802                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3803                        RTE_PTYPE_INNER_L4_TCP,
3804                 [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3805                         RTE_PTYPE_TUNNEL_IP |
3806                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3807                         RTE_PTYPE_INNER_L4_SCTP,
3808                 [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3809                         RTE_PTYPE_TUNNEL_IP |
3810                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
3811                         RTE_PTYPE_INNER_L4_ICMP,
3812
3813                 /* IPv6 --> IPv6 */
3814                 [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3815                         RTE_PTYPE_TUNNEL_IP |
3816                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3817                         RTE_PTYPE_INNER_L4_FRAG,
3818                 [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3819                         RTE_PTYPE_TUNNEL_IP |
3820                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3821                         RTE_PTYPE_INNER_L4_NONFRAG,
3822                 [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3823                         RTE_PTYPE_TUNNEL_IP |
3824                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3825                         RTE_PTYPE_INNER_L4_UDP,
3826                 /* [105] reserved */
3827                 [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3828                         RTE_PTYPE_TUNNEL_IP |
3829                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
3830                         RTE_PTYPE_INNER_L4_TCP,
3831                 [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
3832                         RTE_PTYPE_TUNNEL_IP |
3833                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_SCTP,
                [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_IP |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_ICMP,

                /* IPv6 --> GRE/Teredo/VXLAN */
                [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT,

                /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
                [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_FRAG,
                [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_NONFRAG,
                [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_UDP,
                /* [113] reserved */
                [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_TCP,
                [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_SCTP,
                [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_ICMP,

                /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
                [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_FRAG,
                [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_NONFRAG,
                [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_UDP,
                /* [120] reserved */
                [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_TCP,
                [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_SCTP,
                [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_ICMP,

                /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
                [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,

                /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
                [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_FRAG,
                [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_NONFRAG,
                [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_UDP,
                /* [128] reserved */
                [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_TCP,
                [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_SCTP,
                [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_ICMP,

                /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
                [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_FRAG,
                [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_NONFRAG,
                [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_UDP,
                /* [135] reserved */
                [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_TCP,
                [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_SCTP,
                [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_ICMP,
                /* [139] - [299] reserved */

                /* PPPoE */
                [300] = RTE_PTYPE_L2_ETHER_PPPOE,
                [301] = RTE_PTYPE_L2_ETHER_PPPOE,

                /* PPPoE --> IPv4 */
                [302] = RTE_PTYPE_L2_ETHER_PPPOE |
                        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_FRAG,
                [303] = RTE_PTYPE_L2_ETHER_PPPOE |
                        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_NONFRAG,
                [304] = RTE_PTYPE_L2_ETHER_PPPOE |
                        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [305] = RTE_PTYPE_L2_ETHER_PPPOE |
                        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_TCP,
                [306] = RTE_PTYPE_L2_ETHER_PPPOE |
                        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_SCTP,
                [307] = RTE_PTYPE_L2_ETHER_PPPOE |
                        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_ICMP,

                /* PPPoE --> IPv6 */
                [308] = RTE_PTYPE_L2_ETHER_PPPOE |
                        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_FRAG,
                [309] = RTE_PTYPE_L2_ETHER_PPPOE |
                        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_NONFRAG,
                [310] = RTE_PTYPE_L2_ETHER_PPPOE |
                        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [311] = RTE_PTYPE_L2_ETHER_PPPOE |
                        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_TCP,
                [312] = RTE_PTYPE_L2_ETHER_PPPOE |
                        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_SCTP,
                [313] = RTE_PTYPE_L2_ETHER_PPPOE |
                        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_ICMP,
                /* [314] - [324] reserved */

                /* IPv4/IPv6 --> GTPC/GTPU */
                [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPC,
                [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPC,
                [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPC,
                [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPC,
                [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU,
                [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU,

                /* IPv4 --> GTPU --> IPv4 */
                [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_FRAG,
                [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_NONFRAG,
                [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_UDP,
                [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_TCP,
                [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_ICMP,

                /* IPv6 --> GTPU --> IPv4 */
                [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_FRAG,
                [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_NONFRAG,
                [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_UDP,
                [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_TCP,
                [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_ICMP,

                /* IPv4 --> GTPU --> IPv6 */
                [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_FRAG,
                [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_NONFRAG,
                [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_UDP,
                [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_TCP,
                [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_ICMP,

                /* IPv6 --> GTPU --> IPv6 */
                [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_FRAG,
                [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_NONFRAG,
                [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_UDP,
                [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_TCP,
                [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_ICMP,

                /* IPv4 --> UDP ECPRI */
                [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,

                /* IPv6 --> UDP ECPRI */
                [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_L4_UDP,
                /* All others reserved */
        };

        return type_table[ptype];
}

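/*
 * Initialize the adapter's ptype lookup table with the default
 * hardware ptype to RTE_PTYPE mapping for all ICE_MAX_PKT_TYPE entries.
 */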
void __rte_cold
ice_set_default_ptype_table(struct rte_eth_dev *dev)
{
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        int i;

        for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
                ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
}

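/*
 * Fields extracted from qword1 of the Flow Director programming status
 * write-back descriptor (see ice_check_fdir_programming_status()).
 */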
#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1
#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \
                        (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
#define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1

#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S   4
#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M   \
        (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S      5
#define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M      \
        (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)

/*
 * Check the programming status descriptor in the FDIR Rx queue. This is
 * done after a Flow Director filter has been programmed via the Tx queue.
 */
static inline int
ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
{
        volatile union ice_32byte_rx_desc *rxdp;
        uint64_t qword1;
        uint32_t rx_status;
        uint32_t error;
        uint32_t id;
        int ret = -EAGAIN;

        rxdp = (volatile union ice_32byte_rx_desc *)
                (&rxq->rx_ring[rxq->rx_tail]);
        qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
        rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
                        >> ICE_RXD_QW1_STATUS_S;

        if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
                ret = 0;
                error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
                        ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
                id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
                        ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
                if (error) {
                        if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
                                PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
                        else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
                                PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
                        ret = -EINVAL;
                        goto err;
                }
                error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
                        ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
                if (error) {
                        PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
                        ret = -EINVAL;
                }
err:
                rxdp->wb.qword1.status_error_len = 0;
                rxq->rx_tail++;
                if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
                        rxq->rx_tail = 0;
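                /*
                 * Hand the consumed descriptor back to hardware: the tail
                 * register is written one entry behind the software rx_tail,
                 * wrapping at the end of the ring.
                 */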
                if (rxq->rx_tail == 0)
                        ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
                else
                        ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
        }

        return ret;
}

#define ICE_FDIR_MAX_WAIT_US 10000

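/*
 * Program one Flow Director filter: write the filter programming descriptor
 * plus a dummy data descriptor to the dedicated FDIR Tx queue, wait for the
 * descriptor write-back to report completion, then poll the FDIR Rx queue
 * for the programming status.
 */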
int
ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
{
        struct ice_tx_queue *txq = pf->fdir.txq;
        struct ice_rx_queue *rxq = pf->fdir.rxq;
        volatile struct ice_fltr_desc *fdirdp;
        volatile struct ice_tx_desc *txdp;
        uint32_t td_cmd;
        uint16_t i;

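        /* First descriptor: the filter programming descriptor copied from fdir_desc */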
        fdirdp = (volatile struct ice_fltr_desc *)
                (&txq->tx_ring[txq->tx_tail]);
        fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
        fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;

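        /* Second descriptor: a dummy data descriptor pointing at the FDIR packet buffer */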
        txdp = &txq->tx_ring[txq->tx_tail + 1];
        txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
        td_cmd = ICE_TX_DESC_CMD_EOP |
                ICE_TX_DESC_CMD_RS  |
                ICE_TX_DESC_CMD_DUMMY;

        txdp->cmd_type_offset_bsz =
                ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);

        txq->tx_tail += 2;
        if (txq->tx_tail >= txq->nb_tx_desc)
                txq->tx_tail = 0;
        /* Update the tx tail register */
        ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
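        /* Wait for hardware to write the descriptor back as done (DESC_DONE) */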
        for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
                if ((txdp->cmd_type_offset_bsz &
                     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
                    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
                        break;
                rte_delay_us(1);
        }
        if (i >= ICE_FDIR_MAX_WAIT_US) {
                PMD_DRV_LOG(ERR,
                            "Failed to program FDIR filter: timed out waiting for DD on the tx queue.");
                return -ETIMEDOUT;
        }

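        /* Poll the FDIR Rx queue for the programming status write-back */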
        for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
                int ret;

                ret = ice_check_fdir_programming_status(rxq);
                if (ret == -EAGAIN)
                        rte_delay_us(1);
                else
                        return ret;
        }

        PMD_DRV_LOG(ERR,
                    "Failed to program FDIR filter: timed out waiting for programming status.");
        return -ETIMEDOUT;
}