/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <sys/queue.h>

#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_ether.h>
#include <ethdev_driver.h>

#include "iavf_rxtx.h"
#include "iavf_ipsec_crypto.h"
#include "rte_pmd_iavf.h"
/* Offset of mbuf dynamic field for protocol extraction's metadata */
int rte_pmd_ifd_dynfield_proto_xtr_metadata_offs = -1;

/* Mask of mbuf dynamic flags for protocol extraction's type */
uint64_t rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
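
/*
 * Illustrative application-side use (a sketch, not part of this driver):
 * once the PMD has registered the dynflag and the metadata dynfield, an
 * application can test a received mbuf's flags against one of these masks
 * and then read the extracted metadata:
 *
 *	if (mb->ol_flags & rte_pmd_ifd_dynflag_proto_xtr_vlan_mask)
 *		meta = *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb);
 */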
static uint8_t
iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
{
	static uint8_t rxdid_map[] = {
		[IAVF_PROTO_XTR_NONE]      = IAVF_RXDID_COMMS_OVS_1,
		[IAVF_PROTO_XTR_VLAN]      = IAVF_RXDID_COMMS_AUX_VLAN,
		[IAVF_PROTO_XTR_IPV4]      = IAVF_RXDID_COMMS_AUX_IPV4,
		[IAVF_PROTO_XTR_IPV6]      = IAVF_RXDID_COMMS_AUX_IPV6,
		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
	};
	return flex_type < RTE_DIM(rxdid_map) ?
				rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1;
}
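
/*
 * For example, an out-of-range flex_type simply falls back to
 * IAVF_RXDID_COMMS_OVS_1 instead of indexing past the map, so callers can
 * pass an unvalidated per-queue configuration value here.
 */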
static int
iavf_monitor_callback(const uint64_t value,
		const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
{
	const uint64_t m = rte_cpu_to_le_64(1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
	/*
	 * we expect the DD bit to be set to 1 if this descriptor was already
	 * written to.
	 */
	return (value & m) == m ? -1 : 0;
}
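
/*
 * Returning -1 tells the power-management library to abort the sleep: a set
 * DD bit means a packet has already landed on the watched descriptor, so
 * there is no point entering the monitor state.
 */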
static int
iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
{
	struct iavf_rx_queue *rxq = rx_queue;
	volatile union iavf_rx_desc *rxdp;
	uint16_t desc = rxq->rx_tail;

	rxdp = &rxq->rx_ring[desc];
	/* watch for changes in status bit */
	pmc->addr = &rxdp->wb.qword1.status_error_len;

	/* comparison callback */
	pmc->fn = iavf_monitor_callback;

	/* registers are 64-bit */
	pmc->size = sizeof(uint64_t);

	return 0;
}
static int
check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
{
	/* The following constraints must be satisfied:
	 *   thresh < rxq->nb_rx_desc
	 */
	if (thresh >= nb_desc) {
		PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
			     thresh, nb_desc);
		return -EINVAL;
	}
	return 0;
}
static int
check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
		uint16_t tx_free_thresh)
{
	/* TX descriptors will have their RS bit set after tx_rs_thresh
	 * descriptors have been used. The TX descriptor ring will be cleaned
	 * after tx_free_thresh descriptors are used or if the number of
	 * descriptors required to transmit a packet is greater than the
	 * number of free TX descriptors.
	 *
	 * The following constraints must be satisfied:
	 *  - tx_rs_thresh must be less than the size of the ring minus 2.
	 *  - tx_free_thresh must be less than the size of the ring minus 3.
	 *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
	 *  - tx_rs_thresh must be a divisor of the ring size.
	 *
	 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
	 * race condition, hence the maximum threshold constraints. When set
	 * to zero use default values.
	 */
	if (tx_rs_thresh >= (nb_desc - 2)) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
			     "number of TX descriptors (%u) minus 2",
			     tx_rs_thresh, nb_desc);
		return -EINVAL;
	}
	if (tx_free_thresh >= (nb_desc - 3)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
			     "number of TX descriptors (%u) minus 3.",
			     tx_free_thresh, nb_desc);
		return -EINVAL;
	}
	if (tx_rs_thresh > tx_free_thresh) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
			     "equal to tx_free_thresh (%u).",
			     tx_rs_thresh, tx_free_thresh);
		return -EINVAL;
	}
	if ((nb_desc % tx_rs_thresh) != 0) {
		PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
			     "number of TX descriptors (%u).",
			     tx_rs_thresh, nb_desc);
		return -EINVAL;
	}
	return 0;
}
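
/*
 * Worked example: with nb_desc = 512, tx_rs_thresh = 32 and
 * tx_free_thresh = 32 all four constraints hold (32 < 510, 32 < 509,
 * 32 <= 32, and 512 % 32 == 0), so the queue setup is accepted.
 */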
static inline bool
check_rx_vec_allow(struct iavf_rx_queue *rxq)
{
	if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
	    rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
		PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
		return true;
	}

	PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
	return false;
}
static inline bool
check_tx_vec_allow(struct iavf_tx_queue *txq)
{
	if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) &&
	    txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
	    txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
		PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on this txq.");
		return true;
	}

	PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
	return false;
}
static inline bool
check_rx_bulk_allow(struct iavf_rx_queue *rxq)
{
	int ret = true;

	if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->rx_free_thresh=%d, "
			     "IAVF_RX_MAX_BURST=%d",
			     rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
		ret = false;
	} else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->nb_rx_desc=%d, "
			     "rxq->rx_free_thresh=%d",
			     rxq->nb_rx_desc, rxq->rx_free_thresh);
		ret = false;
	}
	return ret;
}
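
/*
 * For instance, assuming IAVF_RX_MAX_BURST is 32 (its usual value),
 * nb_rx_desc = 512 with rx_free_thresh = 64 satisfies both preconditions
 * (64 >= 32 and 512 % 64 == 0), while rx_free_thresh = 48 would fail the
 * divisibility check.
 */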
static inline void
reset_rx_queue(struct iavf_rx_queue *rxq)
{
	uint16_t len;
	uint32_t i;

	if (!rxq)
		return;

	len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;

	for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));

	for (i = 0; i < IAVF_RX_MAX_BURST; i++)
		rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;

	rxq->rx_tail = 0;
	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

	if (rxq->pkt_first_seg != NULL)
		rte_pktmbuf_free(rxq->pkt_first_seg);

	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	rxq->rxrearm_start = 0;
	rxq->rxrearm_nb = 0;
}
static inline void
reset_tx_queue(struct iavf_tx_queue *txq)
{
	struct iavf_tx_entry *txe;
	uint32_t i, size;
	uint16_t prev;

	if (!txq) {
		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
		return;
	}

	txe = txq->sw_ring;
	size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
	for (i = 0; i < size; i++)
		((volatile char *)txq->tx_ring)[i] = 0;

	prev = (uint16_t)(txq->nb_tx_desc - 1);
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->tx_ring[i].cmd_type_offset_bsz =
			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
		txe[i].mbuf = NULL;
		txe[i].last_id = i;
		txe[prev].next_id = i;
		prev = i;
	}

	txq->tx_tail = 0;
	txq->nb_used = 0;

	txq->last_desc_cleaned = txq->nb_tx_desc - 1;
	txq->nb_free = txq->nb_tx_desc - 1;

	txq->next_dd = txq->rs_thresh - 1;
	txq->next_rs = txq->rs_thresh - 1;
}
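
/*
 * Note: nb_free starts at nb_tx_desc - 1 because one descriptor is reserved
 * as the sentinel described in check_tx_thresh(); next_dd/next_rs are the
 * ring indices where the next descriptor-done check and the next RS bit
 * will land.
 */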
static int
alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
{
	volatile union iavf_rx_desc *rxd;
	struct rte_mbuf *mbuf = NULL;
	uint64_t dma_addr;
	uint16_t i, j;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		mbuf = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!mbuf)) {
			for (j = 0; j < i; j++) {
				rte_pktmbuf_free_seg(rxq->sw_ring[j]);
				rxq->sw_ring[j] = NULL;
			}
			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
			return -ENOMEM;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		rxd = &rxq->rx_ring[i];
		rxd->read.pkt_addr = dma_addr;
		rxd->read.hdr_addr = 0;
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
		rxd->read.rsvd1 = 0;
		rxd->read.rsvd2 = 0;
#endif

		rxq->sw_ring[i] = mbuf;
	}

	return 0;
}
static inline void
release_rxq_mbufs(struct iavf_rx_queue *rxq)
{
	uint16_t i;

	if (!rxq->sw_ring)
		return;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->sw_ring[i]) {
			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
			rxq->sw_ring[i] = NULL;
		}
	}

	/* for rx bulk */
	if (rxq->rx_nb_avail == 0)
		return;
	for (i = 0; i < rxq->rx_nb_avail; i++) {
		struct rte_mbuf *mbuf;

		mbuf = rxq->rx_stage[rxq->rx_next_avail + i];
		rte_pktmbuf_free_seg(mbuf);
	}
	rxq->rx_nb_avail = 0;
}
static inline void
release_txq_mbufs(struct iavf_tx_queue *txq)
{
	uint16_t i;

	if (!txq || !txq->sw_ring) {
		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
		return;
	}

	for (i = 0; i < txq->nb_tx_desc; i++) {
		if (txq->sw_ring[i].mbuf) {
			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
			txq->sw_ring[i].mbuf = NULL;
		}
	}
}
static const struct iavf_rxq_ops def_rxq_ops = {
	.release_mbufs = release_rxq_mbufs,
};

static const struct iavf_txq_ops def_txq_ops = {
	.release_mbufs = release_txq_mbufs,
};
static inline void
iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
				    struct rte_mbuf *mb,
				    volatile union iavf_rx_flex_desc *rxdp)
{
	volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
			(volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	uint16_t stat_err;
#endif

	if (desc->flow_id != 0xFFFFFFFF) {
		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
	}

#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	stat_err = rte_le_to_cpu_16(desc->status_error0);
	if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}
#endif
}
static inline void
iavf_rxd_to_pkt_fields_by_comms_aux_v1(struct iavf_rx_queue *rxq,
				       struct rte_mbuf *mb,
				       volatile union iavf_rx_flex_desc *rxdp)
{
	volatile struct iavf_32b_rx_flex_desc_comms *desc =
			(volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
	uint16_t stat_err;

	stat_err = rte_le_to_cpu_16(desc->status_error0);
	if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}

#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	if (desc->flow_id != 0xFFFFFFFF) {
		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
	}

	if (rxq->xtr_ol_flag) {
		uint32_t metadata = 0;

		stat_err = rte_le_to_cpu_16(desc->status_error1);

		if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);

		if (stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
			metadata |=
				rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;

		if (metadata) {
			mb->ol_flags |= rxq->xtr_ol_flag;

			*RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
		}
	}
#endif
}
static inline void
iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
				       struct rte_mbuf *mb,
				       volatile union iavf_rx_flex_desc *rxdp)
{
	volatile struct iavf_32b_rx_flex_desc_comms *desc =
			(volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
	uint16_t stat_err;

	stat_err = rte_le_to_cpu_16(desc->status_error0);
	if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}

#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	if (desc->flow_id != 0xFFFFFFFF) {
		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
	}

	if (rxq->xtr_ol_flag) {
		uint32_t metadata = 0;

		if (desc->flex_ts.flex.aux0 != 0xFFFF)
			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
		else if (desc->flex_ts.flex.aux1 != 0xFFFF)
			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);

		if (metadata) {
			mb->ol_flags |= rxq->xtr_ol_flag;

			*RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
		}
	}
#endif
}
static void
iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
{
	switch (rxdid) {
	case IAVF_RXDID_COMMS_AUX_VLAN:
		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
		rxq->rxd_to_pkt_fields =
			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
		break;
	case IAVF_RXDID_COMMS_AUX_IPV4:
		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
		rxq->rxd_to_pkt_fields =
			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
		break;
	case IAVF_RXDID_COMMS_AUX_IPV6:
		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
		rxq->rxd_to_pkt_fields =
			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
		break;
	case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
		rxq->xtr_ol_flag =
			rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
		rxq->rxd_to_pkt_fields =
			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
		break;
	case IAVF_RXDID_COMMS_AUX_TCP:
		rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
		rxq->rxd_to_pkt_fields =
			iavf_rxd_to_pkt_fields_by_comms_aux_v1;
		break;
	case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
		rxq->xtr_ol_flag =
			rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
		rxq->rxd_to_pkt_fields =
			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
		break;
	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
		rxq->xtr_ol_flag =
			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
		rxq->rxd_to_pkt_fields =
			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
		break;
	case IAVF_RXDID_COMMS_OVS_1:
		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
		break;
	default:
		/* update this according to the RXDID for FLEX_DESC_NONE */
		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
		break;
	}

	if (!rte_pmd_ifd_dynf_proto_xtr_metadata_avail())
		rxq->xtr_ol_flag = 0;
}
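
/*
 * The final check guards the hot path: if the metadata dynfield was never
 * registered (protocol extraction not requested via devargs), xtr_ol_flag
 * is cleared so the per-packet handlers skip the metadata store entirely.
 */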
int
iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		       uint16_t nb_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf =
		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_vsi *vsi = &vf->vsi;
	struct iavf_rx_queue *rxq;
	const struct rte_memzone *mz;
	uint32_t ring_size;
	uint8_t proto_xtr;
	uint16_t len;
	uint16_t rx_free_thresh;
	uint64_t offloads;

	PMD_INIT_FUNC_TRACE();

	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;

	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
	    nb_desc > IAVF_MAX_RING_DESC ||
	    nb_desc < IAVF_MIN_RING_DESC) {
		PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
			     "invalid", nb_desc);
		return -EINVAL;
	}

	/* Check free threshold */
	rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
			 IAVF_DEFAULT_RX_FREE_THRESH :
			 rx_conf->rx_free_thresh;
	if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
		return -EINVAL;

	/* Free memory if needed */
	if (dev->data->rx_queues[queue_idx]) {
		iavf_dev_rx_queue_release(dev, queue_idx);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* Allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("iavf rxq",
				 sizeof(struct iavf_rx_queue),
				 RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (!rxq) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
			     "rx queue data structure");
		return -ENOMEM;
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
		proto_xtr = vf->proto_xtr ? vf->proto_xtr[queue_idx] :
				IAVF_PROTO_XTR_NONE;
		rxq->rxdid = iavf_proto_xtr_type_to_rxdid(proto_xtr);
		rxq->proto_xtr = proto_xtr;
	} else {
		rxq->rxdid = IAVF_RXDID_LEGACY_1;
		rxq->proto_xtr = IAVF_PROTO_XTR_NONE;
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
		struct virtchnl_vlan_supported_caps *stripping_support =
			&vf->vlan_v2_caps.offloads.stripping_support;
		uint32_t stripping_cap;

		if (stripping_support->outer)
			stripping_cap = stripping_support->outer;
		else
			stripping_cap = stripping_support->inner;

		if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
			rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
		else if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
			rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
	} else {
		rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1;
	}

	iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid);

	rxq->mp = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
	rxq->rx_hdr_len = 0;
	rxq->vsi = vsi;
	rxq->offloads = offloads;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
	rxq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));

	/* Allocate the software ring. */
	len = nb_desc + IAVF_RX_MAX_BURST;
	rxq->sw_ring =
		rte_zmalloc_socket("iavf rx sw ring",
				   sizeof(struct rte_mbuf *) * len,
				   RTE_CACHE_LINE_SIZE,
				   socket_id);
	if (!rxq->sw_ring) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
		rte_free(rxq);
		return -ENOMEM;
	}

	/* Allocate the maximum number of RX ring hardware descriptor with
	 * a little more to support bulk allocate.
	 */
	len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
	ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
			      IAVF_DMA_MEM_ALIGN);
	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
				      ring_size, IAVF_RING_BASE_ALIGN,
				      socket_id);
	if (!mz) {
		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
		rte_free(rxq->sw_ring);
		rte_free(rxq);
		return -ENOMEM;
	}
	/* Zero all the descriptors in the ring. */
	memset(mz->addr, 0, ring_size);
	rxq->rx_ring_phys_addr = mz->iova;
	rxq->rx_ring = (union iavf_rx_desc *)mz->addr;

	rxq->mz = mz;
	reset_rx_queue(rxq);
	dev->data->rx_queues[queue_idx] = rxq;
	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
	rxq->ops = &def_rxq_ops;

	if (check_rx_bulk_allow(rxq) == true) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
			     "satisfied. Rx Burst Bulk Alloc function will be "
			     "used on port=%d, queue=%d.",
			     rxq->port_id, rxq->queue_id);
	} else {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
			     "not satisfied, Scattered Rx is requested "
			     "on port=%d, queue=%d.",
			     rxq->port_id, rxq->queue_id);
		ad->rx_bulk_alloc_allowed = false;
	}

	if (check_rx_vec_allow(rxq) == false)
		ad->rx_vec_allowed = false;

	return 0;
}
int
iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t queue_idx,
		       uint16_t nb_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf =
		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_tx_queue *txq;
	const struct rte_memzone *mz;
	uint32_t ring_size;
	uint16_t tx_rs_thresh, tx_free_thresh;
	uint64_t offloads;

	PMD_INIT_FUNC_TRACE();

	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;

	if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
	    nb_desc > IAVF_MAX_RING_DESC ||
	    nb_desc < IAVF_MIN_RING_DESC) {
		PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
			     "invalid", nb_desc);
		return -EINVAL;
	}

	tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
		tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
	if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
		return -EINVAL;

	/* Free memory if needed. */
	if (dev->data->tx_queues[queue_idx]) {
		iavf_dev_tx_queue_release(dev, queue_idx);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	/* Allocate the TX queue data structure. */
	txq = rte_zmalloc_socket("iavf txq",
				 sizeof(struct iavf_tx_queue),
				 RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (!txq) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
			     "tx queue structure");
		return -ENOMEM;
	}

	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
		struct virtchnl_vlan_supported_caps *insertion_support =
			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
		uint32_t insertion_cap;

		if (insertion_support->outer)
			insertion_cap = insertion_support->outer;
		else
			insertion_cap = insertion_support->inner;

		if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
			txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
		else if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
			txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2;
	} else {
		txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1;
	}

	txq->nb_tx_desc = nb_desc;
	txq->rs_thresh = tx_rs_thresh;
	txq->free_thresh = tx_free_thresh;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->offloads = offloads;
	txq->tx_deferred_start = tx_conf->tx_deferred_start;

	if (iavf_ipsec_crypto_supported(adapter))
		txq->ipsec_crypto_pkt_md_offset =
			iavf_security_get_pkt_md_offset(adapter);

	/* Allocate software ring */
	txq->sw_ring =
		rte_zmalloc_socket("iavf tx sw ring",
				   sizeof(struct iavf_tx_entry) * nb_desc,
				   RTE_CACHE_LINE_SIZE,
				   socket_id);
	if (!txq->sw_ring) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
		rte_free(txq);
		return -ENOMEM;
	}

	/* Allocate TX hardware ring descriptors. */
	ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
	ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      ring_size, IAVF_RING_BASE_ALIGN,
				      socket_id);
	if (!mz) {
		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
		rte_free(txq->sw_ring);
		rte_free(txq);
		return -ENOMEM;
	}
	txq->tx_ring_phys_addr = mz->iova;
	txq->tx_ring = (struct iavf_tx_desc *)mz->addr;

	txq->mz = mz;
	reset_tx_queue(txq);
	dev->data->tx_queues[queue_idx] = txq;
	txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
	txq->ops = &def_txq_ops;

	if (check_tx_vec_allow(txq) == false) {
		struct iavf_adapter *ad =
			IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
		ad->tx_vec_allowed = false;
	}

	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
	    vf->tm_conf.committed) {
		int tc;

		for (tc = 0; tc < vf->qos_cap->num_elem; tc++) {
			if (txq->queue_id >= vf->qtc_map[tc].start_queue_id &&
			    txq->queue_id < (vf->qtc_map[tc].start_queue_id +
			    vf->qtc_map[tc].queue_count))
				break;
		}
		if (tc >= vf->qos_cap->num_elem) {
			PMD_INIT_LOG(ERR, "Queue TC mapping is not correct");
			return -EINVAL;
		}
	}

	return 0;
}
int
iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct iavf_rx_queue *rxq;
	int err = 0;

	PMD_DRV_FUNC_TRACE();

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	rxq = dev->data->rx_queues[rx_queue_id];

	err = alloc_rxq_mbufs(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
		return err;
	}

	rte_wmb();

	/* Init the RX tail register. */
	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	if (!vf->lv_enabled)
		err = iavf_switch_queue(adapter, rx_queue_id, true, true);
	else
		err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true);

	if (err) {
		release_rxq_mbufs(rxq);
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
			    rx_queue_id);
	} else {
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_STARTED;
	}

	return err;
}
int
iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct iavf_tx_queue *txq;
	int err = 0;

	PMD_DRV_FUNC_TRACE();

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	txq = dev->data->tx_queues[tx_queue_id];

	/* Init the TX tail register. */
	IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
	IAVF_WRITE_FLUSH(hw);

	/* Ready to switch the queue on */
	if (!vf->lv_enabled)
		err = iavf_switch_queue(adapter, tx_queue_id, false, true);
	else
		err = iavf_switch_queue_lv(adapter, tx_queue_id, false, true);

	if (err)
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
			    tx_queue_id);
	else
		dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_STARTED;

	return err;
}
int
iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_rx_queue *rxq;
	int err;

	PMD_DRV_FUNC_TRACE();

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	err = iavf_switch_queue(adapter, rx_queue_id, true, false);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
			    rx_queue_id);
		return err;
	}

	rxq = dev->data->rx_queues[rx_queue_id];
	rxq->ops->release_mbufs(rxq);
	reset_rx_queue(rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}
int
iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_tx_queue *txq;
	int err;

	PMD_DRV_FUNC_TRACE();

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -EINVAL;

	err = iavf_switch_queue(adapter, tx_queue_id, false, false);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
			    tx_queue_id);
		return err;
	}

	txq = dev->data->tx_queues[tx_queue_id];
	txq->ops->release_mbufs(txq);
	reset_tx_queue(txq);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}
void
iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct iavf_rx_queue *q = dev->data->rx_queues[qid];

	if (!q)
		return;

	q->ops->release_mbufs(q);
	rte_free(q->sw_ring);
	rte_memzone_free(q->mz);
	rte_free(q);
}
void
iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct iavf_tx_queue *q = dev->data->tx_queues[qid];

	if (!q)
		return;

	q->ops->release_mbufs(q);
	rte_free(q->sw_ring);
	rte_memzone_free(q->mz);
	rte_free(q);
}
void
iavf_stop_queues(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct iavf_rx_queue *rxq;
	struct iavf_tx_queue *txq;
	int ret, i;

	/* Stop All queues */
	if (!vf->lv_enabled) {
		ret = iavf_disable_queues(adapter);
		if (ret)
			PMD_DRV_LOG(WARNING, "Fail to stop queues");
	} else {
		ret = iavf_disable_queues_lv(adapter);
		if (ret)
			PMD_DRV_LOG(WARNING, "Fail to stop queues for large VF");
	}

	if (ret)
		PMD_DRV_LOG(WARNING, "Fail to stop queues");

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (!txq)
			continue;
		txq->ops->release_mbufs(txq);
		reset_tx_queue(txq);
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (!rxq)
			continue;
		rxq->ops->release_mbufs(rxq);
		reset_rx_queue(rxq);
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
}
static inline void
iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
{
	if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
	    (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
		mb->ol_flags |= RTE_MBUF_F_RX_VLAN |
				RTE_MBUF_F_RX_VLAN_STRIPPED;
		mb->vlan_tci =
			rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
	} else {
		mb->vlan_tci = 0;
	}
}
static inline void
iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
			  volatile union iavf_rx_flex_desc *rxdp)
{
	/* status_error0 is a 16-bit field, so use the 16-bit conversion */
	if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
		mb->ol_flags |= RTE_MBUF_F_RX_VLAN |
				RTE_MBUF_F_RX_VLAN_STRIPPED;
		mb->vlan_tci =
			rte_le_to_cpu_16(rxdp->wb.l2tag1);
	} else {
		mb->vlan_tci = 0;
	}

#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
		mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED |
				RTE_MBUF_F_RX_QINQ |
				RTE_MBUF_F_RX_VLAN_STRIPPED |
				RTE_MBUF_F_RX_VLAN;
		mb->vlan_tci_outer = mb->vlan_tci;
		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
	} else {
		mb->vlan_tci_outer = 0;
	}
#endif
}
static inline void
iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
			  volatile union iavf_rx_flex_desc *rxdp)
{
	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;

	mb->dynfield1[0] = desc->ipsec_said &
			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
}
static inline void
iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
				     volatile union iavf_rx_flex_desc *rxdp,
				     struct iavf_ipsec_crypto_stats *stats)
{
	/* status_error1 is a 16-bit field, so use the 16-bit conversion */
	uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);

	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
		uint16_t ipsec_status;

		mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;

		ipsec_status = status1 &
			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;

		if (unlikely(ipsec_status !=
				IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
			mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;

			switch (ipsec_status) {
			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
				stats->ierrors.sad_miss++;
				break;
			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
				stats->ierrors.not_processed++;
				break;
			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
				stats->ierrors.icv_check++;
				break;
			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
				stats->ierrors.ipsec_length++;
				break;
			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
				stats->ierrors.misc++;
				break;
			}

			stats->ierrors.count++;
			return;
		}

		stats->icount++;
		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;

		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
		    ipsec_status !=
				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
	}
}
/* Translate the rx descriptor status and error fields to pkt flags */
static inline uint64_t
iavf_rxd_to_pkt_flags(uint64_t qword)
{
	uint64_t flags;
	uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);

#define IAVF_RX_ERR_BITS 0x3f

	/* Check if RSS_HASH */
	flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
					IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
			IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? RTE_MBUF_F_RX_RSS_HASH : 0;

	/* Check if FDIR Match */
	flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
				RTE_MBUF_F_RX_FDIR : 0);

	if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
		return flags;
	}

	if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else
		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;

	if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	else
		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	/* TODO: Oversize error bit is not processed here */

	return flags;
}
static inline uint64_t
iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
{
	uint64_t flags = 0;
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	uint16_t flexbh;

	flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
		IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
		IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;

	if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
		mb->hash.fdir.hi =
			rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
		flags |= RTE_MBUF_F_RX_FDIR_ID;
	}
#else
	mb->hash.fdir.hi =
		rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
	flags |= RTE_MBUF_F_RX_FDIR_ID;
#endif
	return flags;
}
#define IAVF_RX_FLEX_ERR0_BITS	\
	((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) |	\
	 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |	\
	 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |	\
	 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |	\
	 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) |	\
	 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
/* Rx L3/L4 checksum */
static inline uint64_t
iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
{
	uint64_t flags = 0;

	/* check if HW has decoded the packet and checksum */
	if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
		return 0;

	if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
		return flags;
	}

	if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else
		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;

	if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	else
		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
		flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;

	return flags;
}
/* If the number of free RX descriptors is greater than the RX free
 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
 * register. Update the RDT with the value of the last processed RX
 * descriptor minus 1, to guarantee that the RDT register is never
 * equal to the RDH register, which creates a "full" ring situation
 * from the hardware point of view.
 */
static inline void
iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
{
	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);

	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG,
			   "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
			   rxq->port_id, rxq->queue_id, rx_id, nb_hold);
		rx_id = (uint16_t)((rx_id == 0) ?
				   (rxq->nb_rx_desc - 1) : (rx_id - 1));
		IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;
}
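
/*
 * Example: with rx_free_thresh = 32, the tail register is only written once
 * at least 33 descriptors have been consumed, and the value written is
 * rx_id - 1 (wrapping to nb_rx_desc - 1 when rx_id is 0), so tail never
 * catches up with head.
 */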
/* implement recv_pkts */
uint16_t
iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	volatile union iavf_rx_desc *rx_ring;
	volatile union iavf_rx_desc *rxdp;
	struct iavf_rx_queue *rxq;
	union iavf_rx_desc rxd;
	struct rte_mbuf *rxe;
	struct rte_eth_dev *dev;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	uint16_t nb_rx;
	uint32_t rx_status;
	uint64_t qword1;
	uint16_t rx_packet_len;
	uint16_t rx_id, nb_hold;
	uint64_t dma_addr;
	uint64_t pkt_flags;
	const uint32_t *ptype_tbl;

	nb_rx = 0;
	nb_hold = 0;
	rxq = rx_queue;
	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;
	ptype_tbl = rxq->vsi->adapter->ptype_tbl;

	while (nb_rx < nb_pkts) {
		rxdp = &rx_ring[rx_id];
		qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
		rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
			    IAVF_RXD_QW1_STATUS_SHIFT;

		/* Check the DD bit first */
		if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
			break;
		IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);

		nmb = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!nmb)) {
			dev = &rte_eth_devices[rxq->port_id];
			dev->data->rx_mbuf_alloc_failed++;
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u", rxq->port_id, rxq->queue_id);
			break;
		}

		rxd = *rxdp;
		nb_hold++;
		rxe = rxq->sw_ring[rx_id];
		rxq->sw_ring[rx_id] = nmb;
		rx_id++;
		if (unlikely(rx_id == rxq->nb_rx_desc))
			rx_id = 0;

		/* Prefetch next mbuf */
		rte_prefetch0(rxq->sw_ring[rx_id]);

		/* When next RX descriptor is on a cache line boundary,
		 * prefetch the next 4 RX descriptors and next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_prefetch0(&rx_ring[rx_id]);
			rte_prefetch0(rxq->sw_ring[rx_id]);
		}
		rxm = rxe;
		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
		rxdp->read.hdr_addr = 0;
		rxdp->read.pkt_addr = dma_addr;

		rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
				IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
		rxm->nb_segs = 1;
		rxm->next = NULL;
		rxm->pkt_len = rx_packet_len;
		rxm->data_len = rx_packet_len;
		rxm->port = rxq->port_id;
		rxm->ol_flags = 0;
		iavf_rxd_to_vlan_tci(rxm, &rxd);
		pkt_flags = iavf_rxd_to_pkt_flags(qword1);
		rxm->packet_type =
			ptype_tbl[(uint8_t)((qword1 &
			IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];

		if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
			rxm->hash.rss =
				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);

		if (pkt_flags & RTE_MBUF_F_RX_FDIR)
			pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);

		rxm->ol_flags |= pkt_flags;

		rx_pkts[nb_rx++] = rxm;
	}
	rxq->rx_tail = rx_id;

	iavf_update_rx_tail(rxq, nb_hold, rx_id);

	return nb_rx;
}
/* implement recv_pkts for flexible Rx descriptor */
uint16_t
iavf_recv_pkts_flex_rxd(void *rx_queue,
			struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	volatile union iavf_rx_desc *rx_ring;
	volatile union iavf_rx_flex_desc *rxdp;
	struct iavf_rx_queue *rxq;
	union iavf_rx_flex_desc rxd;
	struct rte_mbuf *rxe;
	struct rte_eth_dev *dev;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	uint16_t nb_rx;
	uint16_t rx_stat_err0;
	uint16_t rx_packet_len;
	uint16_t rx_id, nb_hold;
	uint64_t dma_addr;
	uint64_t pkt_flags;
	const uint32_t *ptype_tbl;

	nb_rx = 0;
	nb_hold = 0;
	rxq = rx_queue;
	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;
	ptype_tbl = rxq->vsi->adapter->ptype_tbl;

	while (nb_rx < nb_pkts) {
		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);

		/* Check the DD bit first */
		if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
			break;
		IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);

		nmb = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!nmb)) {
			dev = &rte_eth_devices[rxq->port_id];
			dev->data->rx_mbuf_alloc_failed++;
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u", rxq->port_id, rxq->queue_id);
			break;
		}

		rxd = *rxdp;
		nb_hold++;
		rxe = rxq->sw_ring[rx_id];
		rxq->sw_ring[rx_id] = nmb;
		rx_id++;
		if (unlikely(rx_id == rxq->nb_rx_desc))
			rx_id = 0;

		/* Prefetch next mbuf */
		rte_prefetch0(rxq->sw_ring[rx_id]);

		/* When next RX descriptor is on a cache line boundary,
		 * prefetch the next 4 RX descriptors and next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_prefetch0(&rx_ring[rx_id]);
			rte_prefetch0(rxq->sw_ring[rx_id]);
		}
		rxm = rxe;
		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
		rxdp->read.hdr_addr = 0;
		rxdp->read.pkt_addr = dma_addr;

		rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
				IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
		rxm->nb_segs = 1;
		rxm->next = NULL;
		rxm->pkt_len = rx_packet_len;
		rxm->data_len = rx_packet_len;
		rxm->port = rxq->port_id;
		rxm->ol_flags = 0;
		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
				&rxq->stats.ipsec_crypto);
		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
		rxm->ol_flags |= pkt_flags;

		rx_pkts[nb_rx++] = rxm;
	}
	rxq->rx_tail = rx_id;

	iavf_update_rx_tail(rxq, nb_hold, rx_id);

	return nb_rx;
}
/* implement recv_scattered_pkts for flexible Rx descriptor */
uint16_t
iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts)
{
	struct iavf_rx_queue *rxq = rx_queue;
	union iavf_rx_flex_desc rxd;
	struct rte_mbuf *rxe;
	struct rte_mbuf *first_seg = rxq->pkt_first_seg;
	struct rte_mbuf *last_seg = rxq->pkt_last_seg;
	struct rte_mbuf *nmb, *rxm;
	uint16_t rx_id = rxq->rx_tail;
	uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
	struct rte_eth_dev *dev;
	uint16_t rx_stat_err0;
	uint64_t dma_addr;
	uint64_t pkt_flags;

	volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
	volatile union iavf_rx_flex_desc *rxdp;
	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;

	while (nb_rx < nb_pkts) {
		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);

		/* Check the DD bit */
		if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
			break;
		IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);

		nmb = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!nmb)) {
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u", rxq->port_id, rxq->queue_id);
			dev = &rte_eth_devices[rxq->port_id];
			dev->data->rx_mbuf_alloc_failed++;
			break;
		}

		rxd = *rxdp;
		nb_hold++;
		rxe = rxq->sw_ring[rx_id];
		rxq->sw_ring[rx_id] = nmb;
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;

		/* Prefetch next mbuf */
		rte_prefetch0(rxq->sw_ring[rx_id]);

		/* When next RX descriptor is on a cache line boundary,
		 * prefetch the next 4 RX descriptors and next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_prefetch0(&rx_ring[rx_id]);
			rte_prefetch0(rxq->sw_ring[rx_id]);
		}

		rxm = rxe;
		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));

		/* Set data buffer address and data length of the mbuf */
		rxdp->read.hdr_addr = 0;
		rxdp->read.pkt_addr = dma_addr;
		rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
				IAVF_RX_FLX_DESC_PKT_LEN_M;
		rxm->data_len = rx_packet_len;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;

		/* If this is the first buffer of the received packet, set the
		 * pointer to the first mbuf of the packet and initialize its
		 * context. Otherwise, update the total length and the number
		 * of segments of the current scattered packet, and update the
		 * pointer to the last mbuf of the current packet.
		 */
		if (!first_seg) {
			first_seg = rxm;
			first_seg->nb_segs = 1;
			first_seg->pkt_len = rx_packet_len;
		} else {
			first_seg->pkt_len =
				(uint16_t)(first_seg->pkt_len +
						rx_packet_len);
			first_seg->nb_segs++;
			last_seg->next = rxm;
		}

		/* If this is not the last buffer of the received packet,
		 * update the pointer to the last mbuf of the current scattered
		 * packet and continue to parse the RX ring.
		 */
		if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
			last_seg = rxm;
			continue;
		}

		/* This is the last buffer of the received packet. If the CRC
		 * is not stripped by the hardware:
		 *  - Subtract the CRC length from the total packet length.
		 *  - If the last buffer only contains the whole CRC or a part
		 *  of it, free the mbuf associated to the last buffer. If part
		 *  of the CRC is also contained in the previous mbuf, subtract
		 *  the length of that CRC part from the data length of the
		 *  previous mbuf.
		 */
		rxm->next = NULL;
		if (unlikely(rxq->crc_len > 0)) {
			first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
			if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
				rte_pktmbuf_free_seg(rxm);
				first_seg->nb_segs--;
				last_seg->data_len =
					(uint16_t)(last_seg->data_len -
					(RTE_ETHER_CRC_LEN - rx_packet_len));
				last_seg->next = NULL;
			} else {
				rxm->data_len = (uint16_t)(rx_packet_len -
							   RTE_ETHER_CRC_LEN);
			}
		}

		first_seg->port = rxq->port_id;
		first_seg->ol_flags = 0;
		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
				&rxq->stats.ipsec_crypto);
		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);

		first_seg->ol_flags |= pkt_flags;

		/* Prefetch data of first segment, if configured to do so. */
		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
					  first_seg->data_off));
		rx_pkts[nb_rx++] = first_seg;
		first_seg = NULL;
	}

	/* Record index of the next RX descriptor to probe. */
	rxq->rx_tail = rx_id;
	rxq->pkt_first_seg = first_seg;
	rxq->pkt_last_seg = last_seg;

	iavf_update_rx_tail(rxq, nb_hold, rx_id);

	return nb_rx;
}
/* implement recv_scattered_pkts */
uint16_t
iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct iavf_rx_queue *rxq = rx_queue;
	union iavf_rx_desc rxd;
	struct rte_mbuf *rxe;
	struct rte_mbuf *first_seg = rxq->pkt_first_seg;
	struct rte_mbuf *last_seg = rxq->pkt_last_seg;
	struct rte_mbuf *nmb, *rxm;
	uint16_t rx_id = rxq->rx_tail;
	uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
	struct rte_eth_dev *dev;
	uint32_t rx_status;
	uint64_t qword1;
	uint64_t dma_addr;
	uint64_t pkt_flags;

	volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
	volatile union iavf_rx_desc *rxdp;
	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;

	while (nb_rx < nb_pkts) {
		rxdp = &rx_ring[rx_id];
		qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
		rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
			    IAVF_RXD_QW1_STATUS_SHIFT;

		/* Check the DD bit */
		if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
			break;
		IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);

		nmb = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(!nmb)) {
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				   "queue_id=%u", rxq->port_id, rxq->queue_id);
			dev = &rte_eth_devices[rxq->port_id];
			dev->data->rx_mbuf_alloc_failed++;
			break;
		}

		rxd = *rxdp;
		nb_hold++;
		rxe = rxq->sw_ring[rx_id];
		rxq->sw_ring[rx_id] = nmb;
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;

		/* Prefetch next mbuf */
		rte_prefetch0(rxq->sw_ring[rx_id]);

		/* When next RX descriptor is on a cache line boundary,
		 * prefetch the next 4 RX descriptors and next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_prefetch0(&rx_ring[rx_id]);
			rte_prefetch0(rxq->sw_ring[rx_id]);
		}

		rxm = rxe;
		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));

		/* Set data buffer address and data length of the mbuf */
		rxdp->read.hdr_addr = 0;
		rxdp->read.pkt_addr = dma_addr;
		rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
				IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
		rxm->data_len = rx_packet_len;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;

		/* If this is the first buffer of the received packet, set the
		 * pointer to the first mbuf of the packet and initialize its
		 * context. Otherwise, update the total length and the number
		 * of segments of the current scattered packet, and update the
		 * pointer to the last mbuf of the current packet.
		 */
		if (!first_seg) {
			first_seg = rxm;
			first_seg->nb_segs = 1;
			first_seg->pkt_len = rx_packet_len;
		} else {
			first_seg->pkt_len =
				(uint16_t)(first_seg->pkt_len +
						rx_packet_len);
			first_seg->nb_segs++;
			last_seg->next = rxm;
		}

		/* If this is not the last buffer of the received packet,
		 * update the pointer to the last mbuf of the current scattered
		 * packet and continue to parse the RX ring.
		 */
		if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
			last_seg = rxm;
			continue;
		}

		/* This is the last buffer of the received packet. If the CRC
		 * is not stripped by the hardware:
		 *  - Subtract the CRC length from the total packet length.
		 *  - If the last buffer only contains the whole CRC or a part
		 *  of it, free the mbuf associated to the last buffer. If part
		 *  of the CRC is also contained in the previous mbuf, subtract
		 *  the length of that CRC part from the data length of the
		 *  previous mbuf.
		 */
		rxm->next = NULL;
		if (unlikely(rxq->crc_len > 0)) {
			first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
			if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
				rte_pktmbuf_free_seg(rxm);
				first_seg->nb_segs--;
				last_seg->data_len =
					(uint16_t)(last_seg->data_len -
					(RTE_ETHER_CRC_LEN - rx_packet_len));
				last_seg->next = NULL;
			} else {
				rxm->data_len = (uint16_t)(rx_packet_len -
							   RTE_ETHER_CRC_LEN);
			}
		}

		first_seg->port = rxq->port_id;
		first_seg->ol_flags = 0;
		iavf_rxd_to_vlan_tci(first_seg, &rxd);
		pkt_flags = iavf_rxd_to_pkt_flags(qword1);
		first_seg->packet_type =
			ptype_tbl[(uint8_t)((qword1 &
			IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];

		if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
			first_seg->hash.rss =
				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);

		if (pkt_flags & RTE_MBUF_F_RX_FDIR)
			pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);

		first_seg->ol_flags |= pkt_flags;

		/* Prefetch data of first segment, if configured to do so. */
		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
					  first_seg->data_off));
		rx_pkts[nb_rx++] = first_seg;
		first_seg = NULL;
	}

	/* Record index of the next RX descriptor to probe. */
	rxq->rx_tail = rx_id;
	rxq->pkt_first_seg = first_seg;
	rxq->pkt_last_seg = last_seg;

	iavf_update_rx_tail(rxq, nb_hold, rx_id);

	return nb_rx;
}
#define IAVF_LOOK_AHEAD 8
static inline int
iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
{
	volatile union iavf_rx_flex_desc *rxdp;
	struct rte_mbuf **rxep;
	struct rte_mbuf *mb;
	uint16_t stat_err0;
	uint16_t pkt_len;
	int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
	int32_t i, j, nb_rx = 0;
	uint64_t pkt_flags;
	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;

	rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
	rxep = &rxq->sw_ring[rxq->rx_tail];

	stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);

	/* Make sure there is at least 1 packet to receive */
	if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
		return 0;

	/* Scan LOOK_AHEAD descriptors at a time to determine which
	 * descriptors reference packets that are ready to be received.
	 */
	for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
	     rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
		/* Read desc statuses backwards to avoid race condition */
		for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
			s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);

		/* This barrier is to order loads of different words in the descriptor */
		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

		/* Compute how many contiguous DD bits were set */
		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
			var = s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
#ifdef RTE_ARCH_ARM
			/* For Arm platforms, count only contiguous descriptors
			 * whose DD bit is set to 1. On Arm platforms, reads of
			 * descriptors can be reordered. Since the CPU may
			 * be reading the descriptors as the NIC updates them
			 * in memory, it is possible that the DD bit for a
			 * descriptor earlier in the queue is read as not set
			 * while the DD bit for a descriptor later in the queue
			 * is read as set.
			 */
			if (var)
				nb_dd += 1;
			else
				break;
#else
			nb_dd += var;
#endif
		}

		nb_rx += nb_dd;

		/* Translate descriptor info to mbuf parameters */
		for (j = 0; j < nb_dd; j++) {
			IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
					  rxq->rx_tail +
					  i * IAVF_LOOK_AHEAD + j);

			mb = rxep[j];
			pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
				IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
			mb->data_len = pkt_len;
			mb->pkt_len = pkt_len;
			mb->ol_flags = 0;

			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
				&rxq->stats.ipsec_crypto);
			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);

			mb->ol_flags |= pkt_flags;
		}

		for (j = 0; j < IAVF_LOOK_AHEAD; j++)
			rxq->rx_stage[i + j] = rxep[j];

		if (nb_dd != IAVF_LOOK_AHEAD)
			break;
	}

	/* Clear software ring entries */
	for (i = 0; i < nb_rx; i++)
		rxq->sw_ring[rxq->rx_tail + i] = NULL;

	return nb_rx;
}
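
/*
 * Example: if only five of the first eight descriptors have DD set, nb_dd
 * is 5, the scan stops after staging those five mbufs, and the next poll
 * resumes at rx_tail + 5.
 */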
static inline int
iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
{
	volatile union iavf_rx_desc *rxdp;
	struct rte_mbuf **rxep;
	struct rte_mbuf *mb;
	uint16_t pkt_len;
	uint64_t qword1;
	uint32_t rx_status;
	int32_t s[IAVF_LOOK_AHEAD], var, nb_dd;
	int32_t i, j, nb_rx = 0;
	uint64_t pkt_flags;
	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;

	rxdp = &rxq->rx_ring[rxq->rx_tail];
	rxep = &rxq->sw_ring[rxq->rx_tail];

	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
	rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
		    IAVF_RXD_QW1_STATUS_SHIFT;

	/* Make sure there is at least 1 packet to receive */
	if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* Scan LOOK_AHEAD descriptors at a time to determine which
	 * descriptors reference packets that are ready to be received.
	 */
	for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
	     rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
		/* Read desc statuses backwards to avoid race condition */
		for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
			qword1 = rte_le_to_cpu_64(
				rxdp[j].wb.qword1.status_error_len);
			s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
			       IAVF_RXD_QW1_STATUS_SHIFT;
		}

		/* This barrier is to order loads of different words in the descriptor */
		rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

		/* Compute how many contiguous DD bits were set */
		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++) {
			var = s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
#ifdef RTE_ARCH_ARM
			/* For Arm platforms, count only contiguous descriptors
			 * whose DD bit is set to 1. On Arm platforms, reads of
			 * descriptors can be reordered. Since the CPU may
			 * be reading the descriptors as the NIC updates them
			 * in memory, it is possible that the DD bit for a
			 * descriptor earlier in the queue is read as not set
			 * while the DD bit for a descriptor later in the queue
			 * is read as set.
			 */
			if (var)
				nb_dd += 1;
			else
				break;
#else
			nb_dd += var;
#endif
		}

		nb_rx += nb_dd;

		/* Translate descriptor info to mbuf parameters */
		for (j = 0; j < nb_dd; j++) {
			IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
					  rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);

			mb = rxep[j];
			qword1 = rte_le_to_cpu_64
					(rxdp[j].wb.qword1.status_error_len);
			pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
				  IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
			mb->data_len = pkt_len;
			mb->pkt_len = pkt_len;
			mb->ol_flags = 0;
			iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
			pkt_flags = iavf_rxd_to_pkt_flags(qword1);
			mb->packet_type =
				ptype_tbl[(uint8_t)((qword1 &
				IAVF_RXD_QW1_PTYPE_MASK) >>
				IAVF_RXD_QW1_PTYPE_SHIFT)];

			if (pkt_flags & RTE_MBUF_F_RX_RSS_HASH)
				mb->hash.rss = rte_le_to_cpu_32(
					rxdp[j].wb.qword0.hi_dword.rss);

			if (pkt_flags & RTE_MBUF_F_RX_FDIR)
				pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);

			mb->ol_flags |= pkt_flags;
		}

		for (j = 0; j < IAVF_LOOK_AHEAD; j++)
			rxq->rx_stage[i + j] = rxep[j];

		if (nb_dd != IAVF_LOOK_AHEAD)
			break;
	}

	/* Clear software ring entries */
	for (i = 0; i < nb_rx; i++)
		rxq->sw_ring[rxq->rx_tail + i] = NULL;

	return nb_rx;
}
static inline uint16_t
iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	uint16_t i;
	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];

	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);

	for (i = 0; i < nb_pkts; i++)
		rx_pkts[i] = stage[i];

	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);

	return nb_pkts;
}
static inline int
iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
{
	volatile union iavf_rx_desc *rxdp;
	struct rte_mbuf **rxep;
	struct rte_mbuf *mb;
	uint16_t alloc_idx, i;
	uint64_t dma_addr;
	int diag;

	/* Allocate buffers in bulk */
	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
			       (rxq->rx_free_thresh - 1));
	rxep = &rxq->sw_ring[alloc_idx];
	diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
				    rxq->rx_free_thresh);
	if (unlikely(diag != 0)) {
		PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
		return -ENOMEM;
	}

	rxdp = &rxq->rx_ring[alloc_idx];
	for (i = 0; i < rxq->rx_free_thresh; i++) {
		if (likely(i < (rxq->rx_free_thresh - 1)))
			/* Prefetch next mbuf */
			rte_prefetch0(rxep[i + 1]);

		mb = rxep[i];
		rte_mbuf_refcnt_set(mb, 1);
		mb->next = NULL;
		mb->data_off = RTE_PKTMBUF_HEADROOM;
		mb->nb_segs = 1;
		mb->port = rxq->port_id;
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
		rxdp[i].read.hdr_addr = 0;
		rxdp[i].read.pkt_addr = dma_addr;
	}

	/* Update rx tail register */
	rte_wmb();
	IAVF_PCI_REG_WC_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);

	rxq->rx_free_trigger =
		(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
		rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

	return 0;
}
static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
	uint16_t nb_rx = 0;

	if (!nb_pkts)
		return 0;

	if (rxq->rx_nb_avail)
		return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

	if (rxq->rxdid >= IAVF_RXDID_FLEX_NIC && rxq->rxdid <= IAVF_RXDID_LAST)
		nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
	else
		nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
	rxq->rx_next_avail = 0;
	rxq->rx_nb_avail = nb_rx;
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);

	if (rxq->rx_tail > rxq->rx_free_trigger) {
		if (iavf_rx_alloc_bufs(rxq) != 0) {
			uint16_t i, j;

			/* TODO: count rx_mbuf_alloc_failed here */

			rxq->rx_nb_avail = 0;
			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
			for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
				rxq->sw_ring[j] = rxq->rx_stage[i];

			return 0;
		}
	}

	if (rxq->rx_tail >= rxq->nb_rx_desc)
		rxq->rx_tail = 0;

	PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u",
		   rxq->port_id, rxq->queue_id,
		   rxq->rx_tail, nb_rx);

	if (rxq->rx_nb_avail)
		return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);

	return 0;
}
static uint16_t
iavf_recv_pkts_bulk_alloc(void *rx_queue,
			  struct rte_mbuf **rx_pkts,
			  uint16_t nb_pkts)
{
	uint16_t nb_rx = 0, n, count;

	if (unlikely(nb_pkts == 0))
		return 0;

	if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
		return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);

	while (nb_pkts) {
		n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
		count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
		nb_rx = (uint16_t)(nb_rx + count);
		nb_pkts = (uint16_t)(nb_pkts - count);
		if (count < n)
			break;
	}

	return nb_rx;
}
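
/*
 * For example, assuming IAVF_RX_MAX_BURST is 32, a request for 100 packets
 * is served as chunks of 32 + 32 + 32 + 4; the loop stops early as soon as
 * a chunk comes back short, since that means the ring is drained.
 */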
static inline int
iavf_xmit_cleanup(struct iavf_tx_queue *txq)
{
	struct iavf_tx_entry *sw_ring = txq->sw_ring;
	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
	uint16_t nb_tx_desc = txq->nb_tx_desc;
	uint16_t desc_to_clean_to;
	uint16_t nb_tx_to_clean;

	volatile struct iavf_tx_desc *txd = txq->tx_ring;

	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
	if (desc_to_clean_to >= nb_tx_desc)
		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);

	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
	if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
	     rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
	    rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
		PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
			   "(port=%d queue=%d)", desc_to_clean_to,
			   txq->port_id, txq->queue_id);
		return 0;
	}

	if (last_desc_cleaned > desc_to_clean_to)
		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
					    desc_to_clean_to);
	else
		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
					    last_desc_cleaned);

	txd[desc_to_clean_to].cmd_type_offset_bsz = 0;

	txq->last_desc_cleaned = desc_to_clean_to;
	txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);

	return 0;
}
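
/*
 * Worked example (hypothetical numbers): with rs_thresh = 32 and
 * last_desc_cleaned = 95, the DD check lands on descriptor 127; once
 * hardware marks it done, descriptors 96-127 are reclaimed in one step and
 * nb_free grows by 32.
 */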
/* Check if the context descriptor is needed for TX offloading */
static inline uint16_t
iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
{
	if (flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
	    RTE_MBUF_F_TX_TUNNEL_MASK))
		return 1;
	if (flags & RTE_MBUF_F_TX_VLAN &&
	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
		return 1;
	return 0;
}
static inline void
iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m,
		uint8_t vlan_flag)
{
	uint64_t cmd = 0;

	/* TSO enabled */
	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_CTX_QW1_CMD_SHIFT;

	if (m->ol_flags & RTE_MBUF_F_TX_VLAN &&
	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
		cmd |= IAVF_TX_CTX_DESC_IL2TAG2
			<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
	}

	*field |= cmd;
}
static inline void
iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
{
	uint64_t ipsec_field =
		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;

	*field |= ipsec_field;
}
static inline void
iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
	const struct rte_mbuf *m)
{
	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
	uint64_t eip_len = 0;
	uint64_t eip_noinc = 0;
	/* Default - IP_ID is incremented in each segment of LSO */

	switch (m->ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 |
			RTE_MBUF_F_TX_OUTER_IPV6 |
			RTE_MBUF_F_TX_OUTER_IP_CKSUM)) {
	case RTE_MBUF_F_TX_OUTER_IPV4:
		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
		eip_len = m->outer_l3_len >> 2;
		break;
	case RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM:
		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
		eip_len = m->outer_l3_len >> 2;
		break;
	case RTE_MBUF_F_TX_OUTER_IPV6:
		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
		eip_len = m->outer_l3_len >> 2;
		break;
	}

	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
}
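/*
 * Illustrative example (values assumed): for a 20-byte outer IPv4 header
 * with RTE_MBUF_F_TX_OUTER_IP_CKSUM set, EIPT selects the IPv4
 * checksum-offload encoding and EIPLEN is written as 20 >> 2 = 5, i.e. the
 * outer L3 length is expressed to the hardware in 4-byte words.
 */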
static inline uint16_t
iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
{
	uint64_t segmentation_field = 0;
	uint64_t total_length = 0;

	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
		total_length = ipsec_md->l4_payload_len;
	} else {
		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);

		if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
			total_length -= m->outer_l3_len;
	}

#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
	if (!m->l4_len || !m->tso_segsz)
		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
			   m->l4_len, m->tso_segsz);
	if (m->tso_segsz < 88)
		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
			   m->tso_segsz, 88);
#endif
	segmentation_field =
		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
				IAVF_TXD_CTX_QW1_MSS_MASK);

	*field |= segmentation_field;

	return total_length;
}
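/*
 * Illustrative TSO example (values assumed): a 9014-byte mbuf with
 * l2_len = 14, l3_len = 20 and l4_len = 20 yields
 * total_length = 9014 - 54 = 8960 bytes of L4 payload to segment; with
 * tso_segsz = 1460 the hardware then emits ceil(8960 / 1460) = 7 segments.
 */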
struct iavf_tx_context_desc_qws {
	__le64 qw0;
	__le64 qw1;
};

static inline void
iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
	uint16_t *tlen, uint8_t vlan_flag)
{
	volatile struct iavf_tx_context_desc_qws *desc_qws =
			(volatile struct iavf_tx_context_desc_qws *)desc;
	/* fill descriptor type field */
	desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;

	/* fill command field */
	iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m, vlan_flag);

	/* fill segmentation field */
	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
		/* fill IPsec field */
		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
				ipsec_md);

		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
				m, ipsec_md);
	}

	/* fill tunnelling field */
	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
		iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
	else
		desc_qws->qw0 = 0;

	desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
	desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);

	if (vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
		desc->l2tag2 = m->vlan_tci;
}
static inline void
iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
{
	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
		((uint64_t)md->esp_trailer_len <<
				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));

	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
		((uint64_t)md->next_proto <<
				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
		((uint64_t)(md->len_iv & 0x3) <<
				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
				1ULL : 0ULL) <<
				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);

	/**
	 * TODO: Pre-calculate this in the Session initialization
	 *
	 * Calculate IPsec length required in data descriptor func when TSO
	 * offload is enabled
	 */
	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
			sizeof(struct rte_udp_hdr) : 0);
}
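/*
 * Illustrative example (values assumed): for an SA whose len_iv metadata
 * encodes a 16-byte IV (md->len_iv >> 2 == 16) with UDP encapsulation
 * (NAT-T), the per-packet overhead works out as sizeof(struct rte_esp_hdr)
 * (8) + 16 + sizeof(struct rte_udp_hdr) (8), i.e. *ipsec_len = 32 bytes.
 */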
static inline void
iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
		struct rte_mbuf *m, uint8_t vlan_flag)
{
	uint64_t command = 0;
	uint64_t offset = 0;
	uint64_t l2tag1 = 0;

	*qw1 = IAVF_TX_DESC_DTYPE_DATA;

	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;

	/* Descriptor based VLAN insertion */
	if ((vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) &&
	    m->ol_flags & RTE_MBUF_F_TX_VLAN) {
		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
		l2tag1 |= m->vlan_tci;
	}

	/* Set MACLEN */
	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L3 checksum offloading inner */
	if (m->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
	}

	if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= (m->l4_len >> 2) <<
			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
	}

	/* Enable L4 checksum offloads */
	switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
	case RTE_MBUF_F_TX_TCP_CKSUM:
		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case RTE_MBUF_F_TX_SCTP_CKSUM:
		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case RTE_MBUF_F_TX_UDP_CKSUM:
		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	}

	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
}
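/*
 * Illustrative example (values assumed): an IPv4/TCP packet with
 * l2_len = 14 and l3_len = 20 programs MACLEN = 14 >> 1 = 7 (the field is in
 * 2-byte words) and IPLEN = 20 >> 2 = 5 (4-byte words); with
 * RTE_MBUF_F_TX_TCP_CKSUM the L4 length field becomes
 * sizeof(struct rte_tcp_hdr) >> 2 = 5 words.
 */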
static inline void
iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
	struct rte_mbuf *m, uint64_t desc_template,
	uint16_t tlen, uint16_t ipseclen)
{
	uint32_t hdrlen = m->l2_len;
	uint32_t bufsz = 0;

	/* fill data descriptor qw1 from template */
	desc->cmd_type_offset_bsz = desc_template;

	/* set data buffer address */
	desc->buffer_addr = rte_mbuf_data_iova(m);

	/* calculate data buffer size less set header lengths */
	if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
	    (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
			    RTE_MBUF_F_TX_UDP_SEG))) {
		hdrlen += m->outer_l3_len;
		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
			hdrlen += m->l3_len + m->l4_len;
		else
			hdrlen += m->l3_len;
		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
			hdrlen += ipseclen;
		bufsz = hdrlen + tlen;
	} else if ((m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
		   (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
				   RTE_MBUF_F_TX_UDP_SEG))) {
		hdrlen += m->outer_l3_len + m->l3_len + ipseclen;
		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
			hdrlen += m->l4_len;
		bufsz = hdrlen + tlen;
	} else {
		bufsz = m->data_len;
	}

	/* set data buffer size */
	desc->cmd_type_offset_bsz |=
		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);

	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
}
static struct iavf_ipsec_crypto_pkt_metadata *
iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
		struct rte_mbuf *m)
{
	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
				struct iavf_ipsec_crypto_pkt_metadata *);

	return NULL;
}
/* TX function */
uint16_t
iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct iavf_tx_queue *txq = tx_queue;
	volatile struct iavf_tx_desc *txr = txq->tx_ring;
	struct iavf_tx_entry *txe_ring = txq->sw_ring;
	struct iavf_tx_entry *txe, *txn;
	struct rte_mbuf *mb, *mb_seg;
	uint16_t desc_idx, desc_idx_last;
	uint16_t idx;

	/* Check if the descriptor ring needs to be cleaned. */
	if (txq->nb_free < txq->free_thresh)
		iavf_xmit_cleanup(txq);

	desc_idx = txq->tx_tail;
	txe = &txe_ring[desc_idx];

#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
	iavf_dump_tx_entry_ring(txq);
	iavf_dump_tx_desc_ring(txq);
#endif

	for (idx = 0; idx < nb_pkts; idx++) {
		volatile struct iavf_tx_desc *ddesc;
		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;

		uint16_t nb_desc_ctx, nb_desc_ipsec;
		uint16_t nb_desc_data, nb_desc_required;
		uint16_t tlen = 0, ipseclen = 0;
		uint64_t ddesc_template = 0;
		uint64_t ddesc_cmd = 0;

		mb = tx_pkts[idx];

		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);

		/**
		 * Get metadata for ipsec crypto from mbuf dynamic fields if
		 * security offload is specified.
		 */
		ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);

		nb_desc_data = mb->nb_segs;
		nb_desc_ctx =
			iavf_calc_context_desc(mb->ol_flags, txq->vlan_flag);
		nb_desc_ipsec = !!(mb->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);

		/**
		 * The number of descriptors that must be allocated for
		 * a packet equals the number of segments of that packet
		 * plus the context and ipsec descriptors, if needed.
		 */
		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
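		/*
		 * Illustrative count (values assumed): a 3-segment TSO packet
		 * with inline IPsec offload needs 3 data descriptors plus one
		 * context and one IPsec descriptor, so nb_desc_required = 5.
		 */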
		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);

		/* wrap descriptor ring */
		if (desc_idx_last >= txq->nb_tx_desc)
			desc_idx_last =
				(uint16_t)(desc_idx_last - txq->nb_tx_desc);

		PMD_TX_LOG(DEBUG,
			   "port_id=%u queue_id=%u tx_first=%u tx_last=%u",
			   txq->port_id, txq->queue_id, desc_idx, desc_idx_last);

		if (nb_desc_required > txq->nb_free) {
			if (iavf_xmit_cleanup(txq)) {
				if (idx == 0)
					return 0;
				goto end_of_tx;
			}
			if (unlikely(nb_desc_required > txq->rs_thresh)) {
				while (nb_desc_required > txq->nb_free) {
					if (iavf_xmit_cleanup(txq)) {
						if (idx == 0)
							return 0;
						goto end_of_tx;
					}
				}
			}
		}

		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb,
			txq->vlan_flag);

		/* Setup TX context descriptor if required */
		if (nb_desc_ctx) {
			volatile struct iavf_tx_context_desc *ctx_desc =
				(volatile struct iavf_tx_context_desc *)
					&txr[desc_idx];

			/* clear QW0 or the previous writeback value
			 * may impact next write
			 */
			*(volatile uint64_t *)ctx_desc = 0;

			txn = &txe_ring[txe->next_id];
			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);

			if (txe->mbuf) {
				rte_pktmbuf_free_seg(txe->mbuf);
				txe->mbuf = NULL;
			}

			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen,
				txq->vlan_flag);
			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);

			txe->last_id = desc_idx_last;
			desc_idx = txe->next_id;
			txe = txn;
		}

		if (nb_desc_ipsec) {
			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
				(volatile struct iavf_tx_ipsec_desc *)
					&txr[desc_idx];

			txn = &txe_ring[txe->next_id];
			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);

			if (txe->mbuf) {
				rte_pktmbuf_free_seg(txe->mbuf);
				txe->mbuf = NULL;
			}

			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);

			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);

			txe->last_id = desc_idx_last;
			desc_idx = txe->next_id;
			txe = txn;
		}

		mb_seg = mb;

		do {
			ddesc = (volatile struct iavf_tx_desc *)
					&txr[desc_idx];

			txn = &txe_ring[txe->next_id];
			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);

			if (txe->mbuf)
				rte_pktmbuf_free_seg(txe->mbuf);

			txe->mbuf = mb_seg;
			iavf_fill_data_desc(ddesc, mb_seg,
					ddesc_template, tlen, ipseclen);

			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);

			txe->last_id = desc_idx_last;
			desc_idx = txe->next_id;
			txe = txn;
			mb_seg = mb_seg->next;
		} while (mb_seg);

		/* The last packet data descriptor needs End Of Packet (EOP) */
		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;

		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);

		if (txq->nb_used >= txq->rs_thresh) {
			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
				   "%4u (port=%d queue=%d)",
				   desc_idx_last, txq->port_id, txq->queue_id);

			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;

			/* Update txq RS bit counters */
			txq->nb_used = 0;
		}

		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
				IAVF_TXD_DATA_QW1_CMD_SHIFT);

		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
	}

end_of_tx:
	rte_wmb();

	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
		   txq->port_id, txq->queue_id, desc_idx, idx);

	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
	txq->tx_tail = desc_idx;

	return idx;
}
/* Check if the packet with vlan user priority is transmitted in the
 * correct queue.
 */
static int
iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
{
	struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	uint16_t up;

	up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET;

	if (!(vf->qos_cap->cap[txq->tc].tc_prio & BIT(up))) {
		PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot transmit in queue %u",
			   up, txq->queue_id);
		return -1;
	}

	return 0;
}
/* TX prep functions */
uint16_t
iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	       uint16_t nb_pkts)
{
	int i, ret;
	uint64_t ol_flags;
	struct rte_mbuf *m;
	struct iavf_tx_queue *txq = tx_queue;
	struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;

		/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
		if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
			if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
				rte_errno = EINVAL;
				return i;
			}
		} else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
			   (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
			/* An MSS outside the range is considered malicious */
			rte_errno = EINVAL;
			return i;
		}

		if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			return i;
		}

#ifdef RTE_ETHDEV_DEBUG_TX
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}

		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS &&
		    ol_flags & (RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN)) {
			ret = iavf_check_vlan_up2tc(txq, m);
			if (ret != 0) {
				rte_errno = -ret;
				return i;
			}
		}
	}

	return i;
}
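/*
 * Illustrative usage sketch (application side, not part of the driver):
 * iavf_prep_pkts() is what rte_eth_tx_prepare() dispatches to on this PMD,
 * so a sender relying on checksum/TSO offloads would typically do:
 *
 *	uint16_t n = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
 *	if (n != nb)
 *		handle pkts[n] (rte_errno explains the rejection);
 *	n = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 */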
/* choose rx function */
void
iavf_set_rx_function(struct rte_eth_dev *dev)
{
	struct iavf_adapter *adapter =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

#ifdef RTE_ARCH_X86
	struct iavf_rx_queue *rxq;
	int i;
	int check_ret;
	bool use_avx2 = false;
	bool use_avx512 = false;
	bool use_flex = false;

	check_ret = iavf_rx_vec_dev_check(dev);
	if (check_ret >= 0 &&
	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
		if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
		     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
			use_avx2 = true;

#ifdef CC_AVX512_SUPPORT
		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
			use_avx512 = true;
#endif

		if (vf->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
			use_flex = true;

		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			rxq = dev->data->rx_queues[i];
			(void)iavf_rxq_vec_setup(rxq);
		}

		if (dev->data->scattered_rx) {
			if (!use_avx512) {
				PMD_DRV_LOG(DEBUG,
					    "Using %sVector Scattered Rx (port %d).",
					    use_avx2 ? "avx2 " : "",
					    dev->data->port_id);
			} else {
				if (check_ret == IAVF_VECTOR_PATH)
					PMD_DRV_LOG(DEBUG,
						    "Using AVX512 Vector Scattered Rx (port %d).",
						    dev->data->port_id);
				else
					PMD_DRV_LOG(DEBUG,
						    "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
						    dev->data->port_id);
			}
			if (use_flex) {
				dev->rx_pkt_burst = use_avx2 ?
					iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
					iavf_recv_scattered_pkts_vec_flex_rxd;
#ifdef CC_AVX512_SUPPORT
				if (use_avx512) {
					if (check_ret == IAVF_VECTOR_PATH)
						dev->rx_pkt_burst =
							iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
					else
						dev->rx_pkt_burst =
							iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
				}
#endif
			} else {
				dev->rx_pkt_burst = use_avx2 ?
					iavf_recv_scattered_pkts_vec_avx2 :
					iavf_recv_scattered_pkts_vec;
#ifdef CC_AVX512_SUPPORT
				if (use_avx512) {
					if (check_ret == IAVF_VECTOR_PATH)
						dev->rx_pkt_burst =
							iavf_recv_scattered_pkts_vec_avx512;
					else
						dev->rx_pkt_burst =
							iavf_recv_scattered_pkts_vec_avx512_offload;
				}
#endif
			}
		} else {
			if (!use_avx512) {
				PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
					    use_avx2 ? "avx2 " : "",
					    dev->data->port_id);
			} else {
				if (check_ret == IAVF_VECTOR_PATH)
					PMD_DRV_LOG(DEBUG,
						    "Using AVX512 Vector Rx (port %d).",
						    dev->data->port_id);
				else
					PMD_DRV_LOG(DEBUG,
						    "Using AVX512 OFFLOAD Vector Rx (port %d).",
						    dev->data->port_id);
			}
			if (use_flex) {
				dev->rx_pkt_burst = use_avx2 ?
					iavf_recv_pkts_vec_avx2_flex_rxd :
					iavf_recv_pkts_vec_flex_rxd;
#ifdef CC_AVX512_SUPPORT
				if (use_avx512) {
					if (check_ret == IAVF_VECTOR_PATH)
						dev->rx_pkt_burst =
							iavf_recv_pkts_vec_avx512_flex_rxd;
					else
						dev->rx_pkt_burst =
							iavf_recv_pkts_vec_avx512_flex_rxd_offload;
				}
#endif
			} else {
				dev->rx_pkt_burst = use_avx2 ?
					iavf_recv_pkts_vec_avx2 :
					iavf_recv_pkts_vec;
#ifdef CC_AVX512_SUPPORT
				if (use_avx512) {
					if (check_ret == IAVF_VECTOR_PATH)
						dev->rx_pkt_burst =
							iavf_recv_pkts_vec_avx512;
					else
						dev->rx_pkt_burst =
							iavf_recv_pkts_vec_avx512_offload;
				}
#endif
			}
		}

		return;
	}
#endif /* RTE_ARCH_X86 */

	if (dev->data->scattered_rx) {
		PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
			    dev->data->port_id);
		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
			dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
		else
			dev->rx_pkt_burst = iavf_recv_scattered_pkts;
	} else if (adapter->rx_bulk_alloc_allowed) {
		PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
			    dev->data->port_id);
		dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
	} else {
		PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
			    dev->data->port_id);
		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
			dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
		else
			dev->rx_pkt_burst = iavf_recv_pkts;
	}
}
/* choose tx function */
void
iavf_set_tx_function(struct rte_eth_dev *dev)
{
#ifdef RTE_ARCH_X86
	struct iavf_tx_queue *txq;
	int i;
	int check_ret;
	bool use_sse = false;
	bool use_avx2 = false;
	bool use_avx512 = false;

	check_ret = iavf_tx_vec_dev_check(dev);

	if (check_ret >= 0 &&
	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
		/* SSE and AVX2 not support offload path yet. */
		if (check_ret == IAVF_VECTOR_PATH) {
			use_sse = true;
			if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
			     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
			    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
				use_avx2 = true;
		}
#ifdef CC_AVX512_SUPPORT
		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
		    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 &&
		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
			use_avx512 = true;
#endif

		if (!use_sse && !use_avx2 && !use_avx512)
			goto normal;

		if (!use_avx512) {
			PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
				    use_avx2 ? "avx2 " : "",
				    dev->data->port_id);
			dev->tx_pkt_burst = use_avx2 ?
					    iavf_xmit_pkts_vec_avx2 :
					    iavf_xmit_pkts_vec;
		}
		dev->tx_pkt_prepare = NULL;
#ifdef CC_AVX512_SUPPORT
		if (use_avx512) {
			if (check_ret == IAVF_VECTOR_PATH) {
				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
				PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
					    dev->data->port_id);
			} else {
				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
				dev->tx_pkt_prepare = iavf_prep_pkts;
				PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
					    dev->data->port_id);
			}
		}
#endif

		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			txq = dev->data->tx_queues[i];
			if (!txq)
				continue;
#ifdef CC_AVX512_SUPPORT
			if (use_avx512)
				iavf_txq_vec_setup_avx512(txq);
			else
				iavf_txq_vec_setup(txq);
#else
			iavf_txq_vec_setup(txq);
#endif
		}

		return;
	}

normal:
#endif
	PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
		    dev->data->port_id);
	dev->tx_pkt_burst = iavf_xmit_pkts;
	dev->tx_pkt_prepare = iavf_prep_pkts;
}
static int
iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
			  uint32_t free_cnt)
{
	struct iavf_tx_entry *swr_ring = txq->sw_ring;
	uint16_t i, tx_last, tx_id;
	uint16_t nb_tx_free_last;
	uint16_t nb_tx_to_clean;
	uint32_t pkt_cnt;

	/* Start free mbuf from the next of tx_tail */
	tx_last = txq->tx_tail;
	tx_id = swr_ring[tx_last].next_id;

	if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
		return 0;

	nb_tx_to_clean = txq->nb_free;
	nb_tx_free_last = txq->nb_free;
	if (!free_cnt)
		free_cnt = txq->nb_tx_desc;

	/* Loop through swr_ring to count the amount of
	 * freeable mbufs and packets.
	 */
	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
		for (i = 0; i < nb_tx_to_clean &&
			pkt_cnt < free_cnt &&
			tx_id != tx_last; i++) {
			if (swr_ring[tx_id].mbuf != NULL) {
				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
				swr_ring[tx_id].mbuf = NULL;

				/*
				 * last segment in the packet,
				 * increment packet count
				 */
				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
			}

			tx_id = swr_ring[tx_id].next_id;
		}

		if (txq->rs_thresh > txq->nb_tx_desc -
			txq->nb_free || tx_id == tx_last)
			break;

		if (pkt_cnt < free_cnt) {
			if (iavf_xmit_cleanup(txq))
				break;

			nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
			nb_tx_free_last = txq->nb_free;
		}
	}

	return (int)pkt_cnt;
}
int
iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
	struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;

	return iavf_tx_done_cleanup_full(q, free_cnt);
}
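/*
 * Illustrative usage sketch (application side, not part of the driver):
 * this hook backs rte_eth_tx_done_cleanup(), so an application that wants
 * its mbufs recycled before the free threshold triggers can call e.g.
 *
 *	rte_eth_tx_done_cleanup(port_id, queue_id, 0);
 *
 * where a free_cnt of 0 asks the driver to reclaim as many completed
 * packets as possible.
 */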
void
iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		      struct rte_eth_rxq_info *qinfo)
{
	struct iavf_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mp;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = true;
	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
}
void
iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		      struct rte_eth_txq_info *qinfo)
{
	struct iavf_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_free_thresh = txq->free_thresh;
	qinfo->conf.tx_rs_thresh = txq->rs_thresh;
	qinfo->conf.offloads = txq->offloads;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
/* Get the number of used descriptors of a rx queue */
uint32_t
iavf_dev_rxq_count(void *rx_queue)
{
#define IAVF_RXQ_SCAN_INTERVAL 4
	volatile union iavf_rx_desc *rxdp;
	struct iavf_rx_queue *rxq;
	uint16_t desc = 0;

	rxq = rx_queue;
	rxdp = &rxq->rx_ring[rxq->rx_tail];

	while ((desc < rxq->nb_rx_desc) &&
	       ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
		 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
	       (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
		/* Check the DD bit of a rx descriptor of each 4 in a group,
		 * to avoid checking too frequently and downgrading performance
		 * too much.
		 */
		desc += IAVF_RXQ_SCAN_INTERVAL;
		rxdp += IAVF_RXQ_SCAN_INTERVAL;
		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
			rxdp = &(rxq->rx_ring[rxq->rx_tail +
					desc - rxq->nb_rx_desc]);
	}

	return desc;
}
int
iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
{
	struct iavf_rx_queue *rxq = rx_queue;
	volatile uint64_t *status;
	uint64_t mask;
	uint32_t desc;

	if (unlikely(offset >= rxq->nb_rx_desc))
		return -EINVAL;

	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
		return RTE_ETH_RX_DESC_UNAVAIL;

	desc = rxq->rx_tail + offset;
	if (desc >= rxq->nb_rx_desc)
		desc -= rxq->nb_rx_desc;

	status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
	mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
		<< IAVF_RXD_QW1_STATUS_SHIFT);
	if (*status & mask)
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}
int
iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
{
	struct iavf_tx_queue *txq = tx_queue;
	volatile uint64_t *status;
	uint64_t mask, expect;
	uint32_t desc;

	if (unlikely(offset >= txq->nb_tx_desc))
		return -EINVAL;

	desc = txq->tx_tail + offset;
	/* go to next desc that has the RS bit */
	desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) *
		txq->rs_thresh;
	if (desc >= txq->nb_tx_desc) {
		desc -= txq->nb_tx_desc;
		if (desc >= txq->nb_tx_desc)
			desc -= txq->nb_tx_desc;
	}

	status = &txq->tx_ring[desc].cmd_type_offset_bsz;
	mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
	expect = rte_cpu_to_le_64(
		 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
	if ((*status & mask) == expect)
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}
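/*
 * Worked example for the RS rounding above (values assumed): with
 * rs_thresh = 32 and an offset such that tx_tail + offset = 70, the status
 * is sampled at descriptor ((70 + 31) / 32) * 32 = 96, i.e. the next
 * descriptor on which hardware was asked to report completion via the RS
 * bit, since intermediate descriptors are never written back.
 */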
static inline uint32_t
iavf_get_default_ptype(uint16_t ptype)
{
	static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
		__rte_cache_aligned = {
		/* L2 types */
		/* [0] reserved */
		[1] = RTE_PTYPE_L2_ETHER,
		[2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
		/* [3] - [5] reserved */
		[6] = RTE_PTYPE_L2_ETHER_LLDP,
		/* [7] - [10] reserved */
		[11] = RTE_PTYPE_L2_ETHER_ARP,
		/* [12] - [21] reserved */

		/* Non tunneled IPv4 */
		[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_L4_FRAG,
		[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_L4_NONFRAG,
		[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_L4_UDP,
		/* [25] reserved */
		[26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_L4_TCP,
		[27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_L4_SCTP,
		[28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_L4_ICMP,

		/* IPv4 --> IPv4 */
		[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_FRAG,
		[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_NONFRAG,
		[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_UDP,
		/* [32] reserved */
		[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_TCP,
		[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_SCTP,
		[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_ICMP,

		/* IPv4 --> IPv6 */
		[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_FRAG,
		[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_NONFRAG,
		[38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_UDP,
		/* [39] reserved */
		[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_TCP,
		[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_SCTP,
		[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_ICMP,

		/* IPv4 --> GRE/Teredo/VXLAN */
		[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT,

		/* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
		[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_FRAG,
		[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_NONFRAG,
		[46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_UDP,
		/* [47] reserved */
		[48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_TCP,
		[49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_SCTP,
		[50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_ICMP,

		/* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
		[51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_FRAG,
		[52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_NONFRAG,
		[53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_UDP,
		/* [54] reserved */
		[55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_TCP,
		[56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_SCTP,
		[57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_ICMP,

		/* IPv4 --> GRE/Teredo/VXLAN --> MAC */
		[58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,

		/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
		[59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_FRAG,
		[60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_NONFRAG,
		[61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_UDP,
		/* [62] reserved */
		[63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_TCP,
		[64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_SCTP,
		[65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_ICMP,

		/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
		[66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_FRAG,
		[67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_NONFRAG,
		[68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_UDP,
		/* [69] reserved */
		[70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_TCP,
		[71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_SCTP,
		[72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_ICMP,
		/* [73] - [87] reserved */

		/* Non tunneled IPv6 */
		[88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_L4_FRAG,
		[89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_L4_NONFRAG,
		[90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_L4_UDP,
		/* [91] reserved */
		[92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_L4_TCP,
		[93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_L4_SCTP,
		[94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_L4_ICMP,

		/* IPv6 --> IPv4 */
		[95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_FRAG,
		[96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_NONFRAG,
		[97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_UDP,
		/* [98] reserved */
		[99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		       RTE_PTYPE_TUNNEL_IP |
		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		       RTE_PTYPE_INNER_L4_TCP,
		[100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> IPv6 */
		[102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [105] reserved */
		[106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_IP |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> GRE/Teredo/VXLAN */
		[109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT,

		/* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
		[110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [113] reserved */
		[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
		[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [120] reserved */
		[121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> GRE/Teredo/VXLAN --> MAC */
		[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,

		/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
		[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [128] reserved */
		[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
		[132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		/* [135] reserved */
		[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_SCTP,
		[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,
		/* [139] - [299] reserved */

		/* PPPoE */
		[300] = RTE_PTYPE_L2_ETHER_PPPOE,
		[301] = RTE_PTYPE_L2_ETHER_PPPOE,

		/* PPPoE --> IPv4 */
		[302] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_FRAG,
		[303] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_NONFRAG,
		[304] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[305] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_TCP,
		[306] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_SCTP,
		[307] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_ICMP,

		/* PPPoE --> IPv6 */
		[308] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_FRAG,
		[309] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_NONFRAG,
		[310] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[311] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_TCP,
		[312] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_SCTP,
		[313] = RTE_PTYPE_L2_ETHER_PPPOE |
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_ICMP,
		/* [314] - [324] reserved */

		/* IPv4/IPv6 --> GTPC/GTPU */
		[325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPC,
		[326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPC,
		[327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPC,
		[328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPC,
		[329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU,
		[330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU,

		/* IPv4 --> GTPU --> IPv4 */
		[331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		[334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> GTPU --> IPv4 */
		[336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		[339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv4 --> GTPU --> IPv6 */
		[341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		[344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv6 --> GTPU --> IPv6 */
		[346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_FRAG,
		[347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_NONFRAG,
		[348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_UDP,
		[349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_TCP,
		[350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_TUNNEL_GTPU |
			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_INNER_L4_ICMP,

		/* IPv4 --> UDP ECPRI */
		[372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,

		/* IPV6 --> UDP ECPRI */
		[382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		[391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
			RTE_PTYPE_L4_UDP,
		/* All others reserved */
	};

	return ptype_tbl[ptype];
}

void
iavf_set_default_ptype_table(struct rte_eth_dev *dev)
{
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	int i;

	for (i = 0; i < IAVF_MAX_PKT_TYPE; i++)
		ad->ptype_tbl[i] = iavf_get_default_ptype(i);
}