/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _IAVF_RXTX_H_
#define _IAVF_RXTX_H_

/* The ring length (QLEN) must be a whole multiple of 32 descriptors. */
#define IAVF_ALIGN_RING_DESC 32
#define IAVF_MIN_RING_DESC 64
#define IAVF_MAX_RING_DESC 4096
#define IAVF_DMA_MEM_ALIGN 4096
/* Base address of the HW descriptor ring should be 128B aligned. */
#define IAVF_RING_BASE_ALIGN 128
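
/* Illustrative sketch (not part of the driver API): how a requested ring
 * size would typically be validated against the bounds above and rounded up
 * to the descriptor alignment. The helper name is hypothetical; it assumes
 * RTE_ALIGN_CEIL from rte_common.h is available.
 */
static inline uint16_t
iavf_example_align_ring_size(uint16_t nb_desc)
{
	if (nb_desc < IAVF_MIN_RING_DESC || nb_desc > IAVF_MAX_RING_DESC)
		return 0; /* out of range; a real caller would reject it */
	/* round up to the next whole multiple of IAVF_ALIGN_RING_DESC */
	return (uint16_t)RTE_ALIGN_CEIL(nb_desc, IAVF_ALIGN_RING_DESC);
}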

/* used for Rx Bulk Allocate */
#define IAVF_RX_MAX_BURST 32

/* used for Vector PMD */
#define IAVF_VPMD_RX_MAX_BURST 32
#define IAVF_VPMD_TX_MAX_BURST 32
#define IAVF_RXQ_REARM_THRESH 32
#define IAVF_VPMD_DESCS_PER_LOOP 4
#define IAVF_VPMD_TX_MAX_FREE_BUF 64

#define IAVF_TX_NO_VECTOR_FLAGS ( \
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
		RTE_ETH_TX_OFFLOAD_TCP_TSO)

#define IAVF_TX_VECTOR_OFFLOAD ( \
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
		RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)

#define IAVF_RX_VECTOR_OFFLOAD ( \
		RTE_ETH_RX_OFFLOAD_CHECKSUM | \
		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
		RTE_ETH_RX_OFFLOAD_VLAN | \
		RTE_ETH_RX_OFFLOAD_RSS_HASH)

#define IAVF_VECTOR_PATH 0
#define IAVF_VECTOR_OFFLOAD_PATH 1
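
/* Illustrative sketch (hypothetical helper): how the Tx path selection
 * conceptually uses the masks above - any offload in IAVF_TX_NO_VECTOR_FLAGS
 * forces the scalar path, anything within IAVF_TX_VECTOR_OFFLOAD selects the
 * offload-capable vector path, and the rest can use the basic vector path.
 */
static inline int
iavf_example_select_tx_path(uint64_t offloads)
{
	if (offloads & IAVF_TX_NO_VECTOR_FLAGS)
		return -1; /* vector Tx not usable; fall back to scalar */
	if (offloads & IAVF_TX_VECTOR_OFFLOAD)
		return IAVF_VECTOR_OFFLOAD_PATH;
	return IAVF_VECTOR_PATH;
}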

#define DEFAULT_TX_RS_THRESH 32
#define DEFAULT_TX_FREE_THRESH 32

#define IAVF_MIN_TSO_MSS 88
#define IAVF_MAX_TSO_MSS 9668
#define IAVF_TSO_MAX_SEG UINT8_MAX
#define IAVF_TX_MAX_MTU_SEG 8

#define IAVF_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
		RTE_MBUF_F_TX_L4_MASK | \
		RTE_MBUF_F_TX_TCP_SEG)

#define IAVF_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 | \
		RTE_MBUF_F_TX_OUTER_IPV4 | \
		RTE_MBUF_F_TX_IPV6 | \
		RTE_MBUF_F_TX_IPV4 | \
		RTE_MBUF_F_TX_VLAN | \
		RTE_MBUF_F_TX_IP_CKSUM | \
		RTE_MBUF_F_TX_L4_MASK | \
		RTE_MBUF_F_TX_TCP_SEG)

#define IAVF_TX_OFFLOAD_NOTSUP_MASK \
		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
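
/* Illustrative sketch (hypothetical helper, simplified from what
 * iavf_prep_pkts() does): any mbuf requesting an offload outside
 * IAVF_TX_OFFLOAD_MASK must be rejected before transmission.
 */
static inline int
iavf_example_check_tx_offloads(const struct rte_mbuf *m)
{
	if (m->ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK)
		return -1; /* offload not supported by this PMD */
	return 0;
}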

/* Rx Flex Descriptors
 * These descriptors are used instead of the legacy version descriptors
 */
union iavf_16b_rx_flex_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
				 /* bit 0 of hdr_addr is DD bit */
	} read;
	struct {
		/* Qword 0 */
		u8 rxdid; /* descriptor builder profile ID */
		u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len; /* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
						/* sph=[11] */
						/* ff1/ext=[15:12] */

		/* Qword 1 */
		__le16 status_error0;
		__le16 l2tag1;
		__le16 flex_meta0;
		__le16 flex_meta1;
	} wb; /* writeback */
};

union iavf_32b_rx_flex_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
				 /* bit 0 of hdr_addr is DD bit */
		__le64 rsvd1;
		__le64 rsvd2;
	} read;
	struct {
		/* Qword 0 */
		u8 rxdid; /* descriptor builder profile ID */
		u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len; /* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
						/* sph=[11] */
						/* ff1/ext=[15:12] */

		/* Qword 1 */
		__le16 status_error0;
		__le16 l2tag1;
		__le16 flex_meta0;
		__le16 flex_meta1;

		/* Qword 2 */
		__le16 status_error1;
		u8 flex_flags2;
		u8 time_stamp_low;
		__le16 l2tag2_1st;
		__le16 l2tag2_2nd;

		/* Qword 3 */
		__le16 flex_meta2;
		__le16 flex_meta3;
		union {
			struct {
				__le16 flex_meta4;
				__le16 flex_meta5;
			} flex;
			__le32 ts_high;
		} flex_ts;
	} wb; /* writeback */
};

/* HW desc structure, both 16-byte and 32-byte types are supported */
#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
#define iavf_rx_desc iavf_16byte_rx_desc
#define iavf_rx_flex_desc iavf_16b_rx_flex_desc
#else
#define iavf_rx_desc iavf_32byte_rx_desc
#define iavf_rx_flex_desc iavf_32b_rx_flex_desc
#endif

typedef void (*iavf_rxd_to_pkt_fields_t)(struct iavf_rx_queue *rxq,
				struct rte_mbuf *mb,
				volatile union iavf_rx_flex_desc *rxdp);
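
/* Illustrative sketch: the shape of a handler matching
 * iavf_rxd_to_pkt_fields_t. Real handlers are selected per queue by RXDID
 * and copy profile-specific metadata into the mbuf; this hypothetical one
 * only copies the stripped VLAN tag from the writeback descriptor.
 */
static inline void
iavf_example_rxd_to_pkt_fields(struct iavf_rx_queue *rxq,
			       struct rte_mbuf *mb,
			       volatile union iavf_rx_flex_desc *rxdp)
{
	RTE_SET_USED(rxq);
	mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
}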

struct iavf_rxq_ops {
	void (*release_mbufs)(struct iavf_rx_queue *rxq);
};

struct iavf_txq_ops {
	void (*release_mbufs)(struct iavf_tx_queue *txq);
};

/* Structure associated with each Rx queue. */
struct iavf_rx_queue {
	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
	const struct rte_memzone *mz; /* memzone for Rx ring */
	volatile union iavf_rx_desc *rx_ring; /* Rx ring virtual address */
	uint64_t rx_ring_phys_addr;   /* Rx ring DMA address */
	struct rte_mbuf **sw_ring;    /* address of SW ring */
	uint16_t nb_rx_desc;          /* ring length */
	uint16_t rx_tail;             /* current value of tail */
	volatile uint8_t *qrx_tail;   /* register address of tail */
	uint16_t rx_free_thresh;      /* max free RX desc to hold */
	uint16_t nb_rx_hold;          /* number of held free RX desc */
	struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
	uint8_t rxdid;

	/* used for VPMD */
	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
	uint16_t rxrearm_start;    /* the idx we start the re-arming from */
	uint64_t mbuf_initializer; /* value to init mbufs */

	/* used for Rx bulk allocate */
	uint16_t rx_nb_avail;      /* number of staged packets ready */
	uint16_t rx_next_avail;    /* index of next staged packets */
	uint16_t rx_free_trigger;  /* triggers rx buffer allocation */
	struct rte_mbuf *rx_stage[IAVF_RX_MAX_BURST * 2]; /* store mbuf */

	uint16_t port_id;       /* device port ID */
	uint8_t crc_len;        /* 0 if CRC stripped, 4 otherwise */
	uint8_t fdir_enabled;   /* 0 if FDIR disabled, 1 when enabled */
	uint16_t queue_id;      /* Rx queue index */
	uint16_t rx_buf_len;    /* The packet buffer size */
	uint16_t rx_hdr_len;    /* The header buffer size */
	uint16_t max_pkt_len;   /* Maximum packet length */
	struct iavf_vsi *vsi;   /**< the VSI this queue belongs to */

	bool q_set;             /* if rx queue has been configured */
	bool rx_deferred_start; /* don't start this queue in dev start */
	const struct iavf_rxq_ops *ops;
	uint8_t rx_flags;
#define IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1	BIT(0)
#define IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2	BIT(1)
	uint8_t proto_xtr; /* protocol extraction type */
	uint64_t xtr_ol_flag;
		/* flexible descriptor metadata extraction offload flag */
	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
		/* handle flexible descriptor by RXDID */
};
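
/* Illustrative sketch (hypothetical helper): how a scalar Rx burst typically
 * batches doorbell writes - the tail register is only updated once more than
 * rx_free_thresh descriptors have been refilled since the last write.
 * Assumes IAVF_PCI_REG_WRITE from the shared iavf base code (iavf_osdep.h).
 */
static inline void
iavf_example_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold,
			    uint16_t rx_id)
{
	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		/* report tail one descriptor behind the next write-back slot */
		rx_id = (uint16_t)((rx_id == 0) ?
				   (rxq->nb_rx_desc - 1) : (rx_id - 1));
		IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;
}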

struct iavf_tx_entry {
	struct rte_mbuf *mbuf;
	uint16_t next_id;
	uint16_t last_id;
};

struct iavf_tx_vec_entry {
	struct rte_mbuf *mbuf;
};

/* Structure associated with each Tx queue. */
struct iavf_tx_queue {
	const struct rte_memzone *mz;  /* memzone for Tx ring */
	volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */
	uint64_t tx_ring_phys_addr;    /* Tx ring DMA address */
	struct iavf_tx_entry *sw_ring; /* address array of SW ring */
	uint16_t nb_tx_desc;           /* ring length */
	uint16_t tx_tail;              /* current value of tail */
	volatile uint8_t *qtx_tail;    /* register address of tail */
	/* number of used desc since RS bit set */
	uint16_t nb_used;
	uint16_t nb_free;
	uint16_t last_desc_cleaned;    /* last desc that has been cleaned */
	uint16_t free_thresh;
	uint16_t rs_thresh;

	uint16_t port_id;
	uint16_t queue_id;
	uint64_t offloads;
	uint16_t next_dd;              /* next desc to check the DD bit, for VPMD */
	uint16_t next_rs;              /* next desc to set the RS bit, for VPMD */

	bool q_set;                    /* if tx queue has been configured */
	bool tx_deferred_start;        /* don't start this queue in dev start */
	const struct iavf_txq_ops *ops;
#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1	BIT(0)
#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2	BIT(1)
	uint8_t vlan_flag;
};
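
/* Illustrative sketch (hypothetical helper): the RS-bit bookkeeping implied
 * by rs_thresh - the Tx path requests a completion report (RS bit) only
 * every rs_thresh descriptors, amortizing cleanup cost across bursts.
 */
static inline int
iavf_example_need_rs(const struct iavf_tx_queue *txq, uint16_t nb_new)
{
	return (uint16_t)(txq->nb_used + nb_new) >= txq->rs_thresh;
}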

/* Offload features */
union iavf_tx_offload {
	uint64_t data;
	struct {
		uint64_t l2_len:7; /* L2 (MAC) Header Length. */
		uint64_t l3_len:9; /* L3 (IP) Header Length. */
		uint64_t l4_len:8; /* L4 Header Length. */
		uint64_t tso_segsz:16; /* TCP TSO segment size */
		/* uint64_t unused : 24; */
	};
};
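
/* Illustrative sketch (hypothetical helper): gathering the per-packet header
 * lengths and TSO segment size from an mbuf into this union, as a Tx path
 * would before building a context descriptor.
 */
static inline union iavf_tx_offload
iavf_example_get_tx_offload(const struct rte_mbuf *m)
{
	union iavf_tx_offload tx_offload = { .data = 0 };

	tx_offload.l2_len = m->l2_len;
	tx_offload.l3_len = m->l3_len;
	tx_offload.l4_len = m->l4_len;
	tx_offload.tso_segsz = m->tso_segsz;
	return tx_offload;
}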

/* Rx Flex Descriptor
 * RxDID Profile ID 16-21
 * Flex-field 0: RSS hash lower 16-bits
 * Flex-field 1: RSS hash upper 16-bits
 * Flex-field 2: Flow ID lower 16-bits
 * Flex-field 3: Flow ID upper 16-bits
 * Flex-field 4: AUX0
 * Flex-field 5: AUX1
 */
struct iavf_32b_rx_flex_desc_comms {
	/* Qword 0 */
	u8 rxdid;
	u8 mir_id_umb_cast;
	__le16 ptype_flexi_flags0;
	__le16 pkt_len;
	__le16 hdr_len_sph_flex_flags1;

	/* Qword 1 */
	__le16 status_error0;
	__le16 l2tag1;
	__le32 rss_hash;

	/* Qword 2 */
	__le16 status_error1;
	u8 flexi_flags2;
	u8 ts_low;
	__le16 l2tag2_1st;
	__le16 l2tag2_2nd;

	/* Qword 3 */
	__le32 flow_id;
	union {
		struct {
			__le16 aux0;
			__le16 aux1;
		} flex;
		__le32 ts_high;
	} flex_ts;
};

/* Rx Flex Descriptor
 * RxDID Profile ID 22-23 (swap Hash and FlowID)
 * Flex-field 0: Flow ID lower 16-bits
 * Flex-field 1: Flow ID upper 16-bits
 * Flex-field 2: RSS hash lower 16-bits
 * Flex-field 3: RSS hash upper 16-bits
 */
struct iavf_32b_rx_flex_desc_comms_ovs {
	/* Qword 0 */
	u8 rxdid;
	u8 mir_id_umb_cast;
	__le16 ptype_flexi_flags0;
	__le16 pkt_len;
	__le16 hdr_len_sph_flex_flags1;

	/* Qword 1 */
	__le16 status_error0;
	__le16 l2tag1;
	__le32 flow_id;

	/* Qword 2 */
	__le16 status_error1;
	u8 flexi_flags2;
	u8 ts_low;
	__le16 l2tag2_1st;
	__le16 l2tag2_2nd;

	/* Qword 3 */
	__le32 rss_hash;
	union {
		struct {
			__le16 aux0;
			__le16 aux1;
		} flex;
		__le32 ts_high;
	} flex_ts;
};

/* Receive Flex Descriptor profile IDs: There are a total
 * of 64 profiles where profile IDs 0/1 are for legacy; and
 * profiles 2-63 are flex profiles that can be programmed
 * with a specific metadata (profile 7 reserved for HW)
 */
enum iavf_rxdid {
	IAVF_RXDID_LEGACY_0		= 0,
	IAVF_RXDID_LEGACY_1		= 1,
	IAVF_RXDID_FLEX_NIC		= 2,
	IAVF_RXDID_FLEX_NIC_2		= 6,
	IAVF_RXDID_HW			= 7,
	IAVF_RXDID_COMMS_GENERIC	= 16,
	IAVF_RXDID_COMMS_AUX_VLAN	= 17,
	IAVF_RXDID_COMMS_AUX_IPV4	= 18,
	IAVF_RXDID_COMMS_AUX_IPV6	= 19,
	IAVF_RXDID_COMMS_AUX_IPV6_FLOW	= 20,
	IAVF_RXDID_COMMS_AUX_TCP	= 21,
	IAVF_RXDID_COMMS_OVS_1		= 22,
	IAVF_RXDID_COMMS_OVS_2		= 23,
	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
	IAVF_RXDID_LAST			= 63,
};

enum iavf_rx_flex_desc_status_error_0_bits {
	/* Note: These are predefined bit offsets */
	IAVF_RX_FLEX_DESC_STATUS0_DD_S = 0,
	IAVF_RX_FLEX_DESC_STATUS0_EOF_S,
	IAVF_RX_FLEX_DESC_STATUS0_HBO_S,
	IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S,
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
	IAVF_RX_FLEX_DESC_STATUS0_LPBK_S,
	IAVF_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
	IAVF_RX_FLEX_DESC_STATUS0_RXE_S,
	IAVF_RX_FLEX_DESC_STATUS0_CRCP_S,
	IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
	IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
	IAVF_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
	IAVF_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
	IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
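
/* Illustrative sketches (hypothetical helpers, not the driver's actual Rx
 * path): testing the DD bit to see whether hardware has written a descriptor
 * back, and folding the checksum error bits into mbuf ol_flags. Both follow
 * the bit layout above; tunnel/outer-checksum handling is omitted.
 */
static inline int
iavf_example_flex_desc_done(const volatile union iavf_rx_flex_desc *rxdp)
{
	uint16_t stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);

	return (stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)) != 0;
}

static inline uint64_t
iavf_example_rxd_error_to_pkt_flags(uint16_t stat_err0)
{
	uint64_t flags = 0;

	/* nothing to report unless L3/L4 parsing was actually done */
	if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return 0;

	if (stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))
		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
	else
		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;

	if (stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	else
		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;

	return flags;
}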

enum iavf_rx_flex_desc_status_error_1_bits {
	/* Note: These are predefined bit offsets */
	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
	/* [10:6] reserved */
	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};

/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */

/* for iavf_32b_rx_flex_desc.pkt_len member */
#define IAVF_RX_FLX_DESC_PKT_LEN_M	(0x3FFF) /* 14-bits */
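
/* Illustrative sketch: extracting the packet type and packet length from a
 * flex descriptor writeback with the masks above. The helper name is
 * hypothetical.
 */
static inline void
iavf_example_parse_flex_qw0(const volatile union iavf_rx_flex_desc *rxdp,
			    uint16_t *ptype, uint16_t *pkt_len)
{
	*ptype = rte_le_to_cpu_16(rxdp->wb.ptype_flex_flags0) &
		 IAVF_RX_FLEX_DESC_PTYPE_M;
	*pkt_len = rte_le_to_cpu_16(rxdp->wb.pkt_len) &
		   IAVF_RX_FLX_DESC_PKT_LEN_M;
}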

int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
			    uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp);
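
/* Usage note: this is a dev_ops callback, not a public API. An application
 * reaches it through the ethdev layer, e.g. (port_id and mb_pool are
 * hypothetical application-side values):
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     NULL, mb_pool);
 */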

int iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
			    uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id,
			    const struct rte_eth_txconf *tx_conf);
int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt);
void iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void iavf_stop_queues(struct rte_eth_dev *dev);
uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
uint16_t iavf_recv_pkts_flex_rxd(void *rx_queue,
				 struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_flex_rxd(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
void iavf_set_rx_function(struct rte_eth_dev *dev);
void iavf_set_tx_function(struct rte_eth_dev *dev);
void iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			   struct rte_eth_rxq_info *qinfo);
void iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			   struct rte_eth_txq_info *qinfo);
uint32_t iavf_dev_rxq_count(void *rx_queue);
int iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset);

uint16_t iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
				     uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec(void *rx_queue,
				      struct rte_mbuf **rx_pkts,
				      uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue,
					       struct rte_mbuf **rx_pkts,
					       uint16_t nb_pkts);
uint16_t iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
				   uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue,
					  struct rte_mbuf **rx_pkts,
					  uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx2(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue,
						    struct rte_mbuf **rx_pkts,
						    uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			    uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
				 uint16_t nb_pkts);
int iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
				   uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_offload(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_flex_rxd(void *rx_queue,
					    struct rte_mbuf **rx_pkts,
					    uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
						    struct rte_mbuf **rx_pkts,
						    uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512(void *rx_queue,
					     struct rte_mbuf **rx_pkts,
					     uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
						     struct rte_mbuf **rx_pkts,
						     uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd(void *rx_queue,
						      struct rte_mbuf **rx_pkts,
						      uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
							      struct rte_mbuf **rx_pkts,
							      uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
				   uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx512_offload(void *tx_queue,
					   struct rte_mbuf **tx_pkts,
					   uint16_t nb_pkts);
int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);

uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);

void iavf_set_default_ptype_table(struct rte_eth_dev *dev);

static inline
void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
			     const volatile void *desc,
			     uint16_t rx_id)
{
#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	const volatile union iavf_16byte_rx_desc *rx_desc = desc;

	printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
	       rxq->queue_id, rx_id, rx_desc->read.pkt_addr,
	       rx_desc->read.hdr_addr);
#else
	const volatile union iavf_32byte_rx_desc *rx_desc = desc;

	printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64
	       " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", rxq->queue_id,
	       rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
	       rx_desc->read.rsvd1, rx_desc->read.rsvd2);
#endif
}

/* All the descriptors are 16 bytes, so just use one of them
 * to print the qwords
 */
static inline
void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
			     const volatile void *desc, uint16_t tx_id)
{
	const char *name;
	const volatile struct iavf_tx_desc *tx_desc = desc;
	enum iavf_tx_desc_dtype_value type;

	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
		tx_desc->cmd_type_offset_bsz &
		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
	switch (type) {
	case IAVF_TX_DESC_DTYPE_DATA:
		name = "Tx_data_desc";
		break;
	case IAVF_TX_DESC_DTYPE_CONTEXT:
		name = "Tx_context_desc";
		break;
	default:
		name = "unknown_desc";
		break;
	}

	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
	       tx_desc->cmd_type_offset_bsz);
}

#define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
	int i; \
	for (i = 0; i < (ad)->dev_data->nb_rx_queues; i++) { \
		struct iavf_rx_queue *rxq = (ad)->dev_data->rx_queues[i]; \
		if (!rxq) \
			continue; \
		rxq->fdir_enabled = on; \
	} \
	PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \
} while (0)

/* Enable/disable flow director Rx processing in data path. */
static inline
void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on)
{
	if (on) {
		/* enable flow director processing */
		FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
		ad->fdir_ref_cnt++;
	} else {
		if (ad->fdir_ref_cnt >= 1) {
			ad->fdir_ref_cnt--;

			if (ad->fdir_ref_cnt == 0)
				FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
		}
	}
}

#ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
	iavf_dump_rx_descriptor(rxq, desc, rx_id)
#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) \
	iavf_dump_tx_descriptor(txq, desc, tx_id)
#else
#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) do { } while (0)
#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) do { } while (0)
#endif

#endif /* _IAVF_RXTX_H_ */