/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _IAVF_RXTX_H_
#define _IAVF_RXTX_H_
/* The ring length (QLEN) must be a whole multiple of 32 descriptors. */
#define IAVF_ALIGN_RING_DESC 32
#define IAVF_MIN_RING_DESC 64
#define IAVF_MAX_RING_DESC 4096
#define IAVF_DMA_MEM_ALIGN 4096
/* Base address of the HW descriptor ring should be 128B aligned. */
#define IAVF_RING_BASE_ALIGN 128
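
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): how a
 * queue-setup path can validate a requested ring length against the limits
 * above before reserving the descriptor memzone.
 *
 *	static int check_ring_len(uint16_t nb_desc)
 *	{
 *		if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
 *		    nb_desc < IAVF_MIN_RING_DESC ||
 *		    nb_desc > IAVF_MAX_RING_DESC)
 *			return -EINVAL;
 *		return 0;
 *	}
 */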
/* Used by the Rx bulk-allocation path. */
#define IAVF_RX_MAX_BURST 32

/* Used by the vector PMD paths. */
#define IAVF_VPMD_RX_MAX_BURST 32
#define IAVF_VPMD_TX_MAX_BURST 32
#define IAVF_RXQ_REARM_THRESH 32
#define IAVF_VPMD_DESCS_PER_LOOP 4
#define IAVF_VPMD_TX_MAX_FREE_BUF 64
#define IAVF_TX_NO_VECTOR_FLAGS ( \
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
		RTE_ETH_TX_OFFLOAD_TCP_TSO | \
		RTE_ETH_TX_OFFLOAD_SECURITY)

#define IAVF_TX_VECTOR_OFFLOAD ( \
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
		RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)

#define IAVF_RX_VECTOR_OFFLOAD ( \
		RTE_ETH_RX_OFFLOAD_CHECKSUM | \
		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
		RTE_ETH_RX_OFFLOAD_VLAN | \
		RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define IAVF_VECTOR_PATH 0
#define IAVF_VECTOR_OFFLOAD_PATH 1

#define DEFAULT_TX_RS_THRESH 32
#define DEFAULT_TX_FREE_THRESH 32
#define IAVF_MIN_TSO_MSS 256
#define IAVF_MAX_TSO_MSS 9668
#define IAVF_TSO_MAX_SEG UINT8_MAX
#define IAVF_TX_MAX_MTU_SEG 8
#define IAVF_TX_CKSUM_OFFLOAD_MASK ( \
		RTE_MBUF_F_TX_IP_CKSUM | \
		RTE_MBUF_F_TX_L4_MASK | \
		RTE_MBUF_F_TX_TCP_SEG)
#define IAVF_TX_OFFLOAD_MASK ( \
		RTE_MBUF_F_TX_OUTER_IPV6 | \
		RTE_MBUF_F_TX_OUTER_IPV4 | \
		RTE_MBUF_F_TX_IPV6 | \
		RTE_MBUF_F_TX_IPV4 | \
		RTE_MBUF_F_TX_VLAN | \
		RTE_MBUF_F_TX_IP_CKSUM | \
		RTE_MBUF_F_TX_L4_MASK | \
		RTE_MBUF_F_TX_TCP_SEG | \
		RTE_MBUF_F_TX_SEC_OFFLOAD)

#define IAVF_TX_OFFLOAD_NOTSUP_MASK \
		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
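
/*
 * Illustrative sketch (hypothetical helper, not the driver's tx_pkt_prepare
 * implementation): since IAVF_TX_OFFLOAD_MASK is a subset of
 * RTE_MBUF_F_TX_OFFLOAD_MASK, the XOR above yields exactly the unsupported
 * bits, so a prepare path can reject an mbuf with a single AND.
 *
 *	static inline int tx_offload_supported(const struct rte_mbuf *m)
 *	{
 *		return (m->ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) == 0;
 *	}
 */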
/* Rx Flex Descriptors.
 * These descriptors are used instead of the legacy version descriptors.
 */
union iavf_16b_rx_flex_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
				 /* bit 0 of hdr_addr is DD bit */
	} read;
	struct {
		/* Qword 0 */
		u8 rxdid; /* descriptor builder profile ID */
		u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len; /* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
		/* Qword 1 */
		__le16 status_error0;
		__le16 l2tag1;
		__le16 flex_meta0;
		__le16 flex_meta1;
	} wb; /* writeback */
};
union iavf_32b_rx_flex_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
				 /* bit 0 of hdr_addr is DD bit */
		__le64 rsvd1;
		__le64 rsvd2;
	} read;
	struct {
		/* Qword 0 */
		u8 rxdid; /* descriptor builder profile ID */
		u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len; /* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
						/* ff1/ext=[15:12] */
		/* Qword 1 */
		__le16 status_error0;
		__le16 l2tag1;
		__le16 flex_meta0;
		__le16 flex_meta1;
		/* Qword 2 */
		__le16 status_error1;
		/* ... */
	} wb; /* writeback */
};
/* HW desc structure, both 16-byte and 32-byte types are supported */
#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
#define iavf_rx_desc iavf_16byte_rx_desc
#define iavf_rx_flex_desc iavf_16b_rx_flex_desc
#else
#define iavf_rx_desc iavf_32byte_rx_desc
#define iavf_rx_flex_desc iavf_32b_rx_flex_desc
#endif
typedef void (*iavf_rxd_to_pkt_fields_t)(struct iavf_rx_queue *rxq,
				struct rte_mbuf *mb,
				volatile union iavf_rx_flex_desc *rxdp);
struct iavf_rxq_ops {
	void (*release_mbufs)(struct iavf_rx_queue *rxq);
};

struct iavf_txq_ops {
	void (*release_mbufs)(struct iavf_tx_queue *txq);
};

struct iavf_rx_queue_stats {
	uint64_t reserved;
	struct iavf_ipsec_crypto_stats ipsec_crypto;
};
/* Structure associated with each Rx queue. */
struct iavf_rx_queue {
	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
	const struct rte_memzone *mz; /* memzone for Rx ring */
	volatile union iavf_rx_desc *rx_ring; /* Rx ring virtual address */
	uint64_t rx_ring_phys_addr;   /* Rx ring DMA address */
	struct rte_mbuf **sw_ring;    /* address of SW ring */
	uint16_t nb_rx_desc;          /* ring length */
	uint16_t rx_tail;             /* current value of tail */
	volatile uint8_t *qrx_tail;   /* register address of tail */
	uint16_t rx_free_thresh;      /* max free RX desc to hold */
	uint16_t nb_rx_hold;          /* number of held free RX desc */
	struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
	struct rte_mbuf fake_mbuf;      /* dummy mbuf */

	/* used for VPMD */
	uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
	uint16_t rxrearm_start;    /* the idx we start the re-arming from */
	uint64_t mbuf_initializer; /* value to init mbufs */

	/* for rx bulk */
	uint16_t rx_nb_avail;      /* number of staged packets ready */
	uint16_t rx_next_avail;    /* index of next staged packets */
	uint16_t rx_free_trigger;  /* triggers rx buffer allocation */
	struct rte_mbuf *rx_stage[IAVF_RX_MAX_BURST * 2]; /* store mbuf */

	uint16_t port_id;       /* device port ID */
	uint8_t crc_len;        /* 0 if CRC stripped, 4 otherwise */
	uint8_t fdir_enabled;   /* 0 if FDIR disabled, 1 when enabled */
	uint16_t queue_id;      /* Rx queue index */
	uint16_t rx_buf_len;    /* The packet buffer size */
	uint16_t rx_hdr_len;    /* The header buffer size */
	uint16_t max_pkt_len;   /* Maximum packet length */
	struct iavf_vsi *vsi;   /**< the VSI this queue belongs to */

	bool q_set;             /* if rx queue has been configured */
	bool rx_deferred_start; /* don't start this queue in dev start */
	const struct iavf_rxq_ops *ops;
#define IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(0)
#define IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(1)
	uint8_t proto_xtr; /* protocol extraction type */
	uint64_t xtr_ol_flag;
		/* flexible descriptor metadata extraction offload flag */
	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
		/* handle flexible descriptor by RXDID */
	struct iavf_rx_queue_stats stats;
	uint64_t offloads;
};
struct iavf_tx_entry {
	struct rte_mbuf *mbuf;
	uint16_t next_id;
	uint16_t last_id;
};

struct iavf_tx_vec_entry {
	struct rte_mbuf *mbuf;
};
/* Structure associated with each Tx queue. */
struct iavf_tx_queue {
	const struct rte_memzone *mz; /* memzone for Tx ring */
	volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */
	uint64_t tx_ring_phys_addr;   /* Tx ring DMA address */
	struct iavf_tx_entry *sw_ring; /* address array of SW ring */
	uint16_t nb_tx_desc;          /* ring length */
	uint16_t tx_tail;             /* current value of tail */
	volatile uint8_t *qtx_tail;   /* register address of tail */
	/* number of used descriptors since the RS bit was last set */
	uint16_t nb_used;
	uint16_t nb_free;
	uint16_t last_desc_cleaned;   /* last descriptor that has been cleaned */
	uint16_t free_thresh;

	uint16_t next_dd;             /* next to check DD, for VPMD */
	uint16_t next_rs;             /* next to set RS, for VPMD */
	uint16_t ipsec_crypto_pkt_md_offset;

	bool q_set;                   /* if tx queue has been configured */
	bool tx_deferred_start;       /* don't start this queue in dev start */
	const struct iavf_txq_ops *ops;
#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(0)
#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(1)
	uint8_t vlan_flag;
};
/* Offload features */
union iavf_tx_offload {
	uint64_t data;
	struct {
		uint64_t l2_len:7; /* L2 (MAC) Header Length. */
		uint64_t l3_len:9; /* L3 (IP) Header Length. */
		uint64_t l4_len:8; /* L4 Header Length. */
		uint64_t tso_segsz:16; /* TCP TSO segment size */
		/* uint64_t unused : 24; */
	};
};
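
/*
 * Illustrative sketch (hypothetical): gathering the Tx offload lengths
 * from an mbuf 'm' into the union so the Tx path can consume them as a
 * single 64-bit word via the 'data' member.
 *
 *	union iavf_tx_offload tx_offload = { .data = 0 };
 *
 *	tx_offload.l2_len = m->l2_len;
 *	tx_offload.l3_len = m->l3_len;
 *	tx_offload.l4_len = m->l4_len;
 *	tx_offload.tso_segsz = m->tso_segsz;
 */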
/* Rx Flex Descriptor
 * RxDID Profile ID 16-21
 * Flex-field 0: RSS hash lower 16-bits
 * Flex-field 1: RSS hash upper 16-bits
 * Flex-field 2: Flow ID lower 16-bits
 * Flex-field 3: Flow ID upper 16-bits
 */
struct iavf_32b_rx_flex_desc_comms {
	u8 rxdid;
	u8 mir_id_umb_cast;
	__le16 ptype_flexi_flags0;
	__le16 pkt_len;
	__le16 hdr_len_sph_flex_flags1;
	__le16 status_error0;
	/* ... */
	__le16 status_error1;
	/* ... */
};
/* Rx Flex Descriptor
 * RxDID Profile ID 22-23 (swap Hash and FlowID)
 * Flex-field 0: Flow ID lower 16-bits
 * Flex-field 1: Flow ID upper 16-bits
 * Flex-field 2: RSS hash lower 16-bits
 * Flex-field 3: RSS hash upper 16-bits
 */
struct iavf_32b_rx_flex_desc_comms_ovs {
	u8 rxdid;
	u8 mir_id_umb_cast;
	__le16 ptype_flexi_flags0;
	__le16 pkt_len;
	__le16 hdr_len_sph_flex_flags1;
	__le16 status_error0;
	/* ... */
	__le16 status_error1;
	/* ... */
};
/* Rx Flex Descriptor
 * RxDID Profile ID 24 Inline IPsec
 * Flex-field 0: RSS hash lower 16-bits
 * Flex-field 1: RSS hash upper 16-bits
 * Flex-field 2: Flow ID lower 16-bits
 * Flex-field 3: Flow ID upper 16-bits
 * Flex-field 4: Inline IPsec SAID lower 16-bits
 * Flex-field 5: Inline IPsec SAID upper 16-bits
 */
struct iavf_32b_rx_flex_desc_comms_ipsec {
	u8 rxdid;
	u8 mir_id_umb_cast;
	__le16 ptype_flexi_flags0;
	__le16 pkt_len;
	__le16 hdr_len_sph_flex_flags1;
	__le16 status_error0;
	/* ... */
	__le16 status_error1;
	/* ... */
	__le32 ipsec_said;
};
/* Receive Flex Descriptor profile IDs: There are a total
 * of 64 profiles where profile IDs 0/1 are for legacy; and
 * profiles 2-63 are flex profiles that can be programmed
 * with a specific metadata (profile 7 reserved for HW).
 */
enum iavf_rxdid {
	IAVF_RXDID_LEGACY_0 = 0,
	IAVF_RXDID_LEGACY_1 = 1,
	IAVF_RXDID_FLEX_NIC = 2,
	IAVF_RXDID_FLEX_NIC_2 = 6,
	IAVF_RXDID_HW = 7,
	IAVF_RXDID_COMMS_GENERIC = 16,
	IAVF_RXDID_COMMS_AUX_VLAN = 17,
	IAVF_RXDID_COMMS_AUX_IPV4 = 18,
	IAVF_RXDID_COMMS_AUX_IPV6 = 19,
	IAVF_RXDID_COMMS_AUX_IPV6_FLOW = 20,
	IAVF_RXDID_COMMS_AUX_TCP = 21,
	IAVF_RXDID_COMMS_OVS_1 = 22,
	IAVF_RXDID_COMMS_OVS_2 = 23,
	IAVF_RXDID_COMMS_IPSEC_CRYPTO = 24,
	IAVF_RXDID_COMMS_AUX_IP_OFFSET = 25,
	IAVF_RXDID_LAST = 63,
};
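
/*
 * Illustrative sketch (hypothetical mapping; the driver's real one is
 * iavf_proto_xtr_type_to_rxdid(), declared later in this header, and the
 * IAVF_PROTO_XTR_* values are assumed from iavf.h): how a protocol
 * extraction type can select one of the flex profiles above.
 *
 *	switch (xtr_type) {
 *	case IAVF_PROTO_XTR_VLAN:
 *		return IAVF_RXDID_COMMS_AUX_VLAN;
 *	case IAVF_PROTO_XTR_IPV4:
 *		return IAVF_RXDID_COMMS_AUX_IPV4;
 *	default:
 *		return IAVF_RXDID_COMMS_OVS_1;
 *	}
 */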
enum iavf_rx_flex_desc_status_error_0_bits {
	/* Note: These are predefined bit offsets */
	IAVF_RX_FLEX_DESC_STATUS0_DD_S = 0,
	IAVF_RX_FLEX_DESC_STATUS0_EOF_S,
	IAVF_RX_FLEX_DESC_STATUS0_HBO_S,
	IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S,
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
	IAVF_RX_FLEX_DESC_STATUS0_LPBK_S,
	IAVF_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
	IAVF_RX_FLEX_DESC_STATUS0_RXE_S,
	IAVF_RX_FLEX_DESC_STATUS0_CRCP_S,
	IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
	IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
	IAVF_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
	IAVF_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
	IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
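
/*
 * Illustrative sketch (hypothetical helper): polling the DD (descriptor
 * done) bit of a write-back flex descriptor using the bit offsets above.
 *
 *	static inline int
 *	flex_desc_done(volatile union iavf_rx_flex_desc *rxdp)
 *	{
 *		uint16_t stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
 *
 *		return !!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S));
 *	}
 */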
enum iavf_rx_flex_desc_status_error_1_bits {
	/* Note: These are predefined bit offsets */
	/* Bits 3:0 are reserved for inline ipsec status */
	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
	/* [10:6] reserved */
	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK ( \
	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) | \
	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) | \
	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) | \
	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
enum iavf_rx_flex_desc_ipsec_crypto_status {
	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
	/* Reserved */
	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
};
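
/*
 * Illustrative sketch (hypothetical; 'rxdp', 'mb' and handle_ipsec_error()
 * are placeholders): bits 3:0 of status_error1 carry one of the
 * iavf_rx_flex_desc_ipsec_crypto_status values, extracted with the mask
 * defined above.
 *
 *	uint16_t stat_err1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
 *	uint16_t ipsec_status =
 *		stat_err1 & IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
 *
 *	if (ipsec_status != IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)
 *		handle_ipsec_error(mb);
 */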
#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT (0)
#define IAVF_TXD_DATA_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_DATA_QW1_DTYPE_SHIFT)
#define IAVF_TXD_DATA_QW1_CMD_SHIFT (4)
#define IAVF_TXD_DATA_QW1_CMD_MASK (0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)

#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT (16)
#define IAVF_TXD_DATA_QW1_OFFSET_MASK (0x3FFFFULL << \
					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)

#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT (IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK \
	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)

#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT \
	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK \
	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)

#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT \
	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK \
	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)

#define IAVF_TXD_DATA_QW1_MACLEN_MASK \
	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
#define IAVF_TXD_DATA_QW1_IPLEN_MASK \
	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
#define IAVF_TXD_DATA_QW1_L4LEN_MASK \
	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
#define IAVF_TXD_DATA_QW1_FCLEN_MASK \
	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)

#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT (34)
#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK \
	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)

#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT (48)
#define IAVF_TXD_DATA_QW1_L2TAG1_MASK \
	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
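
/*
 * Illustrative sketch (hypothetical; 'txd', 'maclen', 'iplen' and 'buf_sz'
 * are placeholders, and IAVF_TX_DESC_DTYPE_DATA is assumed from the base
 * code): composing a data descriptor's cmd_type_offset_bsz quadword from
 * the shift/mask pairs above. Per the descriptor format, MACLEN is
 * expressed in 2-byte words and IPLEN in 4-byte words.
 *
 *	uint64_t qw1 = IAVF_TX_DESC_DTYPE_DATA;
 *
 *	qw1 |= ((uint64_t)maclen << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT) &
 *		IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK;
 *	qw1 |= ((uint64_t)iplen << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT) &
 *		IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK;
 *	qw1 |= ((uint64_t)buf_sz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
 *		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK;
 *	txd->cmd_type_offset_bsz = rte_cpu_to_le_64(qw1);
 */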
#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT (11)
#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK \
	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)

#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT (14)
#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK \
	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)

#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT (30)
#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK \
	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)

#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT (30)
#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK \
	(0x3FUL << IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT)

#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT (50)
#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK \
	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT (0)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK (0x3UL)

enum iavf_tx_ctx_desc_tunnel_external_ip_type {
	IAVF_TX_CTX_DESC_EIPT_NONE,
	IAVF_TX_CTX_DESC_EIPT_IPV6,
	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
};

#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT (2)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK (0x7FUL)

#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT (9)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK (0x3UL)

enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
};

#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT (11)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK (0x1UL)

#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT (12)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK (0x7FUL)

#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT (19)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK (0xFUL)

#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT (23)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK (0x1UL)

#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM (32)
#define IAVF_TXD_CTX_QW0_L2TAG2_MASK (0xFFFFUL)
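
/*
 * Illustrative sketch (hypothetical; 'outer_ip_len' is a placeholder and
 * the length units follow the usual hardware convention of 4-byte words
 * for the outer IP header): building the context descriptor's tunneling
 * parameters for an IPv4 outer header, with checksum offload, carrying a
 * UDP-based tunnel.
 *
 *	uint64_t tun = 0;
 *
 *	tun |= ((uint64_t)IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD &
 *		IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK) <<
 *		IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT;
 *	tun |= ((uint64_t)(outer_ip_len >> 2) &
 *		IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK) <<
 *		IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT;
 *	tun |= ((uint64_t)IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP &
 *		IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK) <<
 *		IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT;
 */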
#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK (0xFFFFF)

/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
#define IAVF_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */

/* for iavf_32b_rx_flex_desc.pkt_len member */
#define IAVF_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */
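
/*
 * Illustrative sketch (hypothetical; 'rxdp' is a placeholder): pulling the
 * packet type and packet length out of a write-back flex descriptor with
 * the masks above.
 *
 *	uint16_t ptype = rte_le_to_cpu_16(rxdp->wb.ptype_flex_flags0) &
 *		IAVF_RX_FLEX_DESC_PTYPE_M;
 *	uint16_t pkt_len = rte_le_to_cpu_16(rxdp->wb.pkt_len) &
 *		IAVF_RX_FLX_DESC_PKT_LEN_M;
 */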
int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
			    uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp);

int iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
			    uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id,
			    const struct rte_eth_txconf *tx_conf);
int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt);
void iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void iavf_stop_queues(struct rte_eth_dev *dev);
uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
uint16_t iavf_recv_pkts_flex_rxd(void *rx_queue,
				 struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_flex_rxd(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
void iavf_set_rx_function(struct rte_eth_dev *dev);
void iavf_set_tx_function(struct rte_eth_dev *dev);
void iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			   struct rte_eth_rxq_info *qinfo);
void iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			   struct rte_eth_txq_info *qinfo);
uint32_t iavf_dev_rxq_count(void *rx_queue);
int iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
uint16_t iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
				     uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec(void *rx_queue,
				      struct rte_mbuf **rx_pkts,
				      uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue,
					       struct rte_mbuf **rx_pkts,
					       uint16_t nb_pkts);
uint16_t iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
				   uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue,
					  struct rte_mbuf **rx_pkts,
					  uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx2(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue,
						    struct rte_mbuf **rx_pkts,
						    uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			    uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
				 uint16_t nb_pkts);
int iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
				   uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_offload(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_flex_rxd(void *rx_queue,
					    struct rte_mbuf **rx_pkts,
					    uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
						    struct rte_mbuf **rx_pkts,
						    uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512(void *rx_queue,
					     struct rte_mbuf **rx_pkts,
					     uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
						     struct rte_mbuf **rx_pkts,
						     uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd(void *rx_queue,
						      struct rte_mbuf **rx_pkts,
						      uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
							      struct rte_mbuf **rx_pkts,
							      uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
				   uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx512_offload(void *tx_queue,
					   struct rte_mbuf **tx_pkts,
					   uint16_t nb_pkts);
int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);

uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);

void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
static inline
void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
			     const volatile void *desc,
			     uint16_t rx_id)
{
#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	const volatile union iavf_16byte_rx_desc *rx_desc = desc;

	printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
	       rxq->queue_id, rx_id, rx_desc->read.pkt_addr,
	       rx_desc->read.hdr_addr);
#else
	const volatile union iavf_32byte_rx_desc *rx_desc = desc;

	printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64
	       " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", rxq->queue_id,
	       rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
	       rx_desc->read.rsvd1, rx_desc->read.rsvd2);
#endif
}
/* All the descriptors are 16 bytes, so just use one of them
 * to print the qwords.
 */
static inline
void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
			     const volatile void *desc, uint16_t tx_id)
{
	const char *name;
	const volatile struct iavf_tx_desc *tx_desc = desc;
	enum iavf_tx_desc_dtype_value type;

	type = (enum iavf_tx_desc_dtype_value)
		rte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz &
				 rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
	switch (type) {
	case IAVF_TX_DESC_DTYPE_DATA:
		name = "Tx_data_desc";
		break;
	case IAVF_TX_DESC_DTYPE_CONTEXT:
		name = "Tx_context_desc";
		break;
	case IAVF_TX_DESC_DTYPE_IPSEC:
		name = "Tx_IPsec_desc";
		break;
	default:
		name = "unknown_desc";
		break;
	}

	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
	       tx_desc->cmd_type_offset_bsz);
}
#define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
	int i; \
	for (i = 0; i < (ad)->dev_data->nb_rx_queues; i++) { \
		struct iavf_rx_queue *rxq = (ad)->dev_data->rx_queues[i]; \
		if (!rxq) \
			continue; \
		rxq->fdir_enabled = on; \
	} \
	PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \
} while (0)
/* Enable/disable flow director Rx processing in data path. */
static inline
void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on)
{
	if (on) {
		/* enable flow director processing */
		FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
		ad->fdir_ref_cnt++;
	} else if (ad->fdir_ref_cnt >= 1) {
		ad->fdir_ref_cnt--;
		if (ad->fdir_ref_cnt == 0)
			FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
	}
}
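
/*
 * Usage sketch (hypothetical call sequence): each flow-director rule that
 * needs Rx mark processing takes a reference, and the per-queue flag is
 * only cleared when the last rule is released.
 *
 *	iavf_fdir_rx_proc_enable(ad, true);	first rule installed
 *	iavf_fdir_rx_proc_enable(ad, true);	second rule installed
 *	iavf_fdir_rx_proc_enable(ad, false);	second rule removed, still on
 *	iavf_fdir_rx_proc_enable(ad, false);	first rule removed, now off
 */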
#ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
	iavf_dump_rx_descriptor(rxq, desc, rx_id)
#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) \
	iavf_dump_tx_descriptor(txq, desc, tx_id)
#else
#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) do { } while (0)
#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) do { } while (0)
#endif

#endif /* _IAVF_RXTX_H_ */