/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _IAVF_RXTX_H_
#define _IAVF_RXTX_H_
/* Ring length (QLEN) must be a whole multiple of 32 descriptors. */
#define IAVF_ALIGN_RING_DESC      32
#define IAVF_MIN_RING_DESC        64
#define IAVF_MAX_RING_DESC        4096
#define IAVF_DMA_MEM_ALIGN        4096
/* Base address of the HW descriptor ring should be 128B aligned. */
#define IAVF_RING_BASE_ALIGN      128
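/* Illustrative sketch (not part of the upstream header): how a requested
 * descriptor count might be validated against the limits above. The helper
 * name is hypothetical.
 */
static inline int
iavf_example_check_ring_size(uint16_t nb_desc)
{
	/* must lie within [IAVF_MIN_RING_DESC, IAVF_MAX_RING_DESC] and be a
	 * whole multiple of IAVF_ALIGN_RING_DESC
	 */
	if (nb_desc < IAVF_MIN_RING_DESC || nb_desc > IAVF_MAX_RING_DESC ||
	    nb_desc % IAVF_ALIGN_RING_DESC != 0)
		return -1;
	return 0;
}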
/* used for Rx Bulk Allocate */
#define IAVF_RX_MAX_BURST         32

/* used for Vector PMD */
#define IAVF_VPMD_RX_MAX_BURST    32
#define IAVF_VPMD_TX_MAX_BURST    32
#define IAVF_RXQ_REARM_THRESH     32
#define IAVF_VPMD_DESCS_PER_LOOP  4
#define IAVF_VPMD_TX_MAX_FREE_BUF 64
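/* Illustrative sketch (not part of the upstream header): the vector Rx path
 * is assumed to re-arm the ring in blocks, only once at least
 * IAVF_RXQ_REARM_THRESH descriptors have been consumed. The helper name is
 * hypothetical; it only shows the gating check.
 */
static inline int
iavf_example_need_rearm(uint16_t rxrearm_nb)
{
	return rxrearm_nb >= IAVF_RXQ_REARM_THRESH;
}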
#define IAVF_TX_NO_VECTOR_FLAGS ( \
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
		RTE_ETH_TX_OFFLOAD_TCP_TSO | \
		RTE_ETH_TX_OFFLOAD_SECURITY)

#define IAVF_TX_VECTOR_OFFLOAD ( \
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
		RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)

#define IAVF_RX_VECTOR_OFFLOAD ( \
		RTE_ETH_RX_OFFLOAD_CHECKSUM | \
		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
		RTE_ETH_RX_OFFLOAD_VLAN | \
		RTE_ETH_RX_OFFLOAD_RSS_HASH)

#define IAVF_VECTOR_PATH 0
#define IAVF_VECTOR_OFFLOAD_PATH 1
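/* Illustrative sketch (not part of the upstream header): how Tx path
 * selection is assumed to use the masks above. Offloads in
 * IAVF_TX_NO_VECTOR_FLAGS force the scalar path; offloads covered by
 * IAVF_TX_VECTOR_OFFLOAD select the offload-capable vector path. The helper
 * name is hypothetical.
 */
static inline int
iavf_example_select_tx_path(uint64_t txq_offloads)
{
	if (txq_offloads & IAVF_TX_NO_VECTOR_FLAGS)
		return -1;			/* fall back to the scalar Tx path */
	if (txq_offloads & IAVF_TX_VECTOR_OFFLOAD)
		return IAVF_VECTOR_OFFLOAD_PATH; /* vector Tx with offloads */
	return IAVF_VECTOR_PATH;		/* basic vector Tx */
}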
#define DEFAULT_TX_RS_THRESH      32
#define DEFAULT_TX_FREE_THRESH    32

#define IAVF_MIN_TSO_MSS          256
#define IAVF_MAX_TSO_MSS          9668
#define IAVF_TSO_MAX_SEG          UINT8_MAX
#define IAVF_TX_MAX_MTU_SEG       8
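/* Illustrative sketch (not part of the upstream header): a TSO sanity check
 * built from the limits above, assuming the standard rte_mbuf TSO fields. The
 * helper name is hypothetical.
 */
static inline int
iavf_example_check_tso(const struct rte_mbuf *m)
{
	if (!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
		return 0;	/* not a TSO packet, nothing to check */
	if (m->tso_segsz < IAVF_MIN_TSO_MSS || m->tso_segsz > IAVF_MAX_TSO_MSS)
		return -1;	/* MSS outside the supported range */
	if (m->nb_segs > IAVF_TSO_MAX_SEG)
		return -1;	/* too many segments for one packet */
	return 0;
}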
#define IAVF_TX_CKSUM_OFFLOAD_MASK ( \
		RTE_MBUF_F_TX_IP_CKSUM | \
		RTE_MBUF_F_TX_L4_MASK | \
		RTE_MBUF_F_TX_TCP_SEG)
#define IAVF_TX_OFFLOAD_MASK ( \
		RTE_MBUF_F_TX_OUTER_IPV6 | \
		RTE_MBUF_F_TX_OUTER_IPV4 | \
		RTE_MBUF_F_TX_IPV6 | \
		RTE_MBUF_F_TX_IPV4 | \
		RTE_MBUF_F_TX_VLAN | \
		RTE_MBUF_F_TX_IP_CKSUM | \
		RTE_MBUF_F_TX_L4_MASK | \
		RTE_MBUF_F_TX_TCP_SEG | \
		RTE_MBUF_F_TX_SEC_OFFLOAD)
#define IAVF_TX_OFFLOAD_NOTSUP_MASK \
		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
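/* Illustrative sketch (not part of the upstream header): how a Tx prepare
 * stage is assumed to reject mbufs requesting unsupported offloads, using the
 * complement mask above. The helper name is hypothetical.
 */
static inline int
iavf_example_check_tx_offload(const struct rte_mbuf *m)
{
	if (m->ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK)
		return -1;	/* at least one requested offload is unsupported */
	return 0;
}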
/* Rx Flex Descriptors:
 * these descriptors are used instead of the legacy version descriptors.
 */
union iavf_16b_rx_flex_desc {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
				 /* bit 0 of hdr_addr is DD bit */

		u8 rxdid;	/* descriptor builder profile ID */
		u8 mir_id_umb_cast;	/* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0;	/* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len;	/* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1;	/* header=[10:0] */
	} wb; /* writeback */
union iavf_32b_rx_flex_desc {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
				 /* bit 0 of hdr_addr is DD bit */

		u8 rxdid;	/* descriptor builder profile ID */
		u8 mir_id_umb_cast;	/* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0;	/* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len;	/* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1;	/* header=[10:0] */
						/* ff1/ext=[15:12] */
		__le16 status_error0;
		__le16 status_error1;
	} wb; /* writeback */
/* HW descriptor structure: both 16-byte and 32-byte types are supported */
#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
#define iavf_rx_desc iavf_16byte_rx_desc
#define iavf_rx_flex_desc iavf_16b_rx_flex_desc
#else
#define iavf_rx_desc iavf_32byte_rx_desc
#define iavf_rx_flex_desc iavf_32b_rx_flex_desc
#endif
typedef void (*iavf_rxd_to_pkt_fields_t)(struct iavf_rx_queue *rxq,
				struct rte_mbuf *mb,
				volatile union iavf_rx_flex_desc *rxdp);

struct iavf_rxq_ops {
	void (*release_mbufs)(struct iavf_rx_queue *rxq);
};

struct iavf_txq_ops {
	void (*release_mbufs)(struct iavf_tx_queue *txq);
};

struct iavf_rx_queue_stats {
	struct iavf_ipsec_crypto_stats ipsec_crypto;
};
/* Structure associated with each Rx queue. */
struct iavf_rx_queue {
	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
	const struct rte_memzone *mz; /* memzone for Rx ring */
	volatile union iavf_rx_desc *rx_ring; /* Rx ring virtual address */
	uint64_t rx_ring_phys_addr;   /* Rx ring DMA address */
	struct rte_mbuf **sw_ring;    /* address of SW ring */
	uint16_t nb_rx_desc;          /* ring length */
	uint16_t rx_tail;             /* current value of tail */
	volatile uint8_t *qrx_tail;   /* register address of tail */
	uint16_t rx_free_thresh;      /* max free RX desc to hold */
	uint16_t nb_rx_hold;          /* number of held free RX desc */
	struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
	struct rte_mbuf fake_mbuf;      /* dummy mbuf */

	uint16_t rxrearm_nb;        /* number of descriptors remaining to be re-armed */
	uint16_t rxrearm_start;     /* index from which re-arming starts */
	uint64_t mbuf_initializer;  /* value to init mbufs */

	uint16_t rx_nb_avail;       /* number of staged packets ready */
	uint16_t rx_next_avail;     /* index of the next staged packet */
	uint16_t rx_free_trigger;   /* triggers Rx buffer allocation */
	struct rte_mbuf *rx_stage[IAVF_RX_MAX_BURST * 2]; /* staged mbufs */

	uint16_t port_id;           /* device port ID */
	uint8_t crc_len;            /* 0 if CRC stripped, 4 otherwise */
	uint8_t fdir_enabled;       /* 0 if FDIR disabled, 1 when enabled */
	uint16_t queue_id;          /* Rx queue index */
	uint16_t rx_buf_len;        /* packet buffer size */
	uint16_t rx_hdr_len;        /* header buffer size */
	uint16_t max_pkt_len;       /* maximum packet length */
	struct iavf_vsi *vsi;       /* the VSI this queue belongs to */

	bool q_set;                 /* if rx queue has been configured */
	bool rx_deferred_start;     /* don't start this queue in dev start */
	const struct iavf_rxq_ops *ops;
#define IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1	BIT(0)
#define IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2	BIT(1)
	uint8_t proto_xtr;          /* protocol extraction type */
	uint64_t xtr_ol_flag;       /* flexible descriptor metadata extraction offload flag */
	struct iavf_rx_queue_stats stats;
};
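/* Illustrative sketch (not part of the upstream header): how the bulk-allocate
 * staging fields above are assumed to cooperate. Packets received in bursts
 * are parked in rx_stage[] and handed out on subsequent calls until the stage
 * drains. The helper name is hypothetical.
 */
static inline uint16_t
iavf_example_fill_from_stage(struct iavf_rx_queue *rxq,
			     struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	uint16_t i;

	/* hand out at most what is currently staged */
	if (nb_pkts > rxq->rx_nb_avail)
		nb_pkts = rxq->rx_nb_avail;

	for (i = 0; i < nb_pkts; i++)
		rx_pkts[i] = rxq->rx_stage[rxq->rx_next_avail + i];

	rxq->rx_next_avail += nb_pkts;
	rxq->rx_nb_avail -= nb_pkts;

	return nb_pkts;
}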
struct iavf_tx_entry {
	struct rte_mbuf *mbuf;
	uint16_t next_id;
	uint16_t last_id;
};

struct iavf_tx_vec_entry {
	struct rte_mbuf *mbuf;
};
/* Structure associated with each Tx queue. */
struct iavf_tx_queue {
	const struct rte_memzone *mz;  /* memzone for Tx ring */
	volatile struct iavf_tx_desc *tx_ring; /* Tx ring virtual address */
	uint64_t tx_ring_phys_addr;    /* Tx ring DMA address */
	struct iavf_tx_entry *sw_ring; /* address array of SW ring */
	uint16_t nb_tx_desc;           /* ring length */
	uint16_t tx_tail;              /* current value of tail */
	volatile uint8_t *qtx_tail;    /* register address of tail */
	/* number of used desc since RS bit set */
	uint16_t last_desc_cleaned;    /* last desc that has been cleaned */
	uint16_t free_thresh;          /* free descriptor threshold */
	uint16_t next_dd;              /* next to check DD, for VPMD */
	uint16_t next_rs;              /* next to set RS, for VPMD */
	uint16_t ipsec_crypto_pkt_md_offset;

	bool q_set;                    /* if tx queue has been configured */
	bool tx_deferred_start;        /* don't start this queue in dev start */
	const struct iavf_txq_ops *ops;
#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1	BIT(0)
#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2	BIT(1)
/* Offload features */
union iavf_tx_offload {
		uint64_t l2_len:7;     /* L2 (MAC) Header Length. */
		uint64_t l3_len:9;     /* L3 (IP) Header Length. */
		uint64_t l4_len:8;     /* L4 Header Length. */
		uint64_t tso_segsz:16; /* TCP TSO segment size */
		/* uint64_t unused : 24; */
/* Rx Flex Descriptor
 * RxDID Profile ID 16-21
 * Flex-field 0: RSS hash lower 16-bits
 * Flex-field 1: RSS hash upper 16-bits
 * Flex-field 2: Flow ID lower 16-bits
 * Flex-field 3: Flow ID upper 16-bits
 */
struct iavf_32b_rx_flex_desc_comms {
	__le16 ptype_flexi_flags0;
	__le16 hdr_len_sph_flex_flags1;
	__le16 status_error0;
	__le16 status_error1;
/* Rx Flex Descriptor
 * RxDID Profile ID 22-23 (swap Hash and FlowID)
 * Flex-field 0: Flow ID lower 16-bits
 * Flex-field 1: Flow ID upper 16-bits
 * Flex-field 2: RSS hash lower 16-bits
 * Flex-field 3: RSS hash upper 16-bits
 */
struct iavf_32b_rx_flex_desc_comms_ovs {
	__le16 ptype_flexi_flags0;
	__le16 hdr_len_sph_flex_flags1;
	__le16 status_error0;
	__le16 status_error1;
/* Rx Flex Descriptor
 * RxDID Profile ID 24 Inline IPsec
 * Flex-field 0: RSS hash lower 16-bits
 * Flex-field 1: RSS hash upper 16-bits
 * Flex-field 2: Flow ID lower 16-bits
 * Flex-field 3: Flow ID upper 16-bits
 * Flex-field 4: Inline IPsec SAID lower 16-bits
 * Flex-field 5: Inline IPsec SAID upper 16-bits
 */
struct iavf_32b_rx_flex_desc_comms_ipsec {
	__le16 ptype_flexi_flags0;
	__le16 hdr_len_sph_flex_flags1;
	__le16 status_error0;
	__le16 status_error1;
/* Receive Flex Descriptor profile IDs: There are a total
 * of 64 profiles where profile IDs 0/1 are for legacy, and
 * profiles 2-63 are flex profiles that can be programmed
 * with a specific metadata (profile 7 reserved for HW).
 */
enum iavf_rxdid {
	IAVF_RXDID_LEGACY_0             = 0,
	IAVF_RXDID_LEGACY_1             = 1,
	IAVF_RXDID_FLEX_NIC             = 2,
	IAVF_RXDID_FLEX_NIC_2           = 6,
	IAVF_RXDID_COMMS_GENERIC        = 16,
	IAVF_RXDID_COMMS_AUX_VLAN       = 17,
	IAVF_RXDID_COMMS_AUX_IPV4       = 18,
	IAVF_RXDID_COMMS_AUX_IPV6       = 19,
	IAVF_RXDID_COMMS_AUX_IPV6_FLOW  = 20,
	IAVF_RXDID_COMMS_AUX_TCP        = 21,
	IAVF_RXDID_COMMS_OVS_1          = 22,
	IAVF_RXDID_COMMS_OVS_2          = 23,
	IAVF_RXDID_COMMS_IPSEC_CRYPTO   = 24,
	IAVF_RXDID_COMMS_AUX_IP_OFFSET  = 25,
	IAVF_RXDID_LAST                 = 63,
};
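/* Illustrative sketch (not part of the upstream header): one way a flexible
 * descriptor profile could be chosen for a queue. The mapping below is an
 * assumption for illustration only; the driver's real mapping is done by
 * iavf_proto_xtr_type_to_rxdid(), declared further down.
 */
static inline uint8_t
iavf_example_pick_rxdid(int want_ipsec_md, int want_flow_md)
{
	if (want_ipsec_md)
		return IAVF_RXDID_COMMS_IPSEC_CRYPTO;
	if (want_flow_md)
		return IAVF_RXDID_COMMS_GENERIC;
	return IAVF_RXDID_COMMS_OVS_1;	/* plain profile without extra metadata */
}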
enum iavf_rx_flex_desc_status_error_0_bits {
	/* Note: These are predefined bit offsets */
	IAVF_RX_FLEX_DESC_STATUS0_DD_S = 0,
	IAVF_RX_FLEX_DESC_STATUS0_EOF_S,
	IAVF_RX_FLEX_DESC_STATUS0_HBO_S,
	IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S,
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
	IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
	IAVF_RX_FLEX_DESC_STATUS0_LPBK_S,
	IAVF_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
	IAVF_RX_FLEX_DESC_STATUS0_RXE_S,
	IAVF_RX_FLEX_DESC_STATUS0_CRCP_S,
	IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
	IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
	IAVF_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
	IAVF_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
	IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
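/* Illustrative sketch (not part of the upstream header): reading the first
 * status/error word of a completed flexible Rx descriptor with the bit
 * positions above. DD indicates write-back completion; the XSUM_* bits only
 * carry a checksum verdict when L3L4P shows the hardware parsed the headers.
 * Helper names are hypothetical.
 */
static inline int
iavf_example_desc_done(uint16_t status_error0)
{
	return (status_error0 & BIT(IAVF_RX_FLEX_DESC_STATUS0_DD_S)) != 0;
}

static inline int
iavf_example_l4_csum_bad(uint16_t status_error0)
{
	if (!(status_error0 & BIT(IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return 0;	/* headers not parsed, no checksum verdict */
	return (status_error0 & BIT(IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)) != 0;
}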
enum iavf_rx_flex_desc_status_error_1_bits {
	/* Note: These are predefined bit offsets */
	/* Bits 3:0 are reserved for inline IPsec status */
	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
	/* [10:6] reserved */
	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
	IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK ( \
	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) | \
	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) | \
	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) | \
	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
enum iavf_rx_flex_desc_ipsec_crypto_status {
	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
};
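/* Illustrative sketch (not part of the upstream header): extracting the inline
 * IPsec status from status_error1 with the mask above. The status occupies the
 * low four bits, so no shift is needed. The helper name is hypothetical.
 */
static inline enum iavf_rx_flex_desc_ipsec_crypto_status
iavf_example_ipsec_status(uint16_t status_error1)
{
	return (enum iavf_rx_flex_desc_ipsec_crypto_status)
		(status_error1 & IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK);
}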
#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)

#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)

#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
					 IAVF_TXD_DATA_QW1_OFFSET_SHIFT)

#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)

#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)

#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)

#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)

#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)

#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT	(48)
#define IAVF_TXD_DATA_QW1_L2TAG1_MASK	\
	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
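/* Illustrative sketch (not part of the upstream header): assembling the second
 * quad-word of a Tx data descriptor from the shift/mask definitions above
 * (descriptor type, command bits, packed header-length offsets, buffer size
 * and L2TAG1). IAVF_TX_DESC_DTYPE_DATA comes from the base code definitions;
 * the helper name is hypothetical.
 */
static inline uint64_t
iavf_example_build_data_qw1(uint64_t cmd, uint64_t offset,
			    uint16_t buf_sz, uint16_t l2tag1)
{
	return ((uint64_t)IAVF_TX_DESC_DTYPE_DATA <<
			IAVF_TXD_DATA_QW1_DTYPE_SHIFT) |
		((cmd << IAVF_TXD_DATA_QW1_CMD_SHIFT) &
			IAVF_TXD_DATA_QW1_CMD_MASK) |
		((offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
			IAVF_TXD_DATA_QW1_OFFSET_MASK) |
		(((uint64_t)buf_sz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK) |
		(((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT) &
			IAVF_TXD_DATA_QW1_L2TAG1_MASK);
}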
#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)

#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)

#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT	(30)
#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK	\
	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK	\
	(0x3FUL << IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT)
#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT	(50)
#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK	\
	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
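/* Illustrative sketch (not part of the upstream header): the segmentation part
 * of a Tx context descriptor's second quad-word, built from the definitions
 * above (total L4 payload length and MSS). The TSO command bit itself comes
 * from the base descriptor definitions and is omitted here; the helper name is
 * hypothetical.
 */
static inline uint64_t
iavf_example_build_tso_ctx_qw1(uint32_t tlen, uint16_t mss)
{
	return (((uint64_t)tlen << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT) &
			IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK) |
		(((uint64_t)mss << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT) &
			IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK);
}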
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT	(0)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK	(0x3UL)

enum iavf_tx_ctx_desc_tunnel_external_ip_type {
	IAVF_TX_CTX_DESC_EIPT_NONE,
	IAVF_TX_CTX_DESC_EIPT_IPV6,
	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
};
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)

#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)

enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
};
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)

#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)

#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)

#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)

#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM	(32)
#define IAVF_TXD_CTX_QW0_L2TAG2_MASK	(0xFFFFUL)
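/* Illustrative sketch (not part of the upstream header): packing the
 * tunnelling parameters of a Tx context descriptor's first quad-word from the
 * fields above. Length units are assumptions for illustration (outer IP header
 * length in 4-byte words, tunnelling header length in 2-byte words); the
 * helper name is hypothetical.
 */
static inline uint64_t
iavf_example_build_tunnel_qw0(enum iavf_tx_ctx_desc_tunnel_external_ip_type eipt,
		uint8_t eiplen,
		enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type l4tunt,
		uint8_t l4tunlen)
{
	return (((uint64_t)eipt & IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK) <<
			IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT) |
		(((uint64_t)eiplen & IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK) <<
			IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT) |
		(((uint64_t)l4tunt & IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK) <<
			IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT) |
		(((uint64_t)l4tunlen & IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK) <<
			IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT);
}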
#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)

/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
/* for iavf_32b_rx_flex_desc.pkt_len member */
#define IAVF_RX_FLX_DESC_PKT_LEN_M	(0x3FFF) /* 14-bits */
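/* Illustrative sketch (not part of the upstream header): pulling the packet
 * type and packet length out of a completed flexible Rx descriptor using the
 * masks above. iavf_rx_flex_desc is the conditional alias defined earlier; the
 * helper name is hypothetical.
 */
static inline void
iavf_example_parse_flex_desc(const volatile union iavf_rx_flex_desc *rxd,
			     uint16_t *ptype, uint16_t *pkt_len)
{
	*ptype = rte_le_to_cpu_16(rxd->wb.ptype_flex_flags0) &
			IAVF_RX_FLEX_DESC_PTYPE_M;
	*pkt_len = rte_le_to_cpu_16(rxd->wb.pkt_len) &
			IAVF_RX_FLX_DESC_PKT_LEN_M;
}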
int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
			    uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp);
int iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
void iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
			    uint16_t queue_idx,
			    uint16_t nb_desc,
			    unsigned int socket_id,
			    const struct rte_eth_txconf *tx_conf);
int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt);
void iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void iavf_stop_queues(struct rte_eth_dev *dev);
uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
uint16_t iavf_recv_pkts_flex_rxd(void *rx_queue,
				 struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_flex_rxd(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
void iavf_set_rx_function(struct rte_eth_dev *dev);
void iavf_set_tx_function(struct rte_eth_dev *dev);
void iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			   struct rte_eth_rxq_info *qinfo);
void iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
			   struct rte_eth_txq_info *qinfo);
uint32_t iavf_dev_rxq_count(void *rx_queue);
int iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
uint16_t iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
				     uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec(void *rx_queue,
				      struct rte_mbuf **rx_pkts,
				      uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue,
					       struct rte_mbuf **rx_pkts,
					       uint16_t nb_pkts);
uint16_t iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
				   uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue,
					  struct rte_mbuf **rx_pkts,
					  uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx2(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue,
						    struct rte_mbuf **rx_pkts,
						    uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			    uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
				 uint16_t nb_pkts);
int iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
				   uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_offload(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_flex_rxd(void *rx_queue,
					    struct rte_mbuf **rx_pkts,
					    uint16_t nb_pkts);
uint16_t iavf_recv_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
						    struct rte_mbuf **rx_pkts,
						    uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512(void *rx_queue,
					     struct rte_mbuf **rx_pkts,
					     uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
						      struct rte_mbuf **rx_pkts,
						      uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd(void *rx_queue,
						      struct rte_mbuf **rx_pkts,
						      uint16_t nb_pkts);
uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
						      struct rte_mbuf **rx_pkts,
						      uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
				   uint16_t nb_pkts);
uint16_t iavf_xmit_pkts_vec_avx512_offload(void *tx_queue,
					   struct rte_mbuf **tx_pkts,
					   uint16_t nb_pkts);
int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);

uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);

void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
static inline
void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
			     const volatile void *desc,
			     uint16_t rx_id)
{
#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	const volatile union iavf_16byte_rx_desc *rx_desc = desc;

	printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
	       rxq->queue_id, rx_id, rx_desc->read.pkt_addr,
	       rx_desc->read.hdr_addr);
#else
	const volatile union iavf_32byte_rx_desc *rx_desc = desc;

	printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64
	       " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", rxq->queue_id,
	       rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
	       rx_desc->read.rsvd1, rx_desc->read.rsvd2);
#endif
}
/* All the descriptors are 16 bytes, so just use one of them
 * to print the qwords
 */
static inline
void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
			     const volatile void *desc, uint16_t tx_id)
{
	const char *name;
	const volatile struct iavf_tx_desc *tx_desc = desc;
	enum iavf_tx_desc_dtype_value type;

	type = (enum iavf_tx_desc_dtype_value)
		rte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz &
			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
	switch (type) {
	case IAVF_TX_DESC_DTYPE_DATA:
		name = "Tx_data_desc";
		break;
	case IAVF_TX_DESC_DTYPE_CONTEXT:
		name = "Tx_context_desc";
		break;
	case IAVF_TX_DESC_DTYPE_IPSEC:
		name = "Tx_IPsec_desc";
		break;
	default:
		name = "unknown_desc";
		break;
	}

	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
	       tx_desc->cmd_type_offset_bsz);
}
#define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
	int i; \
	for (i = 0; i < (ad)->dev_data->nb_rx_queues; i++) { \
		struct iavf_rx_queue *rxq = (ad)->dev_data->rx_queues[i]; \
		rxq->fdir_enabled = on; \
	} \
	PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \
} while (0)
/* Enable/disable flow director Rx processing in data path. */
static inline
void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on)
{
	if (on) {
		/* enable flow director processing */
		FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
		ad->fdir_ref_cnt++;
	} else if (ad->fdir_ref_cnt >= 1) {
		ad->fdir_ref_cnt--;
		if (ad->fdir_ref_cnt == 0)
			FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
	}
}
#ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
	iavf_dump_rx_descriptor(rxq, desc, rx_id)
#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) \
	iavf_dump_tx_descriptor(txq, desc, tx_id)
#else
#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) do { } while (0)
#define IAVF_DUMP_TX_DESC(txq, desc, tx_id) do { } while (0)
#endif

#endif /* _IAVF_RXTX_H_ */