1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc
5 #ifndef _THUNDERX_NICVF_HW_DEFS_H
6 #define _THUNDERX_NICVF_HW_DEFS_H
11 #include "nicvf_plat.h"
/* Virtual function register offsets */

/* VF configuration, PF<->VF mailbox and interrupt control registers */
#define NIC_VF_CFG (0x000020)
#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
#define NIC_VF_INT (0x000200)      /* interrupt status */
#define NIC_VF_INT_W1S (0x000220)  /* interrupt status, write-1-to-set */
#define NIC_VF_ENA_W1C (0x000240)  /* interrupt enable, write-1-to-clear */
#define NIC_VF_ENA_W1S (0x000260)  /* interrupt enable, write-1-to-set */

/* Per-vNIC RSS configuration and statistics registers */
#define NIC_VNIC_RSS_CFG (0x0020E0)
#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
#define NIC_VNIC_TX_STAT_0_4 (0x004000)
#define NIC_VNIC_RX_STAT_0_13 (0x004100)
#define NIC_VNIC_RQ_GEN_CFG (0x010010)

/* Completion queue (CQ) 0..7 register block */
#define NIC_QSET_CQ_0_7_CFG (0x010400)
#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
#define NIC_QSET_CQ_0_7_THRESH (0x010410)
#define NIC_QSET_CQ_0_7_BASE (0x010420)
#define NIC_QSET_CQ_0_7_HEAD (0x010428)
#define NIC_QSET_CQ_0_7_TAIL (0x010430)
#define NIC_QSET_CQ_0_7_DOOR (0x010438)
#define NIC_QSET_CQ_0_7_STATUS (0x010440)
#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
#define NIC_QSET_CQ_0_7_DEBUG (0x010450)

/* Receive queue (RQ) 0..7 register block */
#define NIC_QSET_RQ_0_7_CFG (0x010600)
#define NIC_QSET_RQ_0_7_STATUS0 (0x010700)
#define NIC_QSET_RQ_0_7_STATUS1 (0x010708)

/* Send queue (SQ) 0..7 register block */
#define NIC_QSET_SQ_0_7_CFG (0x010800)
#define NIC_QSET_SQ_0_7_THRESH (0x010810)
#define NIC_QSET_SQ_0_7_BASE (0x010820)
#define NIC_QSET_SQ_0_7_HEAD (0x010828)
#define NIC_QSET_SQ_0_7_TAIL (0x010830)
#define NIC_QSET_SQ_0_7_DOOR (0x010838)
#define NIC_QSET_SQ_0_7_STATUS (0x010840)
#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
#define NIC_QSET_SQ_0_7_STATUS0 (0x010900)
#define NIC_QSET_SQ_0_7_STATUS1 (0x010908)

/* Receive buffer descriptor ring (RBDR) 0..1 register block */
#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
#define NIC_QSET_RBDR_0_1_PRFCH_STATUS (0x010C50)
/* vNIC HW Constants */

/* Position of the queue-number field within a queue register offset */
#define NIC_Q_NUM_SHIFT 18

/* Queue set (QS) limits */
#define MAX_QUEUE_SET 128
#define MAX_RCV_QUEUES_PER_QS 8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
#define MAX_SND_QUEUES_PER_QS 8
#define MAX_CMP_QUEUES_PER_QS 8

/* Bit positions of interrupt sources in NIC_VF_INT / NIC_VF_ENA_* */
#define NICVF_INTR_CQ_SHIFT 0        /* one bit per CQ, 8 total */
#define NICVF_INTR_SQ_SHIFT 8        /* one bit per SQ, 8 total */
#define NICVF_INTR_RBDR_SHIFT 16     /* one bit per RBDR, 2 total */
#define NICVF_INTR_PKT_DROP_SHIFT 20
#define NICVF_INTR_TCP_TIMER_SHIFT 21
#define NICVF_INTR_MBOX_SHIFT 22
#define NICVF_INTR_QS_ERR_SHIFT 23
/* NIC_VNIC_RQ_GEN_CFG: disable adaptive padding */
#define NICVF_QS_RQ_DIS_APAD_SHIFT 22

/* Interrupt masks derived from the NICVF_INTR_*_SHIFT positions above */
#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)     /* 8 CQ bits */
#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)     /* 8 SQ bits */
#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT) /* 2 RBDR bits */
#define NICVF_INTR_PKT_DROP_MASK (1 << NICVF_INTR_PKT_DROP_SHIFT)
#define NICVF_INTR_TCP_TIMER_MASK (1 << NICVF_INTR_TCP_TIMER_SHIFT)
#define NICVF_INTR_MBOX_MASK (1 << NICVF_INTR_MBOX_SHIFT)
#define NICVF_INTR_QS_ERR_MASK (1 << NICVF_INTR_QS_ERR_SHIFT)
/* NOTE(review): 0x7FFFFF covers bits 0-22 only, i.e. it excludes
 * NICVF_INTR_QS_ERR_MASK (bit 23) -- confirm against the HRM whether
 * that is intentional.
 */
#define NICVF_INTR_ALL_MASK (0x7FFFFF)
/* NIC_QSET_CQ_0_7_STATUS: completion queue error/status bits */
#define NICVF_CQ_WR_FULL (1ULL << 26)    /* write attempted while CQ full */
#define NICVF_CQ_WR_DISABLE (1ULL << 25) /* write attempted while disabled */
#define NICVF_CQ_WR_FAULT (1ULL << 24)   /* memory fault on CQ write */
/* All CQ error conditions. The extracted source lost the macro's last
 * continuation line, leaving the backslash continuation to splice the
 * following #define into the macro body; restored the three-term mask.
 */
#define NICVF_CQ_ERR_MASK (NICVF_CQ_WR_FULL |\
			   NICVF_CQ_WR_DISABLE |\
			   NICVF_CQ_WR_FAULT)
/* Low 16 bits of CQ status hold the pending CQE count */
#define NICVF_CQ_CQE_COUNT_MASK (0xFFFF)
/* NIC_QSET_SQ_0_7_STATUS: send queue error/status bits */
#define NICVF_SQ_ERR_STOPPED (1ULL << 21)
#define NICVF_SQ_ERR_SEND (1ULL << 20)
#define NICVF_SQ_ERR_DPE (1ULL << 19)
/* All SQ error conditions. The extracted source lost the macro's
 * continuation lines (the SEND and DPE terms), leaving an unterminated
 * backslash continuation; restored the three-term mask from the error
 * bits declared above.
 */
#define NICVF_SQ_ERR_MASK (NICVF_SQ_ERR_STOPPED |\
			   NICVF_SQ_ERR_SEND |\
			   NICVF_SQ_ERR_DPE)
/* Bit position of the "stopped" flag within SQ status */
#define NICVF_SQ_STATUS_STOPPED_BIT (21)
/* NIC_QSET_RBDR_0_1_STATUS0: FIFO state field and descriptor count */
#define NICVF_RBDR_FIFO_STATE_SHIFT (62)
#define NICVF_RBDR_FIFO_STATE_MASK (3ULL << NICVF_RBDR_FIFO_STATE_SHIFT)
#define NICVF_RBDR_COUNT_MASK (0x7FFFF)

/* Queue reset bits, written to the respective queue CFG register */
#define NICVF_CQ_RESET (1ULL << 41)
#define NICVF_SQ_RESET (1ULL << 17)
#define NICVF_RBDR_RESET (1ULL << 43)
/* RSS (receive side scaling) parameters */
#define NIC_MAX_RSS_HASH_BITS (8)
#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS) /* 256 entries */
#define RSS_HASH_KEY_SIZE (5) /* 320 bit key */  /* in 64-bit words */
#define RSS_HASH_KEY_BYTE_SIZE (40) /* 320 bit key */

/* NIC_VNIC_RSS_CFG: per-protocol hash-input enable bits */
#define RSS_L2_EXTENDED_HASH_ENA (1 << 0)
#define RSS_IP_ENA (1 << 1)
#define RSS_TCP_ENA (1 << 2)
#define RSS_TCP_SYN_ENA (1 << 3)
#define RSS_UDP_ENA (1 << 4)
#define RSS_L4_EXTENDED_ENA (1 << 5)
#define RSS_L3_BI_DIRECTION_ENA (1 << 7)
#define RSS_L4_BI_DIRECTION_ENA (1 << 8)
#define RSS_TUN_VXLAN_ENA (1 << 9)
#define RSS_TUN_GENEVE_ENA (1 << 10)
#define RSS_TUN_NVGRE_ENA (1 << 11)
/* RBDR ring sizes, in descriptors */
#define RBDR_QUEUE_SZ_8K (8 * 1024)
#define RBDR_QUEUE_SZ_16K (16 * 1024)
#define RBDR_QUEUE_SZ_32K (32 * 1024)
#define RBDR_QUEUE_SZ_64K (64 * 1024)
#define RBDR_QUEUE_SZ_128K (128 * 1024)
#define RBDR_QUEUE_SZ_256K (256 * 1024)
#define RBDR_QUEUE_SZ_512K (512 * 1024)
#define RBDR_QUEUE_SZ_MAX RBDR_QUEUE_SZ_512K

/* log2 of the smallest RBDR size; HW qsize field is log2(size) minus this */
#define RBDR_SIZE_SHIFT (13) /* 8k */

/* Send queue sizes, in descriptors */
#define SND_QUEUE_SZ_1K (1 * 1024)
#define SND_QUEUE_SZ_2K (2 * 1024)
#define SND_QUEUE_SZ_4K (4 * 1024)
#define SND_QUEUE_SZ_8K (8 * 1024)
#define SND_QUEUE_SZ_16K (16 * 1024)
#define SND_QUEUE_SZ_32K (32 * 1024)
#define SND_QUEUE_SZ_64K (64 * 1024)
#define SND_QUEUE_SZ_MAX SND_QUEUE_SZ_64K

/* log2 of the smallest SQ size */
#define SND_QSIZE_SHIFT (10) /* 1k */

/* Completion queue sizes, in entries */
#define CMP_QUEUE_SZ_1K (1 * 1024)
#define CMP_QUEUE_SZ_2K (2 * 1024)
#define CMP_QUEUE_SZ_4K (4 * 1024)
#define CMP_QUEUE_SZ_8K (8 * 1024)
#define CMP_QUEUE_SZ_16K (16 * 1024)
#define CMP_QUEUE_SZ_32K (32 * 1024)
#define CMP_QUEUE_SZ_64K (64 * 1024)
#define CMP_QUEUE_SZ_MAX CMP_QUEUE_SZ_64K

/* log2 of the smallest CQ size */
#define CMP_QSIZE_SHIFT (10) /* 1k */

/* Range of the encoded qsize value written to the queue CFG registers
 * (0..6 encodes the seven sizes listed above)
 */
#define NICVF_QSIZE_MIN_VAL (0)
#define NICVF_QSIZE_MAX_VAL (6)
/* Min/Max packet size */
#define NIC_HW_MIN_FRS (64)
/* ETH_HLEN+ETH_FCS_LEN+2*VLAN_HLEN */
#define NIC_HW_L2_OVERHEAD (26)
#define NIC_HW_MAX_MTU (9190)
#define NIC_HW_MAX_FRS (NIC_HW_MAX_MTU + NIC_HW_L2_OVERHEAD)
/* Max gather segments per transmitted packet */
#define NIC_HW_MAX_SEGS (12)

/* Descriptor alignments */
#define NICVF_RBDR_BASE_ALIGN_BYTES (128) /* 7 bits */
#define NICVF_CQ_BASE_ALIGN_BYTES (512) /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES (128) /* 7 bits */

/* 64-bit word index of the buffer pointer within an RX completion entry */
#define NICVF_CQE_RBPTR_WORD (6)
#define NICVF_CQE_RX2_RBPTR_WORD (7)

/* Compile-time assertion; stringifies the condition as the failure message */
#define NICVF_STATIC_ASSERT(s) _Static_assert(s, #s)
/* Runtime check that this VF is primary (secondary-qset mode disabled) */
#define assert_primary(nic) assert((nic)->sqs_mode == 0)

/* IOVA (DMA) address as programmed into queue base registers */
typedef uint64_t nicvf_iova_addr_t;
193 /* vNIC HW Enumerations */
195 enum nic_send_ld_type_e {
196 NIC_SEND_LD_TYPE_E_LDD,
197 NIC_SEND_LD_TYPE_E_LDT,
198 NIC_SEND_LD_TYPE_E_LDWB,
199 NIC_SEND_LD_TYPE_E_ENUM_LAST,
202 enum ether_type_algorithm {
207 ETYPE_ALG_VLAN_STRIP,
214 L3TYPE_IPV4_OPTIONS = 0x5,
216 L3TYPE_IPV6_OPTIONS = 0x7,
217 L3TYPE_ET_STOP = 0xD,
/* NOTE(review): presumably bit 0 of l3type flags an options-bearing header
 * and 0x06 selects the IPv4/IPv6 family bits -- confirm against the l3type
 * enum values (IPV4_OPTIONS = 0x5, IPV6_OPTIONS = 0x7).
 */
#define NICVF_L3TYPE_OPTIONS_MASK ((uint8_t)1)
#define NICVF_L3TYPE_IPVX_MASK ((uint8_t)0x06)
237 /* CPI and RSSI configuration */
238 enum cpi_algorithm_type {
245 enum rss_algorithm_type {
260 RSS_HASH_TCP_SYN_DIS,
268 /* Completion queue entry types */
272 CQE_TYPE_RX_SPLIT = 0x3,
273 CQE_TYPE_RX_TCP = 0x4,
275 CQE_TYPE_SEND_PTP = 0x9,
278 enum cqe_rx_tcp_status {
279 CQE_RX_STATUS_VALID_TCP_CNXT,
280 CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
283 enum cqe_send_status {
284 CQE_SEND_STATUS_GOOD,
285 CQE_SEND_STATUS_DESC_FAULT = 0x01,
286 CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
287 CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
288 CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
289 CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
290 CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
291 CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
292 CQE_SEND_STATUS_LOCK_VIOL = 0x84,
293 CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
294 CQE_SEND_STATUS_DATA_FAULT = 0x86,
295 CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
296 CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
297 CQE_SEND_STATUS_MEM_FAULT = 0x89,
298 CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
299 CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
302 enum cqe_rx_tcp_end_reason {
303 CQE_RX_TCP_END_FIN_FLAG_DET,
304 CQE_RX_TCP_END_INVALID_FLAG,
305 CQE_RX_TCP_END_TIMEOUT,
306 CQE_RX_TCP_END_OUT_OF_SEQ,
307 CQE_RX_TCP_END_PKT_ERR,
308 CQE_RX_TCP_END_QS_DISABLED = 0x0F,
311 /* Packet protocol level error enumeration */
312 enum cqe_rx_err_level {
319 /* Packet protocol level error type enumeration */
320 enum cqe_rx_err_opcode {
322 CQE_RX_ERR_RE_PARTIAL,
323 CQE_RX_ERR_RE_JABBER,
324 CQE_RX_ERR_RE_FCS = 0x7,
325 CQE_RX_ERR_RE_TERMINATE = 0x9,
326 CQE_RX_ERR_RE_RX_CTL = 0xb,
327 CQE_RX_ERR_PREL2_ERR = 0x1f,
328 CQE_RX_ERR_L2_FRAGMENT = 0x20,
329 CQE_RX_ERR_L2_OVERRUN = 0x21,
330 CQE_RX_ERR_L2_PFCS = 0x22,
331 CQE_RX_ERR_L2_PUNY = 0x23,
332 CQE_RX_ERR_L2_MAL = 0x24,
333 CQE_RX_ERR_L2_OVERSIZE = 0x25,
334 CQE_RX_ERR_L2_UNDERSIZE = 0x26,
335 CQE_RX_ERR_L2_LENMISM = 0x27,
336 CQE_RX_ERR_L2_PCLP = 0x28,
337 CQE_RX_ERR_IP_NOT = 0x41,
338 CQE_RX_ERR_IP_CHK = 0x42,
339 CQE_RX_ERR_IP_MAL = 0x43,
340 CQE_RX_ERR_IP_MALD = 0x44,
341 CQE_RX_ERR_IP_HOP = 0x45,
342 CQE_RX_ERR_L3_ICRC = 0x46,
343 CQE_RX_ERR_L3_PCLP = 0x47,
344 CQE_RX_ERR_L4_MAL = 0x61,
345 CQE_RX_ERR_L4_CHK = 0x62,
346 CQE_RX_ERR_UDP_LEN = 0x63,
347 CQE_RX_ERR_L4_PORT = 0x64,
348 CQE_RX_ERR_TCP_FLAG = 0x65,
349 CQE_RX_ERR_TCP_OFFSET = 0x66,
350 CQE_RX_ERR_L4_PCLP = 0x67,
351 CQE_RX_ERR_RBDR_TRUNC = 0x70,
354 enum send_l4_csum_type {
355 SEND_L4_CSUM_DISABLE,
366 enum send_load_type {
372 enum send_mem_alg_type {
374 SEND_MEMALG_ADD = 0x08,
375 SEND_MEMALG_SUB = 0x09,
376 SEND_MEMALG_ADDLEN = 0x0A,
377 SEND_MEMALG_SUBLEN = 0x0B,
380 enum send_mem_dsz_type {
383 SEND_MEMDSZ_B8 = 0x03,
386 enum sq_subdesc_type {
387 SQ_DESC_TYPE_INVALID,
390 SQ_DESC_TYPE_IMMEDIATE,
414 L4_UDP_GENEVE = 0x09,
428 RBDR_FIFO_STATE_INACTIVE,
429 RBDR_FIFO_STATE_ACTIVE,
430 RBDR_FIFO_STATE_RESET,
431 RBDR_FIFO_STATE_FAIL,
434 enum rq_cache_allocation {
437 RQ_CACHE_ALLOC_FIRST,
441 enum cq_rx_errlvl_e {
450 CQ_RX_ERROP_RE_PARTIAL = 0x1,
451 CQ_RX_ERROP_RE_JABBER = 0x2,
452 CQ_RX_ERROP_RE_FCS = 0x7,
453 CQ_RX_ERROP_RE_TERMINATE = 0x9,
454 CQ_RX_ERROP_RE_RX_CTL = 0xb,
455 CQ_RX_ERROP_PREL2_ERR = 0x1f,
456 CQ_RX_ERROP_L2_FRAGMENT = 0x20,
457 CQ_RX_ERROP_L2_OVERRUN = 0x21,
458 CQ_RX_ERROP_L2_PFCS = 0x22,
459 CQ_RX_ERROP_L2_PUNY = 0x23,
460 CQ_RX_ERROP_L2_MAL = 0x24,
461 CQ_RX_ERROP_L2_OVERSIZE = 0x25,
462 CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
463 CQ_RX_ERROP_L2_LENMISM = 0x27,
464 CQ_RX_ERROP_L2_PCLP = 0x28,
465 CQ_RX_ERROP_IP_NOT = 0x41,
466 CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
467 CQ_RX_ERROP_IP_MAL = 0x43,
468 CQ_RX_ERROP_IP_MALD = 0x44,
469 CQ_RX_ERROP_IP_HOP = 0x45,
470 CQ_RX_ERROP_L3_ICRC = 0x46,
471 CQ_RX_ERROP_L3_PCLP = 0x47,
472 CQ_RX_ERROP_L4_MAL = 0x61,
473 CQ_RX_ERROP_L4_CHK = 0x62,
474 CQ_RX_ERROP_UDP_LEN = 0x63,
475 CQ_RX_ERROP_L4_PORT = 0x64,
476 CQ_RX_ERROP_TCP_FLAG = 0x65,
477 CQ_RX_ERROP_TCP_OFFSET = 0x66,
478 CQ_RX_ERROP_L4_PCLP = 0x67,
479 CQ_RX_ERROP_RBDR_TRUNC = 0x70,
484 CQ_TX_ERROP_DESC_FAULT = 0x10,
485 CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
486 CQ_TX_ERROP_SUBDC_ERR = 0x12,
487 CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
488 CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
489 CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
490 CQ_TX_ERROP_LOCK_VIOL = 0x83,
491 CQ_TX_ERROP_DATA_FAULT = 0x84,
492 CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
493 CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
494 CQ_TX_ERROP_MEM_FAULT = 0x87,
495 CQ_TX_ERROP_CK_OVERLAP = 0x88,
496 CQ_TX_ERROP_CK_OFLOW = 0x89,
497 CQ_TX_ERROP_ENUM_LAST = 0x8a,
500 enum rq_sq_stats_reg_offset {
505 enum nic_stat_vnic_rx_e {
522 enum nic_stat_vnic_tx_e {
530 /* vNIC HW Register structures */
535 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
537 uint64_t stdn_fault:1;
545 uint64_t vlan_found:1;
546 uint64_t vlan_stripped:1;
547 uint64_t vlan2_found:1;
548 uint64_t vlan2_stripped:1;
551 uint64_t l2_present:1;
552 uint64_t err_level:3;
553 uint64_t err_opcode:8;
555 uint64_t err_opcode:8;
556 uint64_t err_level:3;
557 uint64_t l2_present:1;
560 uint64_t vlan2_stripped:1;
561 uint64_t vlan2_found:1;
562 uint64_t vlan_stripped:1;
563 uint64_t vlan_found:1;
571 uint64_t stdn_fault:1;
580 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
585 uint64_t cq_pkt_len:8;
586 uint64_t align_pad:3;
592 uint64_t align_pad:3;
593 uint64_t cq_pkt_len:8;
605 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
607 uint64_t vlan_tci:16;
609 uint64_t vlan2_ptr:8;
611 uint64_t vlan2_ptr:8;
613 uint64_t vlan_tci:16;
622 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
639 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
656 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
673 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
674 uint64_t vlan_found:1;
675 uint64_t vlan_stripped:1;
676 uint64_t vlan2_found:1;
677 uint64_t vlan2_stripped:1;
680 uint64_t inner_l4type:4;
681 uint64_t inner_l3type:4;
683 uint64_t vlan2_ptr:8;
686 uint64_t inner_l3ptr:8;
687 uint64_t inner_l4ptr:8;
689 uint64_t inner_l4ptr:8;
690 uint64_t inner_l3ptr:8;
693 uint64_t vlan2_ptr:8;
695 uint64_t inner_l3type:4;
696 uint64_t inner_l4type:4;
699 uint64_t vlan2_stripped:1;
700 uint64_t vlan2_found:1;
701 uint64_t vlan_stripped:1;
702 uint64_t vlan_found:1;
708 cqe_rx_word0_t word0;
709 cqe_rx_word1_t word1;
710 cqe_rx_word2_t word2;
711 cqe_rx_word3_t word3;
712 cqe_rx_word4_t word4;
713 cqe_rx_word5_t word5;
714 cqe_rx2_word6_t word6; /* if NIC_PF_RX_CFG[CQE_RX2_ENA] set */
717 struct cqe_rx_tcp_err_t {
718 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
719 uint64_t cqe_type:4; /* W0 */
722 uint64_t rsvd1:4; /* W1 */
723 uint64_t partial_first:1;
725 uint64_t rbdr_bytes:8;
732 uint64_t rbdr_bytes:8;
734 uint64_t partial_first:1;
739 struct cqe_rx_tcp_t {
740 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
741 uint64_t cqe_type:4; /* W0 */
743 uint64_t cq_tcp_status:8;
745 uint64_t rsvd1:32; /* W1 */
746 uint64_t tcp_cntx_bytes:8;
748 uint64_t tcp_err_bytes:16;
750 uint64_t cq_tcp_status:8;
752 uint64_t cqe_type:4; /* W0 */
754 uint64_t tcp_err_bytes:16;
756 uint64_t tcp_cntx_bytes:8;
757 uint64_t rsvd1:32; /* W1 */
762 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
763 uint64_t cqe_type:4; /* W0 */
771 uint64_t send_status:8;
773 uint64_t ptp_timestamp:64; /* W1 */
774 #elif NICVF_BYTE_ORDER == NICVF_LITTLE_ENDIAN
775 uint64_t send_status:8;
783 uint64_t cqe_type:4; /* W0 */
785 uint64_t ptp_timestamp:64;
789 struct cq_entry_type_t {
790 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
801 struct cq_entry_type_t type;
802 struct cqe_rx_t rx_hdr;
803 struct cqe_rx_tcp_t rx_tcp_hdr;
804 struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
805 struct cqe_send_t cqe_send;
808 NICVF_STATIC_ASSERT(sizeof(union cq_entry_t) == 512);
810 struct rbdr_entry_t {
811 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
815 uint64_t buf_addr:42;
816 uint64_t cache_align:7;
818 nicvf_iova_addr_t full_addr;
823 uint64_t cache_align:7;
824 uint64_t buf_addr:42;
827 nicvf_iova_addr_t full_addr;
832 NICVF_STATIC_ASSERT(sizeof(struct rbdr_entry_t) == sizeof(uint64_t));
834 /* TCP reassembly context */
835 struct rbe_tcp_cnxt_t {
836 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
837 uint64_t tcp_pkt_cnt:12;
839 uint64_t align_hdr_bytes:4;
840 uint64_t align_ptr_bytes:4;
841 uint64_t ptr_bytes:16;
845 uint64_t tcp_end_reason:2;
846 uint64_t tcp_status:4;
848 uint64_t tcp_status:4;
849 uint64_t tcp_end_reason:2;
853 uint64_t ptr_bytes:16;
854 uint64_t align_ptr_bytes:4;
855 uint64_t align_hdr_bytes:4;
857 uint64_t tcp_pkt_cnt:12;
861 /* Always Big endian */
865 uint64_t skip_length:6;
866 uint64_t disable_rss:1;
867 uint64_t disable_tcp_reassembly:1;
874 struct sq_crc_subdesc {
875 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
877 uint64_t crc_ival:32;
878 uint64_t subdesc_type:4;
881 uint64_t crc_insert_pos:16;
882 uint64_t hdr_start:16;
886 uint64_t hdr_start:16;
887 uint64_t crc_insert_pos:16;
890 uint64_t subdesc_type:4;
891 uint64_t crc_ival:32;
896 struct sq_gather_subdesc {
897 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
898 uint64_t subdesc_type:4; /* W0 */
903 uint64_t rsvd1:15; /* W1 */
909 uint64_t subdesc_type:4; /* W0 */
912 uint64_t rsvd1:15; /* W1 */
916 /* SQ immediate subdescriptor */
917 struct sq_imm_subdesc {
918 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
919 uint64_t subdesc_type:4; /* W0 */
923 uint64_t data:64; /* W1 */
927 uint64_t subdesc_type:4; /* W0 */
929 uint64_t data:64; /* W1 */
933 struct sq_mem_subdesc {
934 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
935 uint64_t subdesc_type:4; /* W0 */
942 uint64_t rsvd1:15; /* W1 */
950 uint64_t subdesc_type:4; /* W0 */
953 uint64_t rsvd1:15; /* W1 */
957 struct sq_hdr_subdesc {
958 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
959 uint64_t subdesc_type:4;
961 uint64_t post_cqe:1; /* Post CQE on no error also */
962 uint64_t dont_send:1;
964 uint64_t subdesc_cnt:8;
967 uint64_t csum_inner_l4:2;
968 uint64_t csum_inner_l3:1;
970 uint64_t l4_offset:8;
971 uint64_t l3_offset:8;
973 uint64_t tot_len:20; /* W0 */
976 uint64_t inner_l4_offset:8;
977 uint64_t inner_l3_offset:8;
978 uint64_t tso_start:8;
980 uint64_t tso_max_paysize:14; /* W1 */
984 uint64_t l3_offset:8;
985 uint64_t l4_offset:8;
987 uint64_t csum_inner_l3:1;
988 uint64_t csum_inner_l4:2;
991 uint64_t subdesc_cnt:8;
993 uint64_t dont_send:1;
994 uint64_t post_cqe:1; /* Post CQE on no error also */
996 uint64_t subdesc_type:4; /* W0 */
998 uint64_t tso_max_paysize:14;
1000 uint64_t tso_start:8;
1001 uint64_t inner_l3_offset:8;
1002 uint64_t inner_l4_offset:8;
1003 uint64_t rsvd2:24; /* W1 */
1007 /* Each sq entry is 128 bits wide */
1010 struct sq_hdr_subdesc hdr;
1011 struct sq_imm_subdesc imm;
1012 struct sq_gather_subdesc gather;
1013 struct sq_crc_subdesc crc;
1014 struct sq_mem_subdesc mem;
1017 NICVF_STATIC_ASSERT(sizeof(union sq_entry_t) == 16);
1019 /* Queue config register formats */
1020 struct rq_cfg { union { struct {
1021 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
1022 uint64_t reserved_2_63:62;
1024 uint64_t reserved_0:1;
1026 uint64_t reserved_0:1;
1028 uint64_t reserved_2_63:62;
1034 struct cq_cfg { union { struct {
1035 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
1036 uint64_t reserved_43_63:21;
1040 uint64_t reserved_35_39:5;
1042 uint64_t reserved_25_31:7;
1044 uint64_t reserved_0_15:16;
1046 uint64_t reserved_0_15:16;
1048 uint64_t reserved_25_31:7;
1050 uint64_t reserved_35_39:5;
1054 uint64_t reserved_43_63:21;
1060 struct sq_cfg { union { struct {
1061 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
1062 uint64_t reserved_32_63:32;
1063 uint64_t cq_limit:8;
1065 uint64_t reserved_18_18:1;
1068 uint64_t reserved_11_15:5;
1070 uint64_t reserved_3_7:5;
1071 uint64_t tstmp_bgx_intf:3;
1073 uint64_t tstmp_bgx_intf:3;
1074 uint64_t reserved_3_7:5;
1076 uint64_t reserved_11_15:5;
1079 uint64_t reserved_18_18:1;
1081 uint64_t cq_limit:8;
1082 uint64_t reserved_32_63:32;
1088 struct rbdr_cfg { union { struct {
1089 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
1090 uint64_t reserved_45_63:19;
1094 uint64_t reserved_36_41:6;
1096 uint64_t reserved_25_31:7;
1098 uint64_t reserved_12_15:4;
1102 uint64_t reserved_12_15:4;
1104 uint64_t reserved_25_31:7;
1106 uint64_t reserved_36_41:6;
1110 uint64_t reserved_45_63:19;
1116 struct pf_qs_cfg { union { struct {
1117 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
1118 uint64_t reserved_32_63:32;
1120 uint64_t reserved_27_30:4;
1121 uint64_t sq_ins_ena:1;
1122 uint64_t sq_ins_pos:6;
1123 uint64_t lock_ena:1;
1124 uint64_t lock_viol_cqe_ena:1;
1125 uint64_t send_tstmp_ena:1;
1127 uint64_t reserved_7_15:9;
1131 uint64_t reserved_7_15:9;
1133 uint64_t send_tstmp_ena:1;
1134 uint64_t lock_viol_cqe_ena:1;
1135 uint64_t lock_ena:1;
1136 uint64_t sq_ins_pos:6;
1137 uint64_t sq_ins_ena:1;
1138 uint64_t reserved_27_30:4;
1140 uint64_t reserved_32_63:32;
1146 struct pf_rq_cfg { union { struct {
1147 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
1148 uint64_t reserved1:1;
1149 uint64_t reserved0:34;
1150 uint64_t strip_pre_l2:1;
1154 uint64_t rbdr_cont_qs:7;
1155 uint64_t rbdr_cont_idx:1;
1156 uint64_t rbdr_strt_qs:7;
1157 uint64_t rbdr_strt_idx:1;
1159 uint64_t rbdr_strt_idx:1;
1160 uint64_t rbdr_strt_qs:7;
1161 uint64_t rbdr_cont_idx:1;
1162 uint64_t rbdr_cont_qs:7;
1166 uint64_t strip_pre_l2:1;
1167 uint64_t reserved0:34;
1168 uint64_t reserved1:1;
1174 struct pf_rq_drop_cfg { union { struct {
1175 #if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
1176 uint64_t rbdr_red:1;
1178 uint64_t reserved3:14;
1179 uint64_t rbdr_pass:8;
1180 uint64_t rbdr_drop:8;
1181 uint64_t reserved2:8;
1184 uint64_t reserved1:8;
1186 uint64_t reserved1:8;
1189 uint64_t reserved2:8;
1190 uint64_t rbdr_drop:8;
1191 uint64_t rbdr_pass:8;
1192 uint64_t reserved3:14;
1194 uint64_t rbdr_red:1;
1200 #endif /* _THUNDERX_NICVF_HW_DEFS_H */