/*
 * Copyright (C) Cavium networks Ltd. 2016.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of Cavium networks nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
33 #ifndef _THUNDERX_NICVF_HW_DEFS_H
34 #define _THUNDERX_NICVF_HW_DEFS_H
/*
 * Virtual function register offsets.
 *
 * Offsets are relative to the VF's BAR0 mapping. Per-VF registers come
 * first, followed by the per-queue-set (QSET) register blocks for
 * completion queues (CQ), receive queues (RQ), send queues (SQ) and
 * receive-buffer descriptor rings (RBDR).
 */
#define NIC_VF_CFG                      (0x000020)
#define NIC_VF_PF_MAILBOX_0_1           (0x000130)
#define NIC_VF_INT                      (0x000200)
#define NIC_VF_INT_W1S                  (0x000220)
#define NIC_VF_ENA_W1C                  (0x000240)
#define NIC_VF_ENA_W1S                  (0x000260)

#define NIC_VNIC_RSS_CFG                (0x0020E0)
#define NIC_VNIC_RSS_KEY_0_4            (0x002200)
#define NIC_VNIC_TX_STAT_0_4            (0x004000)
#define NIC_VNIC_RX_STAT_0_13           (0x004100)
#define NIC_VNIC_RQ_GEN_CFG             (0x010010)

/* Completion queue registers, one block per CQ 0..7 */
#define NIC_QSET_CQ_0_7_CFG             (0x010400)
#define NIC_QSET_CQ_0_7_CFG2            (0x010408)
#define NIC_QSET_CQ_0_7_THRESH          (0x010410)
#define NIC_QSET_CQ_0_7_BASE            (0x010420)
#define NIC_QSET_CQ_0_7_HEAD            (0x010428)
#define NIC_QSET_CQ_0_7_TAIL            (0x010430)
#define NIC_QSET_CQ_0_7_DOOR            (0x010438)
#define NIC_QSET_CQ_0_7_STATUS          (0x010440)
#define NIC_QSET_CQ_0_7_STATUS2         (0x010448)
#define NIC_QSET_CQ_0_7_DEBUG           (0x010450)

/* Receive queue registers, one block per RQ 0..7 */
#define NIC_QSET_RQ_0_7_CFG             (0x010600)
#define NIC_QSET_RQ_0_7_STATUS0         (0x010700)
#define NIC_QSET_RQ_0_7_STATUS1         (0x010708)

/* Send queue registers, one block per SQ 0..7 */
#define NIC_QSET_SQ_0_7_CFG             (0x010800)
#define NIC_QSET_SQ_0_7_THRESH          (0x010810)
#define NIC_QSET_SQ_0_7_BASE            (0x010820)
#define NIC_QSET_SQ_0_7_HEAD            (0x010828)
#define NIC_QSET_SQ_0_7_TAIL            (0x010830)
#define NIC_QSET_SQ_0_7_DOOR            (0x010838)
#define NIC_QSET_SQ_0_7_STATUS          (0x010840)
#define NIC_QSET_SQ_0_7_DEBUG           (0x010848)
#define NIC_QSET_SQ_0_7_STATUS0         (0x010900)
#define NIC_QSET_SQ_0_7_STATUS1         (0x010908)

/* Receive-buffer descriptor ring registers, one block per RBDR 0..1 */
#define NIC_QSET_RBDR_0_1_CFG           (0x010C00)
#define NIC_QSET_RBDR_0_1_THRESH        (0x010C10)
#define NIC_QSET_RBDR_0_1_BASE          (0x010C20)
#define NIC_QSET_RBDR_0_1_HEAD          (0x010C28)
#define NIC_QSET_RBDR_0_1_TAIL          (0x010C30)
#define NIC_QSET_RBDR_0_1_DOOR          (0x010C38)
#define NIC_QSET_RBDR_0_1_STATUS0       (0x010C40)
#define NIC_QSET_RBDR_0_1_STATUS1       (0x010C48)
#define NIC_QSET_RBDR_0_1_PRFCH_STATUS  (0x010C50)
/* vNIC HW Constants */

/* Shift used to build a per-queue register address from a queue number */
#define NIC_Q_NUM_SHIFT                 18

#define MAX_QUEUE_SET                   128
#define MAX_RCV_QUEUES_PER_QS           8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS   2
#define MAX_SND_QUEUES_PER_QS           8
#define MAX_CMP_QUEUES_PER_QS           8

/* Bit positions of the interrupt sources in NIC_VF_INT / NIC_VF_ENA_* */
#define NICVF_INTR_CQ_SHIFT             0
#define NICVF_INTR_SQ_SHIFT             8
#define NICVF_INTR_RBDR_SHIFT           16
#define NICVF_INTR_PKT_DROP_SHIFT       20
#define NICVF_INTR_TCP_TIMER_SHIFT      21
#define NICVF_INTR_MBOX_SHIFT           22
#define NICVF_INTR_QS_ERR_SHIFT         23

/* 8 CQ bits, 8 SQ bits, 2 RBDR bits, then single-bit sources */
#define NICVF_INTR_CQ_MASK              (0xFF << NICVF_INTR_CQ_SHIFT)
#define NICVF_INTR_SQ_MASK              (0xFF << NICVF_INTR_SQ_SHIFT)
#define NICVF_INTR_RBDR_MASK            (0x03 << NICVF_INTR_RBDR_SHIFT)
#define NICVF_INTR_PKT_DROP_MASK        (1 << NICVF_INTR_PKT_DROP_SHIFT)
#define NICVF_INTR_TCP_TIMER_MASK       (1 << NICVF_INTR_TCP_TIMER_SHIFT)
#define NICVF_INTR_MBOX_MASK            (1 << NICVF_INTR_MBOX_SHIFT)
#define NICVF_INTR_QS_ERR_MASK          (1 << NICVF_INTR_QS_ERR_SHIFT)
#define NICVF_INTR_ALL_MASK             (0x7FFFFF)
/* Error bits reported in NIC_QSET_CQ_0_7_STATUS */
#define NICVF_CQ_WR_FULL                (1ULL << 26)
#define NICVF_CQ_WR_DISABLE             (1ULL << 25)
#define NICVF_CQ_WR_FAULT               (1ULL << 24)
/*
 * NOTE(review): the original macro ended with a dangling "\" continuation
 * (its last line was lost); restored as the OR of the three error bits
 * defined immediately above.
 */
#define NICVF_CQ_ERR_MASK               (NICVF_CQ_WR_FULL |\
                                         NICVF_CQ_WR_DISABLE |\
                                         NICVF_CQ_WR_FAULT)
#define NICVF_CQ_CQE_COUNT_MASK         (0xFFFF)
/* Error bits reported in NIC_QSET_SQ_0_7_STATUS */
#define NICVF_SQ_ERR_STOPPED            (1ULL << 21)
#define NICVF_SQ_ERR_SEND               (1ULL << 20)
#define NICVF_SQ_ERR_DPE                (1ULL << 19)
/*
 * NOTE(review): the original macro ended with a dangling "\" continuation
 * (its last lines were lost); restored as the OR of the three error bits
 * defined immediately above.
 */
#define NICVF_SQ_ERR_MASK               (NICVF_SQ_ERR_STOPPED |\
                                         NICVF_SQ_ERR_SEND |\
                                         NICVF_SQ_ERR_DPE)
#define NICVF_SQ_STATUS_STOPPED_BIT     (21)
/* RBDR status fields (NIC_QSET_RBDR_0_1_STATUS0) */
#define NICVF_RBDR_FIFO_STATE_SHIFT     (62)
#define NICVF_RBDR_FIFO_STATE_MASK      (3ULL << NICVF_RBDR_FIFO_STATE_SHIFT)
#define NICVF_RBDR_COUNT_MASK           (0x7FFFF)

/* Queue reset bits in the respective queue CFG registers */
#define NICVF_CQ_RESET                  (1ULL << 41)
#define NICVF_SQ_RESET                  (1ULL << 17)
#define NICVF_RBDR_RESET                (1ULL << 43)

/* RSS indirection table / hash key sizing */
#define NIC_MAX_RSS_HASH_BITS           (8)
#define NIC_MAX_RSS_IDR_TBL_SIZE        (1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE               (5)  /* 320 bit key, in 64-bit words */
#define RSS_HASH_KEY_BYTE_SIZE          (40) /* 320 bit key, in bytes */
/* RSS hash-input enable flags (NIC_VNIC_RSS_CFG).  Note bit 6 is unused. */
#define RSS_L2_EXTENDED_HASH_ENA        (1 << 0)
#define RSS_IP_ENA                      (1 << 1)
#define RSS_TCP_ENA                     (1 << 2)
#define RSS_TCP_SYN_ENA                 (1 << 3)
#define RSS_UDP_ENA                     (1 << 4)
#define RSS_L4_EXTENDED_ENA             (1 << 5)
#define RSS_L3_BI_DIRECTION_ENA         (1 << 7)
#define RSS_L4_BI_DIRECTION_ENA         (1 << 8)
#define RSS_TUN_VXLAN_ENA               (1 << 9)
#define RSS_TUN_GENEVE_ENA              (1 << 10)
#define RSS_TUN_NVGRE_ENA               (1 << 11)
/* Supported RBDR ring sizes (entries); encoded relative to RBDR_SIZE_SHIFT */
#define RBDR_QUEUE_SZ_8K                (8 * 1024)
#define RBDR_QUEUE_SZ_16K               (16 * 1024)
#define RBDR_QUEUE_SZ_32K               (32 * 1024)
#define RBDR_QUEUE_SZ_64K               (64 * 1024)
#define RBDR_QUEUE_SZ_128K              (128 * 1024)
#define RBDR_QUEUE_SZ_256K              (256 * 1024)
#define RBDR_QUEUE_SZ_512K              (512 * 1024)
#define RBDR_QUEUE_SZ_MAX               RBDR_QUEUE_SZ_512K

#define RBDR_SIZE_SHIFT                 (13) /* 8k */

/* Supported send queue sizes (entries); encoded relative to SND_QSIZE_SHIFT */
#define SND_QUEUE_SZ_1K                 (1 * 1024)
#define SND_QUEUE_SZ_2K                 (2 * 1024)
#define SND_QUEUE_SZ_4K                 (4 * 1024)
#define SND_QUEUE_SZ_8K                 (8 * 1024)
#define SND_QUEUE_SZ_16K                (16 * 1024)
#define SND_QUEUE_SZ_32K                (32 * 1024)
#define SND_QUEUE_SZ_64K                (64 * 1024)
#define SND_QUEUE_SZ_MAX                SND_QUEUE_SZ_64K

#define SND_QSIZE_SHIFT                 (10) /* 1k */

/* Supported completion queue sizes (entries); relative to CMP_QSIZE_SHIFT */
#define CMP_QUEUE_SZ_1K                 (1 * 1024)
#define CMP_QUEUE_SZ_2K                 (2 * 1024)
#define CMP_QUEUE_SZ_4K                 (4 * 1024)
#define CMP_QUEUE_SZ_8K                 (8 * 1024)
#define CMP_QUEUE_SZ_16K                (16 * 1024)
#define CMP_QUEUE_SZ_32K                (32 * 1024)
#define CMP_QUEUE_SZ_64K                (64 * 1024)
#define CMP_QUEUE_SZ_MAX                CMP_QUEUE_SZ_64K

#define CMP_QSIZE_SHIFT                 (10) /* 1k */

/* Valid range of the encoded QSIZE field written to the CFG registers */
#define NICVF_QSIZE_MIN_VAL             (0)
#define NICVF_QSIZE_MAX_VAL             (6)
/* Min/Max packet size */
#define NIC_HW_MIN_FRS                  (64)
#define NIC_HW_MAX_FRS                  (9200) /* 9216 max pkt including FCS */
#define NIC_HW_MAX_SEGS                 (12)

/* Descriptor alignments (required base alignment of queue rings) */
#define NICVF_RBDR_BASE_ALIGN_BYTES     (128) /* 7 bits */
#define NICVF_CQ_BASE_ALIGN_BYTES       (512) /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES       (128) /* 7 bits */

/* 64-bit word index of the buffer pointer inside an RX completion entry */
#define NICVF_CQE_RBPTR_WORD            (6)
#define NICVF_CQE_RX2_RBPTR_WORD        (7)

/* Compile-time layout check; stringizes the condition as the message */
#define NICVF_STATIC_ASSERT(s) _Static_assert(s, #s)
/* Physical (DMA) address as programmed into the hardware: always 64-bit. */
typedef uint64_t nicvf_phys_addr_t;

/*
 * The bitfield layouts below depend on host byte order; refuse to build
 * with a compiler that does not advertise it.
 * NOTE(review): the closing #endif was lost in extraction; restored here.
 */
#ifndef __BYTE_ORDER__
#error __BYTE_ORDER__ not defined
#endif
217 /* vNIC HW Enumerations */
219 enum nic_send_ld_type_e {
220 NIC_SEND_LD_TYPE_E_LDD,
221 NIC_SEND_LD_TYPE_E_LDT,
222 NIC_SEND_LD_TYPE_E_LDWB,
223 NIC_SEND_LD_TYPE_E_ENUM_LAST,
226 enum ether_type_algorithm {
231 ETYPE_ALG_VLAN_STRIP,
238 L3TYPE_IPV4_OPTIONS = 0x5,
240 L3TYPE_IPV6_OPTIONS = 0x7,
241 L3TYPE_ET_STOP = 0xD,
245 #define NICVF_L3TYPE_OPTIONS_MASK ((uint8_t)1)
246 #define NICVF_L3TYPE_IPVX_MASK ((uint8_t)0x06)
261 /* CPI and RSSI configuration */
262 enum cpi_algorithm_type {
269 enum rss_algorithm_type {
284 RSS_HASH_TCP_SYN_DIS,
292 /* Completion queue entry types */
296 CQE_TYPE_RX_SPLIT = 0x3,
297 CQE_TYPE_RX_TCP = 0x4,
299 CQE_TYPE_SEND_PTP = 0x9,
302 enum cqe_rx_tcp_status {
303 CQE_RX_STATUS_VALID_TCP_CNXT,
304 CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
307 enum cqe_send_status {
308 CQE_SEND_STATUS_GOOD,
309 CQE_SEND_STATUS_DESC_FAULT = 0x01,
310 CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
311 CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
312 CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
313 CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
314 CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
315 CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
316 CQE_SEND_STATUS_LOCK_VIOL = 0x84,
317 CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
318 CQE_SEND_STATUS_DATA_FAULT = 0x86,
319 CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
320 CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
321 CQE_SEND_STATUS_MEM_FAULT = 0x89,
322 CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
323 CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
326 enum cqe_rx_tcp_end_reason {
327 CQE_RX_TCP_END_FIN_FLAG_DET,
328 CQE_RX_TCP_END_INVALID_FLAG,
329 CQE_RX_TCP_END_TIMEOUT,
330 CQE_RX_TCP_END_OUT_OF_SEQ,
331 CQE_RX_TCP_END_PKT_ERR,
332 CQE_RX_TCP_END_QS_DISABLED = 0x0F,
335 /* Packet protocol level error enumeration */
336 enum cqe_rx_err_level {
343 /* Packet protocol level error type enumeration */
344 enum cqe_rx_err_opcode {
346 CQE_RX_ERR_RE_PARTIAL,
347 CQE_RX_ERR_RE_JABBER,
348 CQE_RX_ERR_RE_FCS = 0x7,
349 CQE_RX_ERR_RE_TERMINATE = 0x9,
350 CQE_RX_ERR_RE_RX_CTL = 0xb,
351 CQE_RX_ERR_PREL2_ERR = 0x1f,
352 CQE_RX_ERR_L2_FRAGMENT = 0x20,
353 CQE_RX_ERR_L2_OVERRUN = 0x21,
354 CQE_RX_ERR_L2_PFCS = 0x22,
355 CQE_RX_ERR_L2_PUNY = 0x23,
356 CQE_RX_ERR_L2_MAL = 0x24,
357 CQE_RX_ERR_L2_OVERSIZE = 0x25,
358 CQE_RX_ERR_L2_UNDERSIZE = 0x26,
359 CQE_RX_ERR_L2_LENMISM = 0x27,
360 CQE_RX_ERR_L2_PCLP = 0x28,
361 CQE_RX_ERR_IP_NOT = 0x41,
362 CQE_RX_ERR_IP_CHK = 0x42,
363 CQE_RX_ERR_IP_MAL = 0x43,
364 CQE_RX_ERR_IP_MALD = 0x44,
365 CQE_RX_ERR_IP_HOP = 0x45,
366 CQE_RX_ERR_L3_ICRC = 0x46,
367 CQE_RX_ERR_L3_PCLP = 0x47,
368 CQE_RX_ERR_L4_MAL = 0x61,
369 CQE_RX_ERR_L4_CHK = 0x62,
370 CQE_RX_ERR_UDP_LEN = 0x63,
371 CQE_RX_ERR_L4_PORT = 0x64,
372 CQE_RX_ERR_TCP_FLAG = 0x65,
373 CQE_RX_ERR_TCP_OFFSET = 0x66,
374 CQE_RX_ERR_L4_PCLP = 0x67,
375 CQE_RX_ERR_RBDR_TRUNC = 0x70,
378 enum send_l4_csum_type {
379 SEND_L4_CSUM_DISABLE,
390 enum send_load_type {
396 enum send_mem_alg_type {
398 SEND_MEMALG_ADD = 0x08,
399 SEND_MEMALG_SUB = 0x09,
400 SEND_MEMALG_ADDLEN = 0x0A,
401 SEND_MEMALG_SUBLEN = 0x0B,
404 enum send_mem_dsz_type {
407 SEND_MEMDSZ_B8 = 0x03,
410 enum sq_subdesc_type {
411 SQ_DESC_TYPE_INVALID,
414 SQ_DESC_TYPE_IMMEDIATE,
438 L4_UDP_GENEVE = 0x09,
452 RBDR_FIFO_STATE_INACTIVE,
453 RBDR_FIFO_STATE_ACTIVE,
454 RBDR_FIFO_STATE_RESET,
455 RBDR_FIFO_STATE_FAIL,
458 enum rq_cache_allocation {
461 RQ_CACHE_ALLOC_FIRST,
465 enum cq_rx_errlvl_e {
474 CQ_RX_ERROP_RE_PARTIAL = 0x1,
475 CQ_RX_ERROP_RE_JABBER = 0x2,
476 CQ_RX_ERROP_RE_FCS = 0x7,
477 CQ_RX_ERROP_RE_TERMINATE = 0x9,
478 CQ_RX_ERROP_RE_RX_CTL = 0xb,
479 CQ_RX_ERROP_PREL2_ERR = 0x1f,
480 CQ_RX_ERROP_L2_FRAGMENT = 0x20,
481 CQ_RX_ERROP_L2_OVERRUN = 0x21,
482 CQ_RX_ERROP_L2_PFCS = 0x22,
483 CQ_RX_ERROP_L2_PUNY = 0x23,
484 CQ_RX_ERROP_L2_MAL = 0x24,
485 CQ_RX_ERROP_L2_OVERSIZE = 0x25,
486 CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
487 CQ_RX_ERROP_L2_LENMISM = 0x27,
488 CQ_RX_ERROP_L2_PCLP = 0x28,
489 CQ_RX_ERROP_IP_NOT = 0x41,
490 CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
491 CQ_RX_ERROP_IP_MAL = 0x43,
492 CQ_RX_ERROP_IP_MALD = 0x44,
493 CQ_RX_ERROP_IP_HOP = 0x45,
494 CQ_RX_ERROP_L3_ICRC = 0x46,
495 CQ_RX_ERROP_L3_PCLP = 0x47,
496 CQ_RX_ERROP_L4_MAL = 0x61,
497 CQ_RX_ERROP_L4_CHK = 0x62,
498 CQ_RX_ERROP_UDP_LEN = 0x63,
499 CQ_RX_ERROP_L4_PORT = 0x64,
500 CQ_RX_ERROP_TCP_FLAG = 0x65,
501 CQ_RX_ERROP_TCP_OFFSET = 0x66,
502 CQ_RX_ERROP_L4_PCLP = 0x67,
503 CQ_RX_ERROP_RBDR_TRUNC = 0x70,
508 CQ_TX_ERROP_DESC_FAULT = 0x10,
509 CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
510 CQ_TX_ERROP_SUBDC_ERR = 0x12,
511 CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
512 CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
513 CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
514 CQ_TX_ERROP_LOCK_VIOL = 0x83,
515 CQ_TX_ERROP_DATA_FAULT = 0x84,
516 CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
517 CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
518 CQ_TX_ERROP_MEM_FAULT = 0x87,
519 CQ_TX_ERROP_CK_OVERLAP = 0x88,
520 CQ_TX_ERROP_CK_OFLOW = 0x89,
521 CQ_TX_ERROP_ENUM_LAST = 0x8a,
524 enum rq_sq_stats_reg_offset {
529 enum nic_stat_vnic_rx_e {
546 enum nic_stat_vnic_tx_e {
554 /* vNIC HW Register structures */
559 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
561 uint64_t stdn_fault:1;
569 uint64_t vlan_found:1;
570 uint64_t vlan_stripped:1;
571 uint64_t vlan2_found:1;
572 uint64_t vlan2_stripped:1;
575 uint64_t l2_present:1;
576 uint64_t err_level:3;
577 uint64_t err_opcode:8;
579 uint64_t err_opcode:8;
580 uint64_t err_level:3;
581 uint64_t l2_present:1;
584 uint64_t vlan2_stripped:1;
585 uint64_t vlan2_found:1;
586 uint64_t vlan_stripped:1;
587 uint64_t vlan_found:1;
595 uint64_t stdn_fault:1;
604 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
609 uint64_t cq_pkt_len:8;
610 uint64_t align_pad:3;
616 uint64_t align_pad:3;
617 uint64_t cq_pkt_len:8;
629 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
631 uint64_t vlan_tci:16;
633 uint64_t vlan2_ptr:8;
635 uint64_t vlan2_ptr:8;
637 uint64_t vlan_tci:16;
646 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
663 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
680 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
697 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
698 uint64_t vlan_found:1;
699 uint64_t vlan_stripped:1;
700 uint64_t vlan2_found:1;
701 uint64_t vlan2_stripped:1;
704 uint64_t inner_l4type:4;
705 uint64_t inner_l3type:4;
707 uint64_t vlan2_ptr:8;
710 uint64_t inner_l3ptr:8;
711 uint64_t inner_l4ptr:8;
713 uint64_t inner_l4ptr:8;
714 uint64_t inner_l3ptr:8;
717 uint64_t vlan2_ptr:8;
719 uint64_t inner_l3type:4;
720 uint64_t inner_l4type:4;
723 uint64_t vlan2_stripped:1;
724 uint64_t vlan2_found:1;
725 uint64_t vlan_stripped:1;
726 uint64_t vlan_found:1;
732 cqe_rx_word0_t word0;
733 cqe_rx_word1_t word1;
734 cqe_rx_word2_t word2;
735 cqe_rx_word3_t word3;
736 cqe_rx_word4_t word4;
737 cqe_rx_word5_t word5;
738 cqe_rx2_word6_t word6; /* if NIC_PF_RX_CFG[CQE_RX2_ENA] set */
741 struct cqe_rx_tcp_err_t {
742 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
743 uint64_t cqe_type:4; /* W0 */
746 uint64_t rsvd1:4; /* W1 */
747 uint64_t partial_first:1;
749 uint64_t rbdr_bytes:8;
756 uint64_t rbdr_bytes:8;
758 uint64_t partial_first:1;
763 struct cqe_rx_tcp_t {
764 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
765 uint64_t cqe_type:4; /* W0 */
767 uint64_t cq_tcp_status:8;
769 uint64_t rsvd1:32; /* W1 */
770 uint64_t tcp_cntx_bytes:8;
772 uint64_t tcp_err_bytes:16;
774 uint64_t cq_tcp_status:8;
776 uint64_t cqe_type:4; /* W0 */
778 uint64_t tcp_err_bytes:16;
780 uint64_t tcp_cntx_bytes:8;
781 uint64_t rsvd1:32; /* W1 */
786 #if defined(__BIG_ENDIAN_BITFIELD)
787 uint64_t cqe_type:4; /* W0 */
795 uint64_t send_status:8;
797 uint64_t ptp_timestamp:64; /* W1 */
798 #elif defined(__LITTLE_ENDIAN_BITFIELD)
799 uint64_t send_status:8;
807 uint64_t cqe_type:4; /* W0 */
809 uint64_t ptp_timestamp:64;
813 struct cq_entry_type_t {
814 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
825 struct cq_entry_type_t type;
826 struct cqe_rx_t rx_hdr;
827 struct cqe_rx_tcp_t rx_tcp_hdr;
828 struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
829 struct cqe_send_t cqe_send;
832 NICVF_STATIC_ASSERT(sizeof(union cq_entry_t) == 512);
834 struct rbdr_entry_t {
835 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
839 uint64_t buf_addr:42;
840 uint64_t cache_align:7;
842 nicvf_phys_addr_t full_addr;
847 uint64_t cache_align:7;
848 uint64_t buf_addr:42;
851 nicvf_phys_addr_t full_addr;
856 NICVF_STATIC_ASSERT(sizeof(struct rbdr_entry_t) == sizeof(uint64_t));
858 /* TCP reassembly context */
859 struct rbe_tcp_cnxt_t {
860 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
861 uint64_t tcp_pkt_cnt:12;
863 uint64_t align_hdr_bytes:4;
864 uint64_t align_ptr_bytes:4;
865 uint64_t ptr_bytes:16;
869 uint64_t tcp_end_reason:2;
870 uint64_t tcp_status:4;
872 uint64_t tcp_status:4;
873 uint64_t tcp_end_reason:2;
877 uint64_t ptr_bytes:16;
878 uint64_t align_ptr_bytes:4;
879 uint64_t align_hdr_bytes:4;
881 uint64_t tcp_pkt_cnt:12;
885 /* Always Big endian */
889 uint64_t skip_length:6;
890 uint64_t disable_rss:1;
891 uint64_t disable_tcp_reassembly:1;
898 struct sq_crc_subdesc {
899 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
901 uint64_t crc_ival:32;
902 uint64_t subdesc_type:4;
905 uint64_t crc_insert_pos:16;
906 uint64_t hdr_start:16;
910 uint64_t hdr_start:16;
911 uint64_t crc_insert_pos:16;
914 uint64_t subdesc_type:4;
915 uint64_t crc_ival:32;
920 struct sq_gather_subdesc {
921 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
922 uint64_t subdesc_type:4; /* W0 */
927 uint64_t rsvd1:15; /* W1 */
933 uint64_t subdesc_type:4; /* W0 */
936 uint64_t rsvd1:15; /* W1 */
940 /* SQ immediate subdescriptor */
941 struct sq_imm_subdesc {
942 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
943 uint64_t subdesc_type:4; /* W0 */
947 uint64_t data:64; /* W1 */
951 uint64_t subdesc_type:4; /* W0 */
953 uint64_t data:64; /* W1 */
957 struct sq_mem_subdesc {
958 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
959 uint64_t subdesc_type:4; /* W0 */
966 uint64_t rsvd1:15; /* W1 */
974 uint64_t subdesc_type:4; /* W0 */
977 uint64_t rsvd1:15; /* W1 */
981 struct sq_hdr_subdesc {
982 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
983 uint64_t subdesc_type:4;
985 uint64_t post_cqe:1; /* Post CQE on no error also */
986 uint64_t dont_send:1;
988 uint64_t subdesc_cnt:8;
991 uint64_t csum_inner_l4:2;
992 uint64_t csum_inner_l3:1;
994 uint64_t l4_offset:8;
995 uint64_t l3_offset:8;
997 uint64_t tot_len:20; /* W0 */
1000 uint64_t inner_l4_offset:8;
1001 uint64_t inner_l3_offset:8;
1002 uint64_t tso_start:8;
1004 uint64_t tso_max_paysize:14; /* W1 */
1006 uint64_t tot_len:20;
1008 uint64_t l3_offset:8;
1009 uint64_t l4_offset:8;
1011 uint64_t csum_inner_l3:1;
1012 uint64_t csum_inner_l4:2;
1015 uint64_t subdesc_cnt:8;
1017 uint64_t dont_send:1;
1018 uint64_t post_cqe:1; /* Post CQE on no error also */
1020 uint64_t subdesc_type:4; /* W0 */
1022 uint64_t tso_max_paysize:14;
1024 uint64_t tso_start:8;
1025 uint64_t inner_l3_offset:8;
1026 uint64_t inner_l4_offset:8;
1027 uint64_t rsvd2:24; /* W1 */
1031 /* Each sq entry is 128 bits wide */
1034 struct sq_hdr_subdesc hdr;
1035 struct sq_imm_subdesc imm;
1036 struct sq_gather_subdesc gather;
1037 struct sq_crc_subdesc crc;
1038 struct sq_mem_subdesc mem;
1041 NICVF_STATIC_ASSERT(sizeof(union sq_entry_t) == 16);
1043 /* Queue config register formats */
1044 struct rq_cfg { union { struct {
1045 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1046 uint64_t reserved_2_63:62;
1048 uint64_t reserved_0:1;
1050 uint64_t reserved_0:1;
1052 uint64_t reserved_2_63:62;
1058 struct cq_cfg { union { struct {
1059 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1060 uint64_t reserved_43_63:21;
1064 uint64_t reserved_35_39:5;
1066 uint64_t reserved_25_31:7;
1068 uint64_t reserved_0_15:16;
1070 uint64_t reserved_0_15:16;
1072 uint64_t reserved_25_31:7;
1074 uint64_t reserved_35_39:5;
1078 uint64_t reserved_43_63:21;
1084 struct sq_cfg { union { struct {
1085 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1086 uint64_t reserved_20_63:44;
1088 uint64_t reserved_18_18:1;
1091 uint64_t reserved_11_15:5;
1093 uint64_t reserved_3_7:5;
1094 uint64_t tstmp_bgx_intf:3;
1096 uint64_t tstmp_bgx_intf:3;
1097 uint64_t reserved_3_7:5;
1099 uint64_t reserved_11_15:5;
1102 uint64_t reserved_18_18:1;
1104 uint64_t reserved_20_63:44;
1110 struct rbdr_cfg { union { struct {
1111 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1112 uint64_t reserved_45_63:19;
1116 uint64_t reserved_36_41:6;
1118 uint64_t reserved_25_31:7;
1120 uint64_t reserved_12_15:4;
1124 uint64_t reserved_12_15:4;
1126 uint64_t reserved_25_31:7;
1128 uint64_t reserved_36_41:6;
1132 uint64_t reserved_45_63:19;
1138 struct pf_qs_cfg { union { struct {
1139 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1140 uint64_t reserved_32_63:32;
1142 uint64_t reserved_27_30:4;
1143 uint64_t sq_ins_ena:1;
1144 uint64_t sq_ins_pos:6;
1145 uint64_t lock_ena:1;
1146 uint64_t lock_viol_cqe_ena:1;
1147 uint64_t send_tstmp_ena:1;
1149 uint64_t reserved_7_15:9;
1153 uint64_t reserved_7_15:9;
1155 uint64_t send_tstmp_ena:1;
1156 uint64_t lock_viol_cqe_ena:1;
1157 uint64_t lock_ena:1;
1158 uint64_t sq_ins_pos:6;
1159 uint64_t sq_ins_ena:1;
1160 uint64_t reserved_27_30:4;
1162 uint64_t reserved_32_63:32;
1168 struct pf_rq_cfg { union { struct {
1169 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1170 uint64_t reserved1:1;
1171 uint64_t reserved0:34;
1172 uint64_t strip_pre_l2:1;
1176 uint64_t rbdr_cont_qs:7;
1177 uint64_t rbdr_cont_idx:1;
1178 uint64_t rbdr_strt_qs:7;
1179 uint64_t rbdr_strt_idx:1;
1181 uint64_t rbdr_strt_idx:1;
1182 uint64_t rbdr_strt_qs:7;
1183 uint64_t rbdr_cont_idx:1;
1184 uint64_t rbdr_cont_qs:7;
1188 uint64_t strip_pre_l2:1;
1189 uint64_t reserved0:34;
1190 uint64_t reserved1:1;
1196 struct pf_rq_drop_cfg { union { struct {
1197 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1198 uint64_t rbdr_red:1;
1200 uint64_t reserved3:14;
1201 uint64_t rbdr_pass:8;
1202 uint64_t rbdr_drop:8;
1203 uint64_t reserved2:8;
1206 uint64_t reserved1:8;
1208 uint64_t reserved1:8;
1211 uint64_t reserved2:8;
1212 uint64_t rbdr_drop:8;
1213 uint64_t rbdr_pass:8;
1214 uint64_t reserved3:14;
1216 uint64_t rbdr_red:1;
1222 #endif /* _THUNDERX_NICVF_HW_DEFS_H */