/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
8 #include <rte_ethdev.h>
11 #include "base/igc_osdep.h"
12 #include "base/igc_hw.h"
13 #include "base/igc_i225.h"
14 #include "base/igc_api.h"
/* RSS redirection table size (number of RETA entries). */
#define IGC_RSS_RDT_SIZD		128

/* VLAN filter table size */
#define IGC_VFTA_SIZE			128

/* Number of Tx/Rx queue pairs this driver manages. */
#define IGC_QUEUE_PAIRS_NUM		4

/* RSS hash key is written through 10 registers of 4 bytes each (40 bytes). */
#define IGC_HKEY_MAX_INDEX		10

/* Generic register width (bytes) and the matching byte-lane mask. */
#define IGC_DEFAULT_REG_SIZE		4
#define IGC_DEFAULT_REG_SIZE_MASK	0xf

#define IGC_RSS_RDT_REG_SIZE		IGC_DEFAULT_REG_SIZE
#define IGC_RSS_RDT_REG_SIZE_MASK	IGC_DEFAULT_REG_SIZE_MASK
#define IGC_HKEY_REG_SIZE		IGC_DEFAULT_REG_SIZE
#define IGC_HKEY_SIZE		(IGC_HKEY_REG_SIZE * IGC_HKEY_MAX_INDEX)
/*
 * The overhead from MTU to max frame size.
 * Considering VLAN so tag needs to be counted.
 * VLAN_TAG_SIZE * 2 leaves room for two stacked VLAN tags.
 * NOTE(review): VLAN_TAG_SIZE is defined elsewhere (not visible in this
 * chunk) - confirm against the full file.
 */
#define IGC_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + \
	RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2)
/*
 * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
 * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
 * This will also optimize cache line size effect.
 * H/W supports up to cache line size 128.
 * NOTE(review): IGC_ALIGN is defined elsewhere (presumably 128, per the
 * comment above) - not visible in this chunk, confirm against the full file.
 */
/* Descriptor ring lengths must be a multiple of these values. */
#define IGC_TX_DESCRIPTOR_MULTIPLE 8
#define IGC_RX_DESCRIPTOR_MULTIPLE 8

/* Ring-length granularity expressed in descriptors. */
#define IGC_RXD_ALIGN ((uint16_t)(IGC_ALIGN / \
	sizeof(union igc_adv_rx_desc)))
#define IGC_TXD_ALIGN ((uint16_t)(IGC_ALIGN / \
	sizeof(union igc_adv_tx_desc)))

/* Min/max descriptor counts; 0x80000 bytes is the largest ring size. */
#define IGC_MIN_TXD IGC_TX_DESCRIPTOR_MULTIPLE
#define IGC_MAX_TXD ((uint16_t)(0x80000 / sizeof(union igc_adv_tx_desc)))
#define IGC_MIN_RXD IGC_RX_DESCRIPTOR_MULTIPLE
#define IGC_MAX_RXD ((uint16_t)(0x80000 / sizeof(union igc_adv_rx_desc)))

/* Maximum number of segments per Tx packet / per MTU-sized TSO chunk. */
#define IGC_TX_MAX_SEG UINT8_MAX
#define IGC_TX_MAX_MTU_SEG UINT8_MAX
/* All Rx offload capabilities reported by this driver: VLAN handling,
 * L3/L4 checksum validation, CRC keep, scattered Rx and RSS hash delivery.
 */
#define IGC_RX_OFFLOAD_ALL ( \
	RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
	RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
	RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
	RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
	RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
	RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
	RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
	RTE_ETH_RX_OFFLOAD_SCATTER | \
	RTE_ETH_RX_OFFLOAD_RSS_HASH)
/* All Tx offload capabilities reported by this driver: VLAN insertion,
 * L3/L4 checksum, TCP/UDP segmentation and multi-segment packets.
 */
#define IGC_TX_OFFLOAD_ALL ( \
	RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
	RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
	RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
	RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
	RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
	RTE_ETH_TX_OFFLOAD_TCP_TSO | \
	RTE_ETH_TX_OFFLOAD_UDP_TSO | \
	RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
/* RSS hash types supported by this driver.
 * NOTE(review): this chunk appears to be missing two continuation lines
 * (likely the plain RTE_ETH_RSS_IPV4 / RTE_ETH_RSS_IPV6 entries) - confirm
 * against the full file before relying on this list.
 */
#define IGC_RSS_OFFLOAD_ALL ( \
	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
	RTE_ETH_RSS_IPV6_EX | \
	RTE_ETH_RSS_IPV6_TCP_EX | \
	RTE_ETH_RSS_IPV6_UDP_EX)
/* EtherType (ETQF) filters: indices 0-2 are available to applications;
 * index 3 is reserved for IEEE 1588 timestamping.
 */
#define IGC_MAX_ETQF_FILTERS 3 /* etqf(3) is used for 1588 */
#define IGC_ETQF_FILTER_1588 3
/* Rx-queue field inside an ETQF register: 3 bits at bit 16. */
#define IGC_ETQF_QUEUE_SHIFT 16
#define IGC_ETQF_QUEUE_MASK (7u << IGC_ETQF_QUEUE_SHIFT)

/* n-tuple filters: up to 8 filters, priority values 0-7. */
#define IGC_MAX_NTUPLE_FILTERS 8
#define IGC_NTUPLE_MAX_PRI 7

/* TCP SYN filter register fields. */
#define IGC_SYN_FILTER_ENABLE 0x01 /* syn filter enable field */
#define IGC_SYN_FILTER_QUEUE_SHIFT 1 /* syn filter queue field */
#define IGC_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */
#define IGC_RFCTL_SYNQFP 0x00080000 /* SYNQFP in RFCTL register */
114 /* structure for interrupt relative data */
115 struct igc_interrupt {
120 /* Union of RSS redirect table register */
121 union igc_rss_reta_reg {
126 /* Structure to per-queue statics */
127 struct igc_hw_queue_stats {
128 u64 pqgprc[IGC_QUEUE_PAIRS_NUM];
129 /* per queue good packets received count */
130 u64 pqgptc[IGC_QUEUE_PAIRS_NUM];
131 /* per queue good packets transmitted count */
132 u64 pqgorc[IGC_QUEUE_PAIRS_NUM];
133 /* per queue good octets received count */
134 u64 pqgotc[IGC_QUEUE_PAIRS_NUM];
135 /* per queue good octets transmitted count */
136 u64 pqmprc[IGC_QUEUE_PAIRS_NUM];
137 /* per queue multicast packets received count */
138 u64 rqdpc[IGC_QUEUE_PAIRS_NUM];
139 /* per receive queue drop packet count */
140 u64 tqdpc[IGC_QUEUE_PAIRS_NUM];
141 /* per transmit queue drop packet count */
144 /* local vfta copy */
146 uint32_t vfta[IGC_VFTA_SIZE];
149 /* ethertype filter structure */
150 struct igc_ethertype_filter {
/* Structure of ntuple filter info. */
/* NOTE(review): this definition is truncated in this chunk - the port and
 * priority members and the closing brace are not visible; the comment
 * fragments kept below belong to the missing field documentation. Confirm
 * against the full file.
 */
struct igc_ntuple_info {
	uint8_t proto; /* l4 protocol. */
	/* (fragment)
	 * the packet matched above 2tuple and contain any set bit will hit
	 * seven levels (001b-111b), 111b is highest, used when more than one
	 */
	uint8_t dst_port_mask:1, /* if mask is 1b, do compare dst port. */
		proto_mask:1; /* if mask is 1b, do compare protocol. */
175 /* Structure of n-tuple filter */
176 struct igc_ntuple_filter {
180 struct igc_ntuple_info tuple_info;
/* Structure of TCP SYN filter */
/* NOTE(review): truncated in this chunk - at least one member (presumably
 * the assigned Rx queue, given IGC_SYN_FILTER_QUEUE above) and the closing
 * brace are not visible; confirm against the full file.
 */
struct igc_syn_filter {
	uint8_t hig_pri:1, /* 1 - higher priority than other filters, */
			   /* 0 - lower priority. */
		enable:1; /* 1-enable; 0-disable */
195 /* Structure to store RTE flow RSS configure. */
196 struct igc_rss_filter {
197 struct rte_flow_action_rss conf; /* RSS parameters. */
198 uint8_t key[IGC_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
199 uint16_t queue[IGC_RSS_RDT_SIZD];/* Queues indices to use. */
200 uint8_t enable; /* 1-enabled, 0-disabled */
203 /* Feature filter types */
204 enum igc_filter_type {
205 IGC_FILTER_TYPE_ETHERTYPE,
206 IGC_FILTER_TYPE_NTUPLE,
211 /* Structure to store flow */
213 TAILQ_ENTRY(rte_flow) node;
214 enum igc_filter_type filter_type;
216 char filter[0]; /* filter data */
/* Flow list header: declares struct igc_flow_list, a TAILQ of struct
 * rte_flow nodes.
 */
TAILQ_HEAD(igc_flow_list, rte_flow);
/*
 * Structure to store private data for each driver instance (for each port).
 * NOTE(review): the opening "struct igc_adapter {" line and any leading
 * member(s) (likely "struct igc_hw hw;", given the IGC_DEV_PRIVATE_HW
 * accessor in this file) are missing from this chunk - confirm against the
 * full file.
 */
	struct igc_hw_stats stats;		/* port-level HW statistics */
	struct igc_hw_queue_stats queue_stats;	/* per-queue HW statistics */
	/* Queue-to-stats-slot maps; exact sentinel semantics defined by the
	 * stats code elsewhere - presumably <0 means unmapped (verify).
	 */
	int16_t txq_stats_map[IGC_QUEUE_PAIRS_NUM];
	int16_t rxq_stats_map[IGC_QUEUE_PAIRS_NUM];

	struct igc_interrupt intr;		/* interrupt state */
	struct igc_vfta shadow_vfta;		/* local copy of VLAN filter table */

	/* Flow-filter state owned by this port. */
	struct igc_ethertype_filter ethertype_filters[IGC_MAX_ETQF_FILTERS];
	struct igc_ntuple_filter ntuple_filters[IGC_MAX_NTUPLE_FILTERS];
	struct igc_syn_filter syn_filter;
	struct igc_rss_filter rss_filter;
	struct igc_flow_list flow_list;		/* list of created rte_flow objects */
/*
 * Accessors from a struct rte_eth_dev pointer to the per-port private data
 * (struct igc_adapter) and to its individual members.
 */
#define IGC_DEV_PRIVATE(_dev) ((_dev)->data->dev_private)

#define IGC_DEV_PRIVATE_HW(_dev) \
	(&((struct igc_adapter *)(_dev)->data->dev_private)->hw)

#define IGC_DEV_PRIVATE_STATS(_dev) \
	(&((struct igc_adapter *)(_dev)->data->dev_private)->stats)

#define IGC_DEV_PRIVATE_QUEUE_STATS(_dev) \
	(&((struct igc_adapter *)(_dev)->data->dev_private)->queue_stats)

#define IGC_DEV_PRIVATE_INTR(_dev) \
	(&((struct igc_adapter *)(_dev)->data->dev_private)->intr)

#define IGC_DEV_PRIVATE_VFTA(_dev) \
	(&((struct igc_adapter *)(_dev)->data->dev_private)->shadow_vfta)

#define IGC_DEV_PRIVATE_RSS_FILTER(_dev) \
	(&((struct igc_adapter *)(_dev)->data->dev_private)->rss_filter)

#define IGC_DEV_PRIVATE_FLOW_LIST(_dev) \
	(&((struct igc_adapter *)(_dev)->data->dev_private)->flow_list)
/*
 * Read @reg, OR @bits into the current value and write the result back
 * only if it differs from what was read (skips a redundant MMIO write).
 * NOTE(review): the storage-class specifier, braces and the OR/compare
 * lines were missing from this chunk; reconstructed from the visible
 * read/return/write statements and the clear-bits sibling below - confirm
 * against the full file.
 */
static inline void
igc_read_reg_check_set_bits(struct igc_hw *hw, uint32_t reg, uint32_t bits)
{
	uint32_t reg_val = IGC_READ_REG(hw, reg);

	bits |= reg_val;
	if (bits == reg_val)
		return; /* no need to write back */

	IGC_WRITE_REG(hw, reg, bits);
}
/*
 * Read @reg, clear @bits from the current value and write the result back
 * only if it differs from what was read (skips a redundant MMIO write).
 * NOTE(review): the storage-class specifier, braces and the compare line
 * were missing from this chunk; reconstructed from the visible statements -
 * confirm against the full file.
 */
static inline void
igc_read_reg_check_clear_bits(struct igc_hw *hw, uint32_t reg, uint32_t bits)
{
	uint32_t reg_val = IGC_READ_REG(hw, reg);

	bits = reg_val & ~bits;
	if (bits == reg_val)
		return; /* no need to write back */

	IGC_WRITE_REG(hw, reg, bits);
}
294 #endif /* _IGC_ETHDEV_H_ */