1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
5 #ifndef _E1000_ETHDEV_H_
6 #define _E1000_ETHDEV_H_
10 #define E1000_INTEL_VENDOR_ID 0x8086
12 /* need update link, bit flag */
13 #define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
14 #define E1000_FLAG_MAILBOX (uint32_t)(1 << 1)
17 * Defines that were not part of e1000_hw.h as they are not used by the FreeBSD driver.
20 #define E1000_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */
21 #define E1000_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */
22 #define E1000_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE of Reserved */
23 #define E1000_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */
24 #define E1000_RXD_ERR_CKSUM_BIT 29
25 #define E1000_RXD_ERR_CKSUM_MSK 3
26 #define E1000_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */
27 #define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */
28 #define IGB_VFTA_SIZE 128
30 #define IGB_MAX_RX_QUEUE_NUM 8
31 #define IGB_MAX_RX_QUEUE_NUM_82576 16
33 #define E1000_SYN_FILTER_ENABLE 0x00000001 /* syn filter enable field */
34 #define E1000_SYN_FILTER_QUEUE 0x0000000E /* syn filter queue field */
35 #define E1000_SYN_FILTER_QUEUE_SHIFT 1 /* bit shift of the syn filter queue field */
36 #define E1000_RFCTL_SYNQFP 0x00080000 /* SYNQFP in RFCTL register */
38 #define E1000_ETQF_ETHERTYPE 0x0000FFFF
39 #define E1000_ETQF_QUEUE 0x00070000
40 #define E1000_ETQF_QUEUE_SHIFT 16
41 #define E1000_MAX_ETQF_FILTERS 8
/* NOTE(review): IMIR/TTQF registers appear to back the 2-tuple filters
 * (see e1000_2tuple_filter below) — confirm against the igb filter code.
 */
43 #define E1000_IMIR_DSTPORT 0x0000FFFF /* destination port field */
44 #define E1000_IMIR_PRIORITY 0xE0000000 /* filter priority field */
45 #define E1000_MAX_TTQF_FILTERS 8 /* max number of TTQF filters */
46 #define E1000_2TUPLE_MAX_PRI 7 /* highest 2-tuple priority (3-bit field) */
48 #define E1000_MAX_FLEX_FILTERS 8
49 #define E1000_MAX_FHFT 4
50 #define E1000_MAX_FHFT_EXT 4
51 #define E1000_FHFT_SIZE_IN_DWD 64
52 #define E1000_MAX_FLEX_FILTER_PRI 7
53 #define E1000_MAX_FLEX_FILTER_LEN 128
54 #define E1000_MAX_FLEX_FILTER_DWDS \
55 (E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
56 #define E1000_FLEX_FILTERS_MASK_SIZE \
57 (E1000_MAX_FLEX_FILTER_DWDS / 2)
/* Layout of the queueing dword inside a flex host filter table (FHFT) entry.
 * The dword sits at byte offset 0xFC, i.e. the last of the table's
 * E1000_FHFT_SIZE_IN_DWD (64) dwords.
 */
58 #define E1000_FHFT_QUEUEING_LEN 0x0000007F /* filter length field */
59 #define E1000_FHFT_QUEUEING_QUEUE 0x00000700 /* rx queue field */
60 #define E1000_FHFT_QUEUEING_PRIO 0x00070000 /* priority field */
61 #define E1000_FHFT_QUEUEING_OFFSET 0xFC /* byte offset of the queueing dword */
62 #define E1000_FHFT_QUEUEING_QUEUE_SHIFT 8 /* bit shift of the queue field */
63 #define E1000_FHFT_QUEUEING_PRIO_SHIFT 16 /* bit shift of the priority field */
64 #define E1000_WUFC_FLEX_HQ 0x00004000 /* flex filter enable bit in WUFC */
66 #define E1000_SPQF_SRCPORT 0x0000FFFF
68 #define E1000_MAX_FTQF_FILTERS 8
69 #define E1000_FTQF_PROTOCOL_MASK 0x000000FF
70 #define E1000_FTQF_5TUPLE_MASK_SHIFT 28
71 #define E1000_FTQF_QUEUE_MASK 0x03ff0000
72 #define E1000_FTQF_QUEUE_SHIFT 16
73 #define E1000_FTQF_QUEUE_ENABLE 0x00000100
75 #define IGB_RSS_OFFLOAD_ALL ( \
77 ETH_RSS_NONFRAG_IPV4_TCP | \
78 ETH_RSS_NONFRAG_IPV4_UDP | \
80 ETH_RSS_NONFRAG_IPV6_TCP | \
81 ETH_RSS_NONFRAG_IPV6_UDP | \
83 ETH_RSS_IPV6_TCP_EX | \
87 * Maximum number of Ring Descriptors.
89 * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
90 * descriptors should meet the following condition:
91 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
93 #define E1000_MIN_RING_DESC 32
94 #define E1000_MAX_RING_DESC 4096
97 * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
98 * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
99 * This will also optimize cache line size effect.
100 * H/W supports up to cache line size 128.
102 #define E1000_ALIGN 128
104 #define IGB_RXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
105 #define IGB_TXD_ALIGN (E1000_ALIGN / sizeof(union e1000_adv_tx_desc))
107 #define EM_RXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_rx_desc))
108 #define EM_TXD_ALIGN (E1000_ALIGN / sizeof(struct e1000_data_desc))
110 #define E1000_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
111 #define E1000_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
113 #define IGB_TX_MAX_SEG UINT8_MAX
114 #define IGB_TX_MAX_MTU_SEG UINT8_MAX
115 #define EM_TX_MAX_SEG UINT8_MAX
116 #define EM_TX_MAX_MTU_SEG UINT8_MAX
118 #define MAC_TYPE_FILTER_SUP(type) do {\
119 if ((type) != e1000_82580 && (type) != e1000_i350 &&\
120 (type) != e1000_82576 && (type) != e1000_i210 &&\
121 (type) != e1000_i211)\
125 #define MAC_TYPE_FILTER_SUP_EXT(type) do {\
126 if ((type) != e1000_82580 && (type) != e1000_i350 &&\
127 (type) != e1000_i210 && (type) != e1000_i211)\
131 /* structure for interrupt relative data */
132 struct e1000_interrupt {
137 /* local vfta copy */
139 uint32_t vfta[IGB_VFTA_SIZE];
143 * VF data which used by PF host only
145 #define E1000_MAX_VF_MC_ENTRIES 30
146 struct e1000_vf_info {
147 uint8_t vf_mac_addresses[ETHER_ADDR_LEN];
148 uint16_t vf_mc_hashes[E1000_MAX_VF_MC_ENTRIES];
149 uint16_t num_vf_mc_hashes;
150 uint16_t default_vf_vlan_id;
151 uint16_t vlans_enabled;
157 TAILQ_HEAD(e1000_flex_filter_list, e1000_flex_filter);
159 struct e1000_flex_filter_info {
161 uint32_t dwords[E1000_MAX_FLEX_FILTER_DWDS]; /* flex bytes in dword. */
162 /* if mask bit is 1b, do not compare corresponding byte in dwords. */
163 uint8_t mask[E1000_FLEX_FILTERS_MASK_SIZE];
167 /* Flex filter structure */
168 struct e1000_flex_filter {
169 TAILQ_ENTRY(e1000_flex_filter) entries;
170 uint16_t index; /* index of flex filter */
171 struct e1000_flex_filter_info filter_info;
172 uint16_t queue; /* rx queue assigned to */
175 TAILQ_HEAD(e1000_5tuple_filter_list, e1000_5tuple_filter);
176 TAILQ_HEAD(e1000_2tuple_filter_list, e1000_2tuple_filter);
178 struct e1000_5tuple_filter_info {
183 uint8_t proto; /* l4 protocol. */
184 /* a packet that matches the above 5-tuple and contains any set bit will hit this filter. */
186 uint8_t priority; /* seven levels (001b-111b), 111b is highest,
187 used when more than one filter matches. */
188 uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */
189 src_ip_mask:1, /* if mask is 1b, do not compare src ip. */
190 dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
191 src_port_mask:1, /* if mask is 1b, do not compare src port. */
192 proto_mask:1; /* if mask is 1b, do not compare protocol. */
195 struct e1000_2tuple_filter_info {
197 uint8_t proto; /* l4 protocol. */
198 /* a packet that matches the above 2-tuple and contains any set bit will hit this filter. */
200 uint8_t priority; /* seven levels (001b-111b), 111b is highest,
201 used when more than one filter matches. */
202 uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */
203 src_ip_mask:1, /* if mask is 1b, do not compare src ip. */
204 dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
205 src_port_mask:1, /* if mask is 1b, do not compare src port. */
206 proto_mask:1; /* if mask is 1b, do not compare protocol. */
209 /* 5tuple filter structure */
210 struct e1000_5tuple_filter {
211 TAILQ_ENTRY(e1000_5tuple_filter) entries;
212 uint16_t index; /* the index of 5tuple filter */
213 struct e1000_5tuple_filter_info filter_info;
214 uint16_t queue; /* rx queue assigned to */
217 /* 2tuple filter structure */
218 struct e1000_2tuple_filter {
219 TAILQ_ENTRY(e1000_2tuple_filter) entries;
220 uint16_t index; /* the index of 2tuple filter */
221 struct e1000_2tuple_filter_info filter_info;
222 uint16_t queue; /* rx queue assigned to */
225 /* ethertype filter structure */
226 struct igb_ethertype_filter {
231 struct igb_rte_flow_rss_conf {
232 struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
233 uint16_t num; /**< Number of entries in queue[]. */
234 uint16_t queue[IGB_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
238 * Structure to store filters'info.
240 struct e1000_filter_info {
241 uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
242 /* store used ethertype filters*/
243 struct igb_ethertype_filter ethertype_filters[E1000_MAX_ETQF_FILTERS];
244 uint8_t flex_mask; /* Bit mask for every used flex filter */
245 struct e1000_flex_filter_list flex_list;
246 /* Bit mask for every used 5tuple filter */
247 uint8_t fivetuple_mask;
248 struct e1000_5tuple_filter_list fivetuple_list;
249 /* Bit mask for every used 2tuple filter */
250 uint8_t twotuple_mask;
251 struct e1000_2tuple_filter_list twotuple_list;
252 /* store the SYN filter info */
254 /* store the rss filter info */
255 struct igb_rte_flow_rss_conf rss_info;
259 * Structure to store private data for each driver instance (for each port).
261 struct e1000_adapter {
263 struct e1000_hw_stats stats;
264 struct e1000_interrupt intr;
265 struct e1000_vfta shadow_vfta;
266 struct e1000_vf_info *vfdata;
267 struct e1000_filter_info filter;
269 struct rte_timecounter systime_tc;
270 struct rte_timecounter rx_tstamp_tc;
271 struct rte_timecounter tx_tstamp_tc;
274 #define E1000_DEV_PRIVATE(adapter) \
275 ((struct e1000_adapter *)adapter)
277 #define E1000_DEV_PRIVATE_TO_HW(adapter) \
278 (&((struct e1000_adapter *)adapter)->hw)
280 #define E1000_DEV_PRIVATE_TO_STATS(adapter) \
281 (&((struct e1000_adapter *)adapter)->stats)
283 #define E1000_DEV_PRIVATE_TO_INTR(adapter) \
284 (&((struct e1000_adapter *)adapter)->intr)
286 #define E1000_DEV_PRIVATE_TO_VFTA(adapter) \
287 (&((struct e1000_adapter *)adapter)->shadow_vfta)
289 #define E1000_DEV_PRIVATE_TO_P_VFDATA(adapter) \
290 (&((struct e1000_adapter *)adapter)->vfdata)
292 #define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
293 (&((struct e1000_adapter *)adapter)->filter)
296 enum rte_filter_type filter_type;
300 /* ntuple filter list structure */
301 struct igb_ntuple_filter_ele {
302 TAILQ_ENTRY(igb_ntuple_filter_ele) entries;
303 struct rte_eth_ntuple_filter filter_info;
306 /* ethertype filter list structure */
307 struct igb_ethertype_filter_ele {
308 TAILQ_ENTRY(igb_ethertype_filter_ele) entries;
309 struct rte_eth_ethertype_filter filter_info;
312 /* syn filter list structure */
313 struct igb_eth_syn_filter_ele {
314 TAILQ_ENTRY(igb_eth_syn_filter_ele) entries;
315 struct rte_eth_syn_filter filter_info;
318 /* flex filter list structure */
319 struct igb_flex_filter_ele {
320 TAILQ_ENTRY(igb_flex_filter_ele) entries;
321 struct rte_eth_flex_filter filter_info;
324 /* rss filter list structure */
325 struct igb_rss_conf_ele {
326 TAILQ_ENTRY(igb_rss_conf_ele) entries;
327 struct igb_rte_flow_rss_conf filter_info;
330 /* igb_flow memory list structure */
331 struct igb_flow_mem {
332 TAILQ_ENTRY(igb_flow_mem) entries;
333 struct rte_flow *flow;
334 struct rte_eth_dev *dev;
337 TAILQ_HEAD(igb_ntuple_filter_list, igb_ntuple_filter_ele);
338 struct igb_ntuple_filter_list igb_filter_ntuple_list;
339 TAILQ_HEAD(igb_ethertype_filter_list, igb_ethertype_filter_ele);
340 struct igb_ethertype_filter_list igb_filter_ethertype_list;
341 TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele);
342 struct igb_syn_filter_list igb_filter_syn_list;
343 TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele);
344 struct igb_flex_filter_list igb_filter_flex_list;
345 TAILQ_HEAD(igb_rss_filter_list, igb_rss_conf_ele);
346 struct igb_rss_filter_list igb_filter_rss_list;
347 TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem);
348 struct igb_flow_mem_list igb_flow_list;
350 extern const struct rte_flow_ops igb_flow_ops;
353 * RX/TX IGB function prototypes
355 void eth_igb_tx_queue_release(void *txq);
356 void eth_igb_rx_queue_release(void *rxq);
357 void igb_dev_clear_queues(struct rte_eth_dev *dev);
358 void igb_dev_free_queues(struct rte_eth_dev *dev);
360 int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
361 uint16_t nb_rx_desc, unsigned int socket_id,
362 const struct rte_eth_rxconf *rx_conf,
363 struct rte_mempool *mb_pool);
365 uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
366 uint16_t rx_queue_id);
368 int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
370 int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
371 int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
373 int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
374 uint16_t nb_tx_desc, unsigned int socket_id,
375 const struct rte_eth_txconf *tx_conf);
377 int eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt);
379 int eth_igb_rx_init(struct rte_eth_dev *dev);
381 void eth_igb_tx_init(struct rte_eth_dev *dev);
383 uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
386 uint16_t eth_igb_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
389 uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
392 uint16_t eth_igb_recv_scattered_pkts(void *rxq,
393 struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
395 int eth_igb_rss_hash_update(struct rte_eth_dev *dev,
396 struct rte_eth_rss_conf *rss_conf);
398 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
399 struct rte_eth_rss_conf *rss_conf);
401 int eth_igbvf_rx_init(struct rte_eth_dev *dev);
403 void eth_igbvf_tx_init(struct rte_eth_dev *dev);
406 * misc function prototypes
408 void igb_pf_host_init(struct rte_eth_dev *eth_dev);
410 void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);
412 int igb_pf_host_configure(struct rte_eth_dev *eth_dev);
414 void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
415 struct rte_eth_rxq_info *qinfo);
417 void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
418 struct rte_eth_txq_info *qinfo);
421 * RX/TX EM function prototypes
423 void eth_em_tx_queue_release(void *txq);
424 void eth_em_rx_queue_release(void *rxq);
426 void em_dev_clear_queues(struct rte_eth_dev *dev);
427 void em_dev_free_queues(struct rte_eth_dev *dev);
429 int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
430 uint16_t nb_rx_desc, unsigned int socket_id,
431 const struct rte_eth_rxconf *rx_conf,
432 struct rte_mempool *mb_pool);
434 uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
435 uint16_t rx_queue_id);
437 int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
439 int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
440 int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
442 int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
443 uint16_t nb_tx_desc, unsigned int socket_id,
444 const struct rte_eth_txconf *tx_conf);
446 int eth_em_rx_init(struct rte_eth_dev *dev);
448 void eth_em_tx_init(struct rte_eth_dev *dev);
450 uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
453 uint16_t eth_em_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
456 uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
459 uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
462 void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
463 struct rte_eth_rxq_info *qinfo);
465 void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
466 struct rte_eth_txq_info *qinfo);
468 void igb_pf_host_uninit(struct rte_eth_dev *dev);
470 void igb_filterlist_flush(struct rte_eth_dev *dev);
471 int igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
472 struct e1000_5tuple_filter *filter);
473 int igb_delete_2tuple_filter(struct rte_eth_dev *dev,
474 struct e1000_2tuple_filter *filter);
475 void igb_remove_flex_filter(struct rte_eth_dev *dev,
476 struct e1000_flex_filter *filter);
477 int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
479 int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
480 struct rte_eth_ntuple_filter *ntuple_filter, bool add);
481 int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
482 struct rte_eth_ethertype_filter *filter,
484 int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
485 struct rte_eth_syn_filter *filter,
487 int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
488 struct rte_eth_flex_filter *filter,
490 int igb_config_rss_filter(struct rte_eth_dev *dev,
491 struct igb_rte_flow_rss_conf *conf,
494 #endif /* _E1000_ETHDEV_H_ */