/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#ifndef _IXGBE_ETHDEV_H_
#define _IXGBE_ETHDEV_H_

#include "base/ixgbe_type.h"
#include "base/ixgbe_dcb.h"
#include "base/ixgbe_dcb_82599.h"
#include "base/ixgbe_dcb_82598.h"
#include "ixgbe_bypass.h"
#ifdef RTE_LIBRTE_SECURITY
#include "ixgbe_ipsec.h"
#endif
#include <rte_flow.h>
#include <rte_time.h>
#include <rte_hash.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_tm_driver.h>

/* need update link, bit flag */
#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define IXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
#define IXGBE_FLAG_PHY_INTERRUPT    (uint32_t)(1 << 2)
#define IXGBE_FLAG_MACSEC           (uint32_t)(1 << 3)
#define IXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4)

/*
 * Defines that were not part of ixgbe_type.h as they are not used by the
 * FreeBSD driver.
 */
#define IXGBE_ADVTXD_MAC_1588       0x00080000 /* IEEE1588 Timestamp packet */
#define IXGBE_RXD_STAT_TMST         0x10000    /* Timestamped Packet indication */
#define IXGBE_ADVTXD_TUCMD_L4T_RSV  0x00001800 /* L4 Packet TYPE, reserved */
#define IXGBE_RXDADV_ERR_CKSUM_BIT  30
#define IXGBE_RXDADV_ERR_CKSUM_MSK  3
#define IXGBE_ADVTXD_MACLEN_SHIFT   9          /* Bit shift for l2_len */
#define IXGBE_NB_STAT_MAPPING_REGS  32
#define IXGBE_EXTENDED_VLAN         (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */
#define IXGBE_VFTA_SIZE             128
#define IXGBE_VLAN_TAG_SIZE         4
#define IXGBE_MAX_RX_QUEUE_NUM      128
#define IXGBE_MAX_INTR_QUEUE_NUM    15
#define IXGBE_VMDQ_DCB_NB_QUEUES    IXGBE_MAX_RX_QUEUE_NUM
#define IXGBE_DCB_NB_QUEUES         IXGBE_MAX_RX_QUEUE_NUM
#define IXGBE_NONE_MODE_TX_NB_QUEUES 64

#ifndef NBBY
#define NBBY 8 /* number of bits in a byte */
#endif
#define IXGBE_HWSTRIP_BITMAP_SIZE (IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
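
/*
 * Worked example (illustrative note, not part of the original header):
 * with IXGBE_MAX_RX_QUEUE_NUM == 128 and 32 bits per uint32_t, the
 * bitmap below is 128 / (4 * 8) == 4 words, i.e. one bit per Rx queue.
 */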

/* EITR Interval is in 2048ns units for 1G and 10G link */
#define IXGBE_EITR_INTERVAL_UNIT_NS 2048
#define IXGBE_EITR_ITR_INT_SHIFT    3
#define IXGBE_EITR_INTERVAL_US(us) \
        (((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \
                IXGBE_EITR_ITR_INT_MASK)
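
/*
 * Worked example (illustrative note, not part of the original header):
 * IXGBE_EITR_INTERVAL_US(10) evaluates to ((10 * 1000 / 2048) << 3),
 * i.e. 4 << 3 == 0x20, before masking; the requested 10us interval is
 * therefore rounded down to 4 * 2048ns == 8.192us.
 */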

/* Loopback operation modes */
/* 82599 specific loopback operation types */
#define IXGBE_LPBK_82599_NONE   0x0 /* Default value. Loopback is disabled. */
#define IXGBE_LPBK_82599_TX_RX  0x1 /* Tx->Rx loopback operation is enabled. */

#define IXGBE_MAX_JUMBO_FRAME_SIZE 0x2600 /* Maximum Jumbo frame size. */

#define IXGBE_RTTBCNRC_RF_INT_MASK_BASE 0x000003FF
#define IXGBE_RTTBCNRC_RF_INT_MASK_M \
        (IXGBE_RTTBCNRC_RF_INT_MASK_BASE << IXGBE_RTTBCNRC_RF_INT_SHIFT)

#define IXGBE_MAX_QUEUE_NUM_PER_VF 8

#define IXGBE_SYN_FILTER_ENABLE       0x00000001 /* syn filter enable field */
#define IXGBE_SYN_FILTER_QUEUE        0x000000FE /* syn filter queue field */
#define IXGBE_SYN_FILTER_QUEUE_SHIFT  1          /* syn filter queue field shift */
#define IXGBE_SYN_FILTER_SYNQFP       0x80000000 /* syn filter SYNQFP */
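
/*
 * Illustrative use (not part of the original header): the Rx queue
 * assigned to TCP SYN packets can be recovered from a SYNQF register
 * value with
 *      queue = (synqf & IXGBE_SYN_FILTER_QUEUE) >> IXGBE_SYN_FILTER_QUEUE_SHIFT;
 */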

#define IXGBE_ETQF_UP         0x00070000 /* ethertype filter priority field */
#define IXGBE_ETQF_SHIFT      16
#define IXGBE_ETQF_UP_EN      0x00080000
#define IXGBE_ETQF_ETHERTYPE  0x0000FFFF /* ethertype filter ethertype field */
#define IXGBE_ETQF_MAX_PRI    7

#define IXGBE_SDPQF_DSTPORT       0xFFFF0000 /* dst port field */
#define IXGBE_SDPQF_DSTPORT_SHIFT 16         /* dst port field shift */
#define IXGBE_SDPQF_SRCPORT       0x0000FFFF /* src port field */

#define IXGBE_L34T_IMIR_SIZE_BP     0x00001000
#define IXGBE_L34T_IMIR_RESERVE     0x00080000 /* bits 13 to 19 must be set to 1000000b. */
#define IXGBE_L34T_IMIR_LLI         0x00100000
#define IXGBE_L34T_IMIR_QUEUE       0x0FE00000
#define IXGBE_L34T_IMIR_QUEUE_SHIFT 21
#define IXGBE_5TUPLE_MAX_PRI        7
#define IXGBE_5TUPLE_MIN_PRI        1

#define IXGBE_RSS_OFFLOAD_ALL ( \
        ETH_RSS_IPV4 | \
        ETH_RSS_NONFRAG_IPV4_TCP | \
        ETH_RSS_NONFRAG_IPV4_UDP | \
        ETH_RSS_IPV6 | \
        ETH_RSS_NONFRAG_IPV6_TCP | \
        ETH_RSS_NONFRAG_IPV6_UDP | \
        ETH_RSS_IPV6_EX | \
        ETH_RSS_IPV6_TCP_EX | \
        ETH_RSS_IPV6_UDP_EX)
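
/*
 * Illustrative use (not part of the original header): an application
 * that wants every hash type this PMD supports can set
 *      dev_conf.rx_adv_conf.rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
 * before calling rte_eth_dev_configure().
 */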

#define IXGBE_VF_IRQ_ENABLE_MASK 3 /* vf irq enable mask */
#define IXGBE_VF_MAXMSIVECTOR    1

#define IXGBE_MISC_VEC_ID  RTE_INTR_VEC_ZERO_OFFSET
#define IXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET

#define IXGBE_SECTX_MINSECIFG_MASK 0x0000000F

#define IXGBE_MACSEC_PNTHRSH 0xFFFFFE00

#define IXGBE_MAX_FDIR_FILTER_NUM  (1024 * 32)
#define IXGBE_MAX_L2_TN_FILTER_NUM 128

#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
        if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
                return -ENOTSUP;\
} while (0)

#define MAC_TYPE_FILTER_SUP(type) do {\
        if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
            (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
            (type) != ixgbe_mac_X550EM_a)\
                return -ENOTSUP;\
} while (0)
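
/*
 * Illustrative use (not part of the original header): these guards are
 * invoked at the top of filter-related functions, e.g.
 *      MAC_TYPE_FILTER_SUP(hw->mac.type);
 * and make the enclosing function return -ENOTSUP on unsupported MACs.
 */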

/* Link speed for X550 auto negotiation */
#define IXGBE_LINK_SPEED_X550_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
                                       IXGBE_LINK_SPEED_1GB_FULL | \
                                       IXGBE_LINK_SPEED_2_5GB_FULL | \
                                       IXGBE_LINK_SPEED_5GB_FULL | \
                                       IXGBE_LINK_SPEED_10GB_FULL)

/*
 * Information about the fdir mode.
 */
struct ixgbe_hw_fdir_mask {
        uint16_t vlan_tci_mask;
        uint32_t src_ipv4_mask;
        uint32_t dst_ipv4_mask;
        uint16_t src_ipv6_mask;
        uint16_t dst_ipv6_mask;
        uint16_t src_port_mask;
        uint16_t dst_port_mask;
        uint16_t flex_bytes_mask;
        uint8_t  mac_addr_byte_mask;
        uint32_t tunnel_id_mask;
        uint8_t  tunnel_type_mask;
};

struct ixgbe_fdir_filter {
        TAILQ_ENTRY(ixgbe_fdir_filter) entries;
        union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter */
        uint32_t fdirflags; /* drop or forward */
        uint32_t fdirhash;  /* hash value for fdir */
        uint8_t  queue;     /* assigned rx queue */
};

/* list of fdir filters */
TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter);
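
/*
 * Illustrative use (not part of the original header): filters are chained
 * through the TAILQ head above; assuming a hypothetical "info" pointer to
 * struct ixgbe_hw_fdir_info, iteration looks like
 *      struct ixgbe_fdir_filter *f;
 *      TAILQ_FOREACH(f, &info->fdir_list, entries)
 *              do_something(f);
 */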

struct ixgbe_fdir_rule {
        struct ixgbe_hw_fdir_mask mask;
        union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter */
        bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */
        bool b_mask; /* If TRUE, mask has meaning. */
        enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
        uint32_t fdirflags; /* drop or forward */
        uint32_t soft_id;   /* a unique value for this rule */
        uint8_t  queue;     /* assigned rx queue */
        uint8_t  flex_bytes_offset;
};

struct ixgbe_hw_fdir_info {
        struct ixgbe_hw_fdir_mask mask;
        uint8_t  flex_bytes_offset;
        uint16_t collision;
        uint16_t free;
        uint16_t maxhash;
        uint8_t  maxlen;
        uint64_t add;
        uint64_t remove;
        uint64_t f_add;
        uint64_t f_remove;
        struct ixgbe_fdir_filter_list fdir_list; /* filter list */
        /* store the pointers of the filters, index is the hash value. */
        struct ixgbe_fdir_filter **hash_map;
        struct rte_hash *hash_handle; /* cuckoo hash handler */
        bool mask_added; /* If already got mask from consistent filter */
};

/* structure for interrupt-related data */
struct ixgbe_interrupt {
        uint32_t flags;
        uint32_t mask;
        /* to save original mask during delayed handler */
        uint32_t mask_original;
};

struct ixgbe_stat_mapping_registers {
        uint32_t tqsm[IXGBE_NB_STAT_MAPPING_REGS];
        uint32_t rqsmr[IXGBE_NB_STAT_MAPPING_REGS];
};

struct ixgbe_vfta {
        uint32_t vfta[IXGBE_VFTA_SIZE];
};

struct ixgbe_hwstrip {
        uint32_t bitmap[IXGBE_HWSTRIP_BITMAP_SIZE];
};

/*
 * VF data which is used by the PF host only
 */
#define IXGBE_MAX_VF_MC_ENTRIES   30
#define IXGBE_MAX_MR_RULE_ENTRIES 4 /* number of mirroring rules supported */
#define IXGBE_MAX_UTA             128

struct ixgbe_uta_info {
        uint8_t  uc_filter_type;
        uint8_t  uta_in_use;
        uint32_t uta_shadow[IXGBE_MAX_UTA];
};

#define IXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules. */

struct ixgbe_mirror_info {
        struct rte_eth_mirror_conf mr_conf[IXGBE_MAX_MIRROR_RULES];
        /**< store PF mirror rules configuration */
};

struct ixgbe_vf_info {
        uint8_t  vf_mac_addresses[ETHER_ADDR_LEN];
        uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
        uint16_t num_vf_mc_hashes;
        uint16_t default_vf_vlan_id;
        uint16_t vlans_enabled;
        uint8_t  clear_to_send;
        uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF];
        uint16_t vlan_count;
        uint8_t  spoofchk_enabled;
        uint8_t  api_version;
};

/*
 * Possible l4type of 5tuple filters.
 */
enum ixgbe_5tuple_protocol {
        IXGBE_FILTER_PROTOCOL_TCP = 0,
        IXGBE_FILTER_PROTOCOL_UDP,
        IXGBE_FILTER_PROTOCOL_SCTP,
        IXGBE_FILTER_PROTOCOL_NONE,
};

TAILQ_HEAD(ixgbe_5tuple_filter_list, ixgbe_5tuple_filter);

struct ixgbe_5tuple_filter_info {
        uint32_t dst_ip;
        uint32_t src_ip;
        uint16_t dst_port;
        uint16_t src_port;
        enum ixgbe_5tuple_protocol proto; /* l4 protocol. */
        uint8_t priority; /* seven levels (001b-111b), 111b is highest,
                           * used when more than one filter matches.
                           */
        uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
                src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
                dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
                src_port_mask:1, /* if mask is 1b, do not compare src port. */
                proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};

/* 5tuple filter structure */
struct ixgbe_5tuple_filter {
        TAILQ_ENTRY(ixgbe_5tuple_filter) entries;
        uint16_t index; /* the index of 5tuple filter */
        struct ixgbe_5tuple_filter_info filter_info;
        uint16_t queue; /* rx queue assigned to */
};

#define IXGBE_5TUPLE_ARRAY_SIZE \
        (RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
         (sizeof(uint32_t) * NBBY))
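
/*
 * Worked example (illustrative; assumes IXGBE_MAX_FTQF_FILTERS is 128,
 * per base/ixgbe_type.h): RTE_ALIGN(128, 32) / 32 == 4, so the 5tuple
 * bitmap used below occupies four uint32_t words, one bit per filter.
 */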

struct ixgbe_ethertype_filter {
        uint16_t ethertype;
        uint32_t etqf;
        uint32_t etqs;
        /**
         * If this filter is added by configuration,
         * it should not be removed.
         */
        bool conf;
};

/*
 * Structure to store filters' info.
 */
struct ixgbe_filter_info {
        uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
        /* store used ethertype filters */
        struct ixgbe_ethertype_filter ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
        /* Bit mask for every used 5tuple filter */
        uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
        struct ixgbe_5tuple_filter_list fivetuple_list;
        /* store the SYN filter info */
        uint32_t syn_info;
};

struct ixgbe_l2_tn_key {
        enum rte_eth_tunnel_type l2_tn_type;
        uint32_t                 tn_id;
};

struct ixgbe_l2_tn_filter {
        TAILQ_ENTRY(ixgbe_l2_tn_filter) entries;
        struct ixgbe_l2_tn_key          key;
        uint32_t                        pool;
};

TAILQ_HEAD(ixgbe_l2_tn_filter_list, ixgbe_l2_tn_filter);

struct ixgbe_l2_tn_info {
        struct ixgbe_l2_tn_filter_list l2_tn_list;
        struct ixgbe_l2_tn_filter    **hash_map;
        struct rte_hash               *hash_handle;
        bool e_tag_en;             /* e-tag enabled */
        bool e_tag_fwd_en;         /* e-tag based forwarding enabled */
        uint16_t e_tag_ether_type; /* ether type for e-tag */
};

struct rte_flow {
        enum rte_filter_type filter_type;
        void *rule;
};

/*
 * Statistics counters collected by the MACsec engine.
 */
struct ixgbe_macsec_stats {
        /* TX port statistics */
        uint64_t out_pkts_untagged;
        uint64_t out_pkts_encrypted;
        uint64_t out_pkts_protected;
        uint64_t out_octets_encrypted;
        uint64_t out_octets_protected;

        /* RX port statistics */
        uint64_t in_pkts_untagged;
        uint64_t in_pkts_badtag;
        uint64_t in_pkts_nosci;
        uint64_t in_pkts_unknownsci;
        uint64_t in_octets_decrypted;
        uint64_t in_octets_validated;

        /* RX SC statistics */
        uint64_t in_pkts_unchecked;
        uint64_t in_pkts_delayed;
        uint64_t in_pkts_late;

        /* RX SA statistics */
        uint64_t in_pkts_ok;
        uint64_t in_pkts_invalid;
        uint64_t in_pkts_notvalid;
        uint64_t in_pkts_unusedsa;
        uint64_t in_pkts_notusingsa;
};

/* The configuration of bandwidth */
struct ixgbe_bw_conf {
        uint8_t tc_num; /* Number of TCs. */
};

/* Struct to store Traffic Manager shaper profile. */
struct ixgbe_tm_shaper_profile {
        TAILQ_ENTRY(ixgbe_tm_shaper_profile) node;
        uint32_t shaper_profile_id;
        uint32_t reference_count;
        struct rte_tm_shaper_params profile;
};

TAILQ_HEAD(ixgbe_shaper_profile_list, ixgbe_tm_shaper_profile);

/* node type of Traffic Manager */
enum ixgbe_tm_node_type {
        IXGBE_TM_NODE_TYPE_PORT,
        IXGBE_TM_NODE_TYPE_TC,
        IXGBE_TM_NODE_TYPE_QUEUE,
        IXGBE_TM_NODE_TYPE_MAX,
};
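
/*
 * Illustrative note (not part of the original header): the TM hierarchy
 * is three levels deep and a node's type follows from its depth:
 * port (root) -> TC -> queue (leaf).
 */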

/* Struct to store Traffic Manager node configuration. */
struct ixgbe_tm_node {
        TAILQ_ENTRY(ixgbe_tm_node) node;
        uint32_t id;
        uint32_t priority;
        uint32_t weight;
        uint32_t reference_count;
        uint16_t no;
        struct ixgbe_tm_node *parent;
        struct ixgbe_tm_shaper_profile *shaper_profile;
        struct rte_tm_node_params params;
};

TAILQ_HEAD(ixgbe_tm_node_list, ixgbe_tm_node);

/* The configuration of Traffic Manager */
struct ixgbe_tm_conf {
        struct ixgbe_shaper_profile_list shaper_profile_list;
        struct ixgbe_tm_node *root; /* root node - port */
        struct ixgbe_tm_node_list tc_list; /* node list for all the TCs */
        struct ixgbe_tm_node_list queue_list; /* node list for all the queues */
        /**
         * The number of added TC nodes.
         * It should be no more than the TC number of this port.
         */
        uint32_t nb_tc_node;
        /**
         * The number of added queue nodes.
         * It should be no more than the queue number of this port.
         */
        uint32_t nb_queue_node;
        /**
         * This flag is used to check if the APP can change the TM node
         * configuration.
         * When true, the configuration has been applied to the HW and
         * the APP should not change it.
         * As we don't support on-the-fly configuration, the APP should
         * call the hierarchy_commit API when starting the port to set
         * this flag to true; when stopping the port, it should be set
         * back to false.
         */
        bool committed;
};
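
/*
 * Illustrative flow (not part of the original header): an application
 * builds the hierarchy through the generic rte_tm API and commits it
 * before starting the port, e.g.
 *      rte_tm_hierarchy_commit(port_id, 1, &error);
 *      rte_eth_dev_start(port_id);
 * the commit sets "committed" to true; stopping the port clears it.
 */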

/*
 * Structure to store private data for each driver instance (for each port).
 */
struct ixgbe_adapter {
        struct ixgbe_hw             hw;
        struct ixgbe_hw_stats       stats;
        struct ixgbe_macsec_stats   macsec_stats;
        struct ixgbe_hw_fdir_info   fdir;
        struct ixgbe_interrupt      intr;
        struct ixgbe_stat_mapping_registers stat_mappings;
        struct ixgbe_vfta           shadow_vfta;
        struct ixgbe_hwstrip        hwstrip;
        struct ixgbe_dcb_config     dcb_config;
        struct ixgbe_mirror_info    mr_data;
        struct ixgbe_vf_info        *vfdata;
        struct ixgbe_uta_info       uta_info;
#ifdef RTE_LIBRTE_IXGBE_BYPASS
        struct ixgbe_bypass_info    bps;
#endif /* RTE_LIBRTE_IXGBE_BYPASS */
        struct ixgbe_filter_info    filter;
        struct ixgbe_l2_tn_info     l2_tn;
        struct ixgbe_bw_conf        bw_conf;
#ifdef RTE_LIBRTE_SECURITY
        struct ixgbe_ipsec          ipsec;
#endif
        bool rx_bulk_alloc_allowed;
        bool rx_vec_allowed;
        struct rte_timecounter      systime_tc;
        struct rte_timecounter      rx_tstamp_tc;
        struct rte_timecounter      tx_tstamp_tc;
        struct ixgbe_tm_conf        tm_conf;
};

#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
        (&((struct ixgbe_adapter *)adapter)->hw)
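
/*
 * Illustrative use (not part of the original header): inside an ethdev
 * callback the HW handle is fetched from the port's private data, e.g.
 *      struct ixgbe_hw *hw =
 *              IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 */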

#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
        (&((struct ixgbe_adapter *)adapter)->stats)

#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
        (&((struct ixgbe_adapter *)adapter)->macsec_stats)

#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
        (&((struct ixgbe_adapter *)adapter)->intr)

#define IXGBE_DEV_PRIVATE_TO_FDIR_INFO(adapter) \
        (&((struct ixgbe_adapter *)adapter)->fdir)

#define IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(adapter) \
        (&((struct ixgbe_adapter *)adapter)->stat_mappings)

#define IXGBE_DEV_PRIVATE_TO_VFTA(adapter) \
        (&((struct ixgbe_adapter *)adapter)->shadow_vfta)

#define IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(adapter) \
        (&((struct ixgbe_adapter *)adapter)->hwstrip)

#define IXGBE_DEV_PRIVATE_TO_DCB_CFG(adapter) \
        (&((struct ixgbe_adapter *)adapter)->dcb_config)

#define IXGBE_DEV_PRIVATE_TO_P_VFDATA(adapter) \
        (&((struct ixgbe_adapter *)adapter)->vfdata)

#define IXGBE_DEV_PRIVATE_TO_PFDATA(adapter) \
        (&((struct ixgbe_adapter *)adapter)->mr_data)

#define IXGBE_DEV_PRIVATE_TO_UTA(adapter) \
        (&((struct ixgbe_adapter *)adapter)->uta_info)

#define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
        (&((struct ixgbe_adapter *)adapter)->filter)

#define IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(adapter) \
        (&((struct ixgbe_adapter *)adapter)->l2_tn)

#define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \
        (&((struct ixgbe_adapter *)adapter)->bw_conf)

#define IXGBE_DEV_PRIVATE_TO_TM_CONF(adapter) \
        (&((struct ixgbe_adapter *)adapter)->tm_conf)

#define IXGBE_DEV_PRIVATE_TO_IPSEC(adapter)\
        (&((struct ixgbe_adapter *)adapter)->ipsec)

/*
 * RX/TX function prototypes
 */
void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);

void ixgbe_dev_free_queues(struct rte_eth_dev *dev);

void ixgbe_dev_rx_queue_release(void *rxq);

void ixgbe_dev_tx_queue_release(void *txq);

int ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc, unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mb_pool);

int ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc, unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf);

uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
                uint16_t rx_queue_id);

int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);

int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);

int ixgbe_dev_rx_init(struct rte_eth_dev *dev);

void ixgbe_dev_tx_init(struct rte_eth_dev *dev);

int ixgbe_dev_rxtx_start(struct rte_eth_dev *dev);

int ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_rxq_info *qinfo);

void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo);

int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);

void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);

uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);

uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);

uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);

int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf);

int ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf);

uint16_t ixgbe_reta_size_get(enum ixgbe_mac_type mac_type);

uint32_t ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx);

uint32_t ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type);

uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i);

bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);

int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
                struct rte_eth_ntuple_filter *filter,
                bool add);
int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
                struct rte_eth_ethertype_filter *filter,
                bool add);
int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
                struct rte_eth_syn_filter *filter,
                bool add);
int
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
                struct rte_eth_l2_tunnel_conf *l2_tunnel,
                bool restore);
int
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
                struct rte_eth_l2_tunnel_conf *l2_tunnel);
void ixgbe_filterlist_init(void);
void ixgbe_filterlist_flush(void);

/*
 * Flow director function prototypes
 */
int ixgbe_fdir_configure(struct rte_eth_dev *dev);
int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
int ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
                uint16_t offset);
int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
                struct ixgbe_fdir_rule *rule,
                bool del, bool update);

void ixgbe_configure_dcb(struct rte_eth_dev *dev);

/*
 * misc function prototypes
 */
void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);

void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);

void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);

void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);

void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);

void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);

void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);

int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev);

uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);

int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
                enum rte_filter_op filter_op, void *arg);
void ixgbe_fdir_filter_restore(struct rte_eth_dev *dev);
int ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);

extern const struct rte_flow_ops ixgbe_flow_ops;

void ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
void ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
void ixgbe_clear_syn_filter(struct rte_eth_dev *dev);
int ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);

int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);

int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);

int ixgbe_vt_check(struct ixgbe_hw *hw);
int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
                uint16_t tx_rate, uint64_t q_msk);
bool is_ixgbe_supported(struct rte_eth_dev *dev);
int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
                uint16_t tx_rate);

static inline int
ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
                              uint16_t ethertype)
{
        int i;

        for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
                if (filter_info->ethertype_filters[i].ethertype == ethertype &&
                    (filter_info->ethertype_mask & (1 << i)))
                        return i;
        }
        return -1;
}

static inline int
ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
                              struct ixgbe_ethertype_filter *ethertype_filter)
{
        int i;

        for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
                if (!(filter_info->ethertype_mask & (1 << i))) {
                        filter_info->ethertype_mask |= 1 << i;
                        filter_info->ethertype_filters[i].ethertype =
                                ethertype_filter->ethertype;
                        filter_info->ethertype_filters[i].etqf =
                                ethertype_filter->etqf;
                        filter_info->ethertype_filters[i].etqs =
                                ethertype_filter->etqs;
                        filter_info->ethertype_filters[i].conf =
                                ethertype_filter->conf;
                        return i;
                }
        }
        return -1;
}

static inline int
ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
                              uint8_t idx)
{
        if (idx >= IXGBE_MAX_ETQF_FILTERS)
                return -1;
        filter_info->ethertype_mask &= ~(1 << idx);
        filter_info->ethertype_filters[idx].ethertype = 0;
        filter_info->ethertype_filters[idx].etqf = 0;
        filter_info->ethertype_filters[idx].etqs = 0;
        filter_info->ethertype_filters[idx].conf = FALSE;
        return idx;
}
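
/*
 * Illustrative use of the helpers above (not part of the original header):
 *      int idx = ixgbe_ethertype_filter_lookup(filter_info, ETHER_TYPE_IPv4);
 *      if (idx >= 0)
 *              ixgbe_ethertype_filter_remove(filter_info, (uint8_t)idx);
 * lookup and insert return the slot index on success and -1 otherwise.
 */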

#endif /* _IXGBE_ETHDEV_H_ */