/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#ifndef _TXGBE_ETHDEV_H_
#define _TXGBE_ETHDEV_H_

#include "base/txgbe.h"
#include "txgbe_ptypes.h"

#include <rte_flow_driver.h>

#include <rte_ethdev.h>
#include <rte_ethdev_core.h>

#include <rte_hash_crc.h>
#include <rte_bus_pci.h>
#include <rte_tm_driver.h>

/* bit flags for pending device events, e.g. "link needs updating" */
#define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define TXGBE_FLAG_MAILBOX          (uint32_t)(1 << 1)
#define TXGBE_FLAG_PHY_INTERRUPT    (uint32_t)(1 << 2)
#define TXGBE_FLAG_MACSEC           (uint32_t)(1 << 3)
#define TXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4)
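
/*
 * Illustrative sketch (hypothetical helper, not driver API): the flags
 * above are meant to be OR-ed into a single uint32_t event word and
 * tested with a bitwise AND, e.g. from an interrupt handler.
 */
static inline bool
txgbe_example_link_update_needed(uint32_t flags)
{
	/* true when the "link needs updating" bit is set */
	return (flags & TXGBE_FLAG_NEED_LINK_UPDATE) != 0;
}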

/*
 * Defines that were not part of txgbe_type.h as they are not used by the
 * FreeBSD driver.
 */
#define TXGBE_VFTA_SIZE 128
#define TXGBE_VLAN_TAG_SIZE 4
#define TXGBE_HKEY_MAX_INDEX 10
/* Default value of max Rx queue number */
#define TXGBE_MAX_RX_QUEUE_NUM 128
#define TXGBE_VMDQ_DCB_NB_QUEUES TXGBE_MAX_RX_QUEUE_NUM

#define NBBY 8 /* number of bits in a byte */

#define TXGBE_HWSTRIP_BITMAP_SIZE \
	(TXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
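
/*
 * Worked example (hypothetical helper, for illustration only): with 128
 * Rx queues and 32 bits per uint32_t, TXGBE_HWSTRIP_BITMAP_SIZE is
 * 128 / 32 = 4 words. Queue N's strip bit lives at word (N / 32),
 * bit (N % 32).
 */
static inline void
txgbe_example_hwstrip_bitmap_set(uint32_t *bitmap, uint16_t queue, bool on)
{
	uint32_t idx = queue / (sizeof(uint32_t) * NBBY);
	uint32_t bit = queue % (sizeof(uint32_t) * NBBY);

	if (on)
		bitmap[idx] |= 1u << bit;
	else
		bitmap[idx] &= ~(1u << bit);
}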

#define TXGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */

#define TXGBE_MAX_QUEUE_NUM_PER_VF 8

#define TXGBE_5TUPLE_MAX_PRI 7
#define TXGBE_5TUPLE_MIN_PRI 1

#define TXGBE_RSS_OFFLOAD_ALL ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX)
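
/*
 * Example (sketch, not driver code): advertising every hash type the
 * device supports when filling the generic rte_eth_rss_conf passed to
 * rte_eth_dev_configure() or rte_eth_dev_rss_hash_update().
 */
static inline void
txgbe_example_rss_conf_init(struct rte_eth_rss_conf *rss_conf)
{
	rss_conf->rss_key = NULL;	/* keep the default hash key */
	rss_conf->rss_key_len = 0;
	rss_conf->rss_hf = TXGBE_RSS_OFFLOAD_ALL;
}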

#define TXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define TXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET

#define TXGBE_MAX_FDIR_FILTER_NUM (1024 * 32)
#define TXGBE_MAX_L2_TN_FILTER_NUM 128

/*
 * Information about the fdir mode.
 */
struct txgbe_hw_fdir_mask {
	uint16_t vlan_tci_mask;
	uint32_t src_ipv4_mask;
	uint32_t dst_ipv4_mask;
	uint16_t src_ipv6_mask;
	uint16_t dst_ipv6_mask;
	uint16_t src_port_mask;
	uint16_t dst_port_mask;
	uint16_t flex_bytes_mask;
	uint8_t  mac_addr_byte_mask;
	uint32_t tunnel_id_mask;
	uint8_t  tunnel_type_mask;
};

struct txgbe_fdir_filter {
	TAILQ_ENTRY(txgbe_fdir_filter) entries;
	struct txgbe_atr_input input; /* key of fdir filter */
	uint32_t fdirflags; /* drop or forward */
	uint32_t fdirhash; /* hash value for fdir */
	uint8_t queue; /* assigned rx queue */
};

/* list of fdir filters */
TAILQ_HEAD(txgbe_fdir_filter_list, txgbe_fdir_filter);

struct txgbe_fdir_rule {
	struct txgbe_hw_fdir_mask mask;
	struct txgbe_atr_input input; /* key of fdir filter */
	bool b_spec; /* If TRUE, input, fdirflags and queue are valid. */
	bool b_mask; /* If TRUE, mask is valid. */
	enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
	uint32_t fdirflags; /* drop or forward */
	uint32_t soft_id; /* a unique value for this rule */
	uint8_t queue; /* assigned rx queue */
	uint8_t flex_bytes_offset;
};

struct txgbe_hw_fdir_info {
	struct txgbe_hw_fdir_mask mask;
	uint8_t flex_bytes_offset;
	struct txgbe_fdir_filter_list fdir_list; /* filter list */
	/* store the pointers of the filters, index is the hash value. */
	struct txgbe_fdir_filter **hash_map;
	struct rte_hash *hash_handle; /* cuckoo hash handler */
	bool mask_added; /* If the mask was already set by a consistent filter */
};

struct txgbe_rte_flow_rss_conf {
	struct rte_flow_action_rss conf; /**< RSS parameters. */
	uint8_t key[TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /**< Hash key. */
	uint16_t queue[TXGBE_MAX_RX_QUEUE_NUM]; /**< Queue indices to use. */
};

/* structure for interrupt-related data */
struct txgbe_interrupt {
	uint32_t flags;
	uint32_t mask_misc;
	/* to save original mask during delayed handler */
	uint32_t mask_misc_orig;
};

#define TXGBE_NB_STAT_MAPPING 32
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
struct txgbe_stat_mappings {
	uint32_t tqsm[TXGBE_NB_STAT_MAPPING];
	uint32_t rqsm[TXGBE_NB_STAT_MAPPING];
};
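
/*
 * Worked example (hypothetical helper): each 32-bit tqsm/rqsm word packs
 * NB_QMAP_FIELDS_PER_QSM_REG (4) map fields of
 * QSM_REG_NB_BITS_PER_QMAP_FIELD (8) bits each, so queue N maps to word
 * N / 4 at bit offset (N % 4) * 8; the mask keeps the 4 valid bits.
 */
static inline uint32_t
txgbe_example_get_tx_stat_mapping(struct txgbe_stat_mappings *maps,
				  uint16_t queue)
{
	uint32_t reg = queue / NB_QMAP_FIELDS_PER_QSM_REG;
	uint32_t shift = (queue % NB_QMAP_FIELDS_PER_QSM_REG) *
			 QSM_REG_NB_BITS_PER_QMAP_FIELD;

	return (maps->tqsm[reg] >> shift) & QMAP_FIELD_RESERVED_BITS_MASK;
}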

struct txgbe_vfta {
	uint32_t vfta[TXGBE_VFTA_SIZE];
};

struct txgbe_hwstrip {
	uint32_t bitmap[TXGBE_HWSTRIP_BITMAP_SIZE];
};

/*
 * VF data used by the PF host only
 */
#define TXGBE_MAX_VF_MC_ENTRIES 30

struct txgbe_uta_info {
	uint8_t uc_filter_type;
	uint32_t uta_shadow[TXGBE_MAX_UTA];
};

#define TXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules. */

struct txgbe_mirror_info {
	/* store PF mirror rules configuration */
	struct rte_eth_mirror_conf mr_conf[TXGBE_MAX_MIRROR_RULES];
};

struct txgbe_vf_info {
	uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
	uint16_t vf_mc_hashes[TXGBE_MAX_VF_MC_ENTRIES];
	uint16_t num_vf_mc_hashes;
	uint16_t tx_rate[TXGBE_MAX_QUEUE_NUM_PER_VF];
	uint16_t switch_domain_id;
};

TAILQ_HEAD(txgbe_5tuple_filter_list, txgbe_5tuple_filter);

struct txgbe_5tuple_filter_info {
	uint32_t dst_ip;
	uint32_t src_ip;
	uint16_t dst_port;
	uint16_t src_port;
	enum txgbe_5tuple_protocol proto; /* l4 protocol. */
	uint8_t priority; /* seven levels (001b-111b), 111b is highest,
			   * used when more than one filter matches.
			   */
	uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
		src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
		dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
		src_port_mask:1, /* if mask is 1b, do not compare src port. */
		proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};

/* 5tuple filter structure */
struct txgbe_5tuple_filter {
	TAILQ_ENTRY(txgbe_5tuple_filter) entries;
	uint16_t index; /* the index of 5tuple filter */
	struct txgbe_5tuple_filter_info filter_info;
	uint16_t queue; /* rx queue assigned to */
};

#define TXGBE_5TUPLE_ARRAY_SIZE \
	(RTE_ALIGN(TXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
	 (sizeof(uint32_t) * NBBY))
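
/*
 * Example (illustrative sketch, not driver API): filling a
 * txgbe_5tuple_filter_info that matches one destination IP/port at the
 * highest priority. A mask bit of 1b means "do not compare" that field,
 * so source address, source port and protocol are ignored here.
 */
static inline struct txgbe_5tuple_filter_info
txgbe_example_5tuple_match_dst(uint32_t dst_ip, uint16_t dst_port)
{
	struct txgbe_5tuple_filter_info info = {
		.dst_ip = dst_ip,
		.dst_port = dst_port,
		.priority = TXGBE_5TUPLE_MAX_PRI, /* wins over lower levels */
		.src_ip_mask = 1,
		.src_port_mask = 1,
		.proto_mask = 1,
	};

	return info;
}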

struct txgbe_ethertype_filter {
	uint16_t ethertype;
	uint32_t etqf;
	uint32_t etqs;
	/**
	 * If this filter is added by configuration,
	 * it should not be removed.
	 */
	bool conf;
};

/*
 * Structure to store filters' info.
 */
struct txgbe_filter_info {
	uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
	/* store used ethertype filters */
	struct txgbe_ethertype_filter ethertype_filters[TXGBE_ETF_ID_MAX];
	/* Bit mask for every used 5tuple filter */
	uint32_t fivetuple_mask[TXGBE_5TUPLE_ARRAY_SIZE];
	struct txgbe_5tuple_filter_list fivetuple_list;
	/* store the SYN filter info */
	uint32_t syn_info;
	/* store the rss filter info */
	struct txgbe_rte_flow_rss_conf rss_info;
};

struct txgbe_l2_tn_key {
	enum rte_eth_tunnel_type l2_tn_type;
	uint32_t tn_id;
};

struct txgbe_l2_tn_filter {
	TAILQ_ENTRY(txgbe_l2_tn_filter) entries;
	struct txgbe_l2_tn_key key;
	uint32_t pool;
};

TAILQ_HEAD(txgbe_l2_tn_filter_list, txgbe_l2_tn_filter);

struct txgbe_l2_tn_info {
	struct txgbe_l2_tn_filter_list l2_tn_list;
	struct txgbe_l2_tn_filter **hash_map;
	struct rte_hash *hash_handle;
	bool e_tag_en; /* e-tag enabled */
	bool e_tag_fwd_en; /* e-tag based forwarding enabled */
	uint16_t e_tag_ether_type; /* ether type for e-tag */
};

/* flow handle returned by the txgbe rte_flow ops: type tag plus rule data */
struct rte_flow {
	enum rte_filter_type filter_type;
	void *rule;
};

/* The configuration of bandwidth */
struct txgbe_bw_conf {
	uint8_t tc_num; /* Number of TCs. */
};

/* Struct to store Traffic Manager shaper profile. */
struct txgbe_tm_shaper_profile {
	TAILQ_ENTRY(txgbe_tm_shaper_profile) node;
	uint32_t shaper_profile_id;
	uint32_t reference_count;
	struct rte_tm_shaper_params profile;
};

TAILQ_HEAD(txgbe_shaper_profile_list, txgbe_tm_shaper_profile);

/* node type of Traffic Manager */
enum txgbe_tm_node_type {
	TXGBE_TM_NODE_TYPE_PORT,
	TXGBE_TM_NODE_TYPE_TC,
	TXGBE_TM_NODE_TYPE_QUEUE,
	TXGBE_TM_NODE_TYPE_MAX,
};

/* Struct to store Traffic Manager node configuration. */
struct txgbe_tm_node {
	TAILQ_ENTRY(txgbe_tm_node) node;
	uint32_t reference_count;
	struct txgbe_tm_node *parent;
	struct txgbe_tm_shaper_profile *shaper_profile;
	struct rte_tm_node_params params;
};

TAILQ_HEAD(txgbe_tm_node_list, txgbe_tm_node);

/* The configuration of Traffic Manager */
struct txgbe_tm_conf {
	struct txgbe_shaper_profile_list shaper_profile_list;
	struct txgbe_tm_node *root; /* root node - port */
	struct txgbe_tm_node_list tc_list; /* node list for all the TCs */
	struct txgbe_tm_node_list queue_list; /* node list for all the queues */
	/*
	 * The number of added TC nodes.
	 * It should be no more than the TC number of this port.
	 */
	uint32_t nb_tc_node;
	/*
	 * The number of added queue nodes.
	 * It should be no more than the queue number of this port.
	 */
	uint32_t nb_queue_node;
	/*
	 * This flag is used to check if the app can change the TM node
	 * configuration.
	 * When true, the configuration has been applied to hardware and
	 * the app must not change it.
	 * As on-the-fly configuration is not supported, the app should
	 * call the hierarchy_commit API when starting the port to set
	 * this flag to true; it is set back to false when the port is
	 * stopped.
	 */
	bool committed;
};

/*
 * Structure to store private data for each driver instance (for each port).
 */
struct txgbe_adapter {
	struct txgbe_hw hw;
	struct txgbe_hw_stats stats;
	struct txgbe_hw_fdir_info fdir;
	struct txgbe_interrupt intr;
	struct txgbe_stat_mappings stat_mappings;
	struct txgbe_vfta shadow_vfta;
	struct txgbe_hwstrip hwstrip;
	struct txgbe_dcb_config dcb_config;
	struct txgbe_mirror_info mr_data;
	struct txgbe_vf_info *vfdata;
	struct txgbe_uta_info uta_info;
	struct txgbe_filter_info filter;
	struct txgbe_l2_tn_info l2_tn;
	struct txgbe_bw_conf bw_conf;
	bool rx_bulk_alloc_allowed;
	struct rte_timecounter systime_tc;
	struct rte_timecounter rx_tstamp_tc;
	struct rte_timecounter tx_tstamp_tc;
	struct txgbe_tm_conf tm_conf;

	/* For RSS reta table update */
	uint8_t rss_reta_updated;
};

#define TXGBE_DEV_ADAPTER(dev) \
	((struct txgbe_adapter *)(dev)->data->dev_private)

#define TXGBE_DEV_HW(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->hw)

#define TXGBE_DEV_STATS(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->stats)

#define TXGBE_DEV_INTR(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->intr)

#define TXGBE_DEV_FDIR(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->fdir)

#define TXGBE_DEV_STAT_MAPPINGS(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->stat_mappings)

#define TXGBE_DEV_VFTA(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->shadow_vfta)

#define TXGBE_DEV_HWSTRIP(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->hwstrip)

#define TXGBE_DEV_DCB_CONFIG(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->dcb_config)

#define TXGBE_DEV_VFDATA(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->vfdata)

#define TXGBE_DEV_MR_INFO(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->mr_data)

#define TXGBE_DEV_UTA_INFO(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->uta_info)

#define TXGBE_DEV_FILTER(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->filter)

#define TXGBE_DEV_L2_TN(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->l2_tn)

#define TXGBE_DEV_BW_CONF(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->bw_conf)

#define TXGBE_DEV_TM_CONF(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->tm_conf)
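
/*
 * Usage sketch (hypothetical helper, not driver API): inside any dev_ops
 * callback these accessors resolve the per-port private data that
 * rte_ethdev allocated for the driver.
 */
static inline bool
txgbe_example_rx_bulk_alloc_allowed(struct rte_eth_dev *dev)
{
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);

	/* same storage that TXGBE_DEV_HW() etc. reach into */
	return adapter->rx_bulk_alloc_allowed;
}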

/*
 * RX/TX function prototypes
 */
void txgbe_dev_clear_queues(struct rte_eth_dev *dev);

void txgbe_dev_free_queues(struct rte_eth_dev *dev);

void txgbe_dev_rx_queue_release(void *rxq);

void txgbe_dev_tx_queue_release(void *txq);

int txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

int txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

uint32_t txgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
		uint16_t rx_queue_id);

int txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);

int txgbe_dev_rx_init(struct rte_eth_dev *dev);

void txgbe_dev_tx_init(struct rte_eth_dev *dev);

int txgbe_dev_rxtx_start(struct rte_eth_dev *dev);

void txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
void txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
void txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
void txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);

int txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

void txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo);

void txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo);

uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_recv_pkts_lro_single_alloc(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

uint16_t txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

int txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf);

int txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf);

bool txgbe_rss_update_sp(enum txgbe_mac_type mac_type);

int txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter,
			bool add);
int txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add);
int txgbe_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add);

/*
 * l2 tunnel configuration.
 */
struct txgbe_l2_tunnel_conf {
	enum rte_eth_tunnel_type l2_tunnel_type;
	uint16_t ether_type; /* ether type in l2 header */
	uint32_t tunnel_id; /* port tag id for e-tag */
	uint16_t vf_id; /* VF id for tag insertion */
	uint32_t pool; /* destination pool for tag based forwarding */
};
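
/*
 * Example (illustrative values only, hypothetical helper): an E-tag
 * configuration that forwards traffic tagged with port tag id 0x100 to
 * pool 1. Assumes the RTE_L2_TUNNEL_TYPE_E_TAG tunnel type from
 * rte_ethdev.h of this DPDK generation.
 */
static inline void
txgbe_example_l2_tunnel_conf(struct txgbe_l2_tunnel_conf *conf)
{
	conf->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	conf->ether_type = 0x893f; /* IEEE 802.1BR E-tag ethertype */
	conf->tunnel_id = 0x100;   /* port tag id to match */
	conf->pool = 1;            /* destination pool */
}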

int
txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel,
			       bool restore);
int
txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel);
void txgbe_filterlist_init(void);
void txgbe_filterlist_flush(void);

void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
			uint8_t queue, uint8_t msix_vector);

/*
 * Flow director function prototypes
 */
int txgbe_fdir_configure(struct rte_eth_dev *dev);
int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
int txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
				    uint16_t offset);
int txgbe_fdir_filter_program(struct rte_eth_dev *dev,
			      struct txgbe_fdir_rule *rule,
			      bool del, bool update);
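
/*
 * Usage sketch (hypothetical helper, not driver API): programming a
 * perfect-match flow director rule. rule->input, rule->mask and
 * rule->soft_id are assumed to have been filled from a parsed rte_flow
 * pattern before this call.
 */
static inline int
txgbe_example_fdir_add(struct rte_eth_dev *dev, struct txgbe_fdir_rule *rule,
		       uint8_t queue)
{
	rule->mode = RTE_FDIR_MODE_PERFECT;
	rule->b_spec = true; /* input, fdirflags and queue are valid */
	rule->b_mask = true; /* mask is valid */
	rule->queue = queue; /* forward matches to this Rx queue */

	/* del=false, update=false: add a brand-new filter */
	return txgbe_fdir_filter_program(dev, rule, false, false);
}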

void txgbe_configure_pb(struct rte_eth_dev *dev);
void txgbe_configure_port(struct rte_eth_dev *dev);
void txgbe_configure_dcb(struct rte_eth_dev *dev);

int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
		int wait_to_complete);

int txgbe_pf_host_init(struct rte_eth_dev *eth_dev);

void txgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);

void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);

int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);

uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);

void txgbe_fdir_filter_restore(struct rte_eth_dev *dev);
int txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);

extern const struct rte_flow_ops txgbe_flow_ops;

void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);

int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			    uint16_t tx_rate, uint64_t q_msk);
int txgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
void txgbe_tm_conf_init(struct rte_eth_dev *dev);
void txgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
			       uint16_t tx_rate);
int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
			const struct rte_flow_action_rss *in);
int txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
			  const struct rte_flow_action_rss *with);
int txgbe_config_rss_filter(struct rte_eth_dev *dev,
			    struct txgbe_rte_flow_rss_conf *conf, bool add);

static inline int
txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info,
			      uint16_t ethertype)
{
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_filters[i].ethertype == ethertype &&
		    (filter_info->ethertype_mask & (1 << i)))
			return i;
	}
	return -1;
}

static inline int
txgbe_ethertype_filter_insert(struct txgbe_filter_info *filter_info,
			      struct txgbe_ethertype_filter *ethertype_filter)
{
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_mask & (1 << i))
			continue;

		filter_info->ethertype_mask |= 1 << i;
		filter_info->ethertype_filters[i].ethertype =
				ethertype_filter->ethertype;
		filter_info->ethertype_filters[i].etqf =
				ethertype_filter->etqf;
		filter_info->ethertype_filters[i].etqs =
				ethertype_filter->etqs;
		filter_info->ethertype_filters[i].conf =
				ethertype_filter->conf;
		break;
	}
	return (i < TXGBE_ETF_ID_MAX ? i : -1);
}

static inline void
txgbe_ethertype_filter_remove(struct txgbe_filter_info *filter_info,
			      uint8_t idx)
{
	if (idx >= TXGBE_ETF_ID_MAX)
		return;
	filter_info->ethertype_mask &= ~(1 << idx);
	filter_info->ethertype_filters[idx].ethertype = 0;
	filter_info->ethertype_filters[idx].etqf = 0;
	filter_info->ethertype_filters[idx].etqs = 0;
	filter_info->ethertype_filters[idx].conf = FALSE;
}
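
/*
 * Usage sketch for the three helpers above (illustrative only): reserve a
 * slot for an ethertype, look it up, then release it. The etqf/etqs
 * register values here are placeholders, not real register encodings.
 */
static inline void
txgbe_example_ethertype_cycle(struct txgbe_filter_info *filter_info)
{
	struct txgbe_ethertype_filter filter = {
		.ethertype = 0x88f7, /* e.g. PTP over Ethernet */
		.etqf = 0,           /* placeholder register value */
		.etqs = 0,           /* placeholder register value */
		.conf = FALSE,       /* not from static config, removable */
	};
	int idx = txgbe_ethertype_filter_insert(filter_info, &filter);

	if (idx >= 0 &&
	    txgbe_ethertype_filter_lookup(filter_info, 0x88f7) == idx)
		txgbe_ethertype_filter_remove(filter_info, (uint8_t)idx);
}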

/* High threshold controlling when to start sending XOFF frames. */
#define TXGBE_FC_XOFF_HITH 128 /* KB */
/* Low threshold controlling when to start sending XON frames. */
#define TXGBE_FC_XON_LOTH 64 /* KB */

/* Timer value included in XOFF frames. */
#define TXGBE_FC_PAUSE_TIME 0x680
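
/*
 * Sketch (not driver code): how these thresholds could map onto the
 * generic rte_eth_fc_conf used with rte_eth_dev_flow_ctrl_set(). Whether
 * the PMD interprets the watermarks in KB or bytes is a driver detail;
 * the values here are illustrative defaults.
 */
static inline void
txgbe_example_fc_conf(struct rte_eth_fc_conf *fc_conf)
{
	fc_conf->mode = RTE_FC_FULL; /* both generate and honor pause */
	fc_conf->high_water = TXGBE_FC_XOFF_HITH;
	fc_conf->low_water = TXGBE_FC_XON_LOTH;
	fc_conf->pause_time = TXGBE_FC_PAUSE_TIME;
}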

#define TXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define TXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define TXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

/*
 * Default values for RX/TX configuration
 */
#define TXGBE_DEFAULT_RX_FREE_THRESH 32
#define TXGBE_DEFAULT_RX_PTHRESH 8
#define TXGBE_DEFAULT_RX_HTHRESH 8
#define TXGBE_DEFAULT_RX_WTHRESH 0

#define TXGBE_DEFAULT_TX_FREE_THRESH 32
#define TXGBE_DEFAULT_TX_PTHRESH 32
#define TXGBE_DEFAULT_TX_HTHRESH 0
#define TXGBE_DEFAULT_TX_WTHRESH 0

/* Additional timesync values. */
#define NSEC_PER_SEC 1000000000L
#define TXGBE_INCVAL_10GB 0xCCCCCC
#define TXGBE_INCVAL_1GB 0x800000
#define TXGBE_INCVAL_100 0xA00000
#define TXGBE_INCVAL_10 0xC7F380
#define TXGBE_INCVAL_FPGA 0x800000
#define TXGBE_INCVAL_SHIFT_10GB 20
#define TXGBE_INCVAL_SHIFT_1GB 18
#define TXGBE_INCVAL_SHIFT_100 15
#define TXGBE_INCVAL_SHIFT_10 12
#define TXGBE_INCVAL_SHIFT_FPGA 17

#define TXGBE_CYCLECOUNTER_MASK 0xffffffffffffffffULL
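
/*
 * Sketch (assumptions: rte_time.h timecounters, a 10G link): how the
 * INCVAL shift constants pair with TXGBE_CYCLECOUNTER_MASK when
 * initializing the rte_timecounter instances kept in struct txgbe_adapter.
 */
static inline void
txgbe_example_systime_tc_init(struct rte_timecounter *tc)
{
	tc->cc_mask = TXGBE_CYCLECOUNTER_MASK;
	tc->cc_shift = TXGBE_INCVAL_SHIFT_10GB;
	tc->nsec_mask = (1ULL << TXGBE_INCVAL_SHIFT_10GB) - 1;
}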

/* store statistics names and their offsets in the stats structure */
struct rte_txgbe_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

const uint32_t *txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
int txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
int txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
int txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
void txgbe_dev_setup_link_alarm_handler(void *param);
void txgbe_read_stats_registers(struct txgbe_hw *hw,
			struct txgbe_hw_stats *hw_stats);

void txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
void txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
void txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
void txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
		uint16_t queue, bool on);
void txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
		int mask);

#endif /* _TXGBE_ETHDEV_H_ */