/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */

#ifndef _TXGBE_ETHDEV_H_
#define _TXGBE_ETHDEV_H_

#include <stdint.h>

#include "base/txgbe.h"
#include "txgbe_ptypes.h"
#ifdef RTE_LIB_SECURITY
#include "txgbe_ipsec.h"
#endif

#include <rte_flow_driver.h>
#include <rte_time.h>
#include <rte_ethdev.h>
#include <rte_ethdev_core.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_bus_pci.h>
#include <rte_tm_driver.h>

/* bit flags stored in txgbe_interrupt.flags, e.g. "link needs update" */
#define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define TXGBE_FLAG_MAILBOX         (uint32_t)(1 << 1)
#define TXGBE_FLAG_PHY_INTERRUPT   (uint32_t)(1 << 2)
#define TXGBE_FLAG_MACSEC          (uint32_t)(1 << 3)
#define TXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4)
#define TXGBE_FLAG_NEED_AN_CONFIG  (uint32_t)(1 << 5)
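
/*
 * Usage sketch (illustrative only, not a new API): these bits are OR-ed
 * into the 'flags' field of struct txgbe_interrupt below and consumed by
 * the interrupt handler, e.g.:
 *
 *	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
 *
 *	if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
 *		... refresh link status ...
 *		intr->flags &= ~TXGBE_FLAG_NEED_LINK_UPDATE;
 *	}
 */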

/*
 * Defines that were not part of txgbe_type.h as they are not used by the
 * FreeBSD driver.
 */
#define TXGBE_VFTA_SIZE 128
#define TXGBE_HKEY_MAX_INDEX 10
/* Default value of Max Rx Queue */
#define TXGBE_MAX_RX_QUEUE_NUM	128
#define TXGBE_VMDQ_DCB_NB_QUEUES	TXGBE_MAX_RX_QUEUE_NUM

#define NBBY	8	/* number of bits in a byte */

#define TXGBE_HWSTRIP_BITMAP_SIZE \
	(TXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
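
/*
 * Worked example (illustrative): with TXGBE_MAX_RX_QUEUE_NUM = 128 and
 * 32 bits per uint32_t, the hw-strip bitmap holds 128 / 32 = 4 words;
 * queue q is tracked at bitmap[q / 32], bit (q % 32).
 */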

#define TXGBE_QUEUE_ITR_INTERVAL_DEFAULT	500 /* 500us */

#define TXGBE_MAX_QUEUE_NUM_PER_VF	8

#define TXGBE_5TUPLE_MAX_PRI	7
#define TXGBE_5TUPLE_MIN_PRI	1

/* The overhead from MTU to max frame size. */
#define TXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
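
/*
 * Worked example (illustrative): an MTU of 1500 corresponds to a max frame
 * size of 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) = 1518 bytes.
 */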

#define TXGBE_RSS_OFFLOAD_ALL ( \
	RTE_ETH_RSS_IPV4 | \
	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
	RTE_ETH_RSS_IPV6 | \
	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
	RTE_ETH_RSS_IPV6_EX | \
	RTE_ETH_RSS_IPV6_TCP_EX | \
	RTE_ETH_RSS_IPV6_UDP_EX)

#define TXGBE_MISC_VEC_ID	RTE_INTR_VEC_ZERO_OFFSET
#define TXGBE_RX_VEC_START	RTE_INTR_VEC_RXTX_OFFSET

#define TXGBE_MAX_FDIR_FILTER_NUM	(1024 * 32)
#define TXGBE_MAX_L2_TN_FILTER_NUM	128

/*
 * Information about the fdir mode.
 */
struct txgbe_hw_fdir_mask {
	uint16_t vlan_tci_mask;
	uint32_t src_ipv4_mask;
	uint32_t dst_ipv4_mask;
	uint16_t src_ipv6_mask;
	uint16_t dst_ipv6_mask;
	uint16_t src_port_mask;
	uint16_t dst_port_mask;
	uint16_t flex_bytes_mask;
	uint8_t  mac_addr_byte_mask;
	uint32_t tunnel_id_mask;
	uint8_t  tunnel_type_mask;
};

struct txgbe_fdir_filter {
	TAILQ_ENTRY(txgbe_fdir_filter) entries;
	struct txgbe_atr_input input; /* key of fdir filter */
	uint32_t fdirflags; /* drop or forward */
	uint32_t fdirhash; /* hash value for fdir */
	uint8_t queue; /* assigned rx queue */
};

/* list of fdir filters */
TAILQ_HEAD(txgbe_fdir_filter_list, txgbe_fdir_filter);

struct txgbe_fdir_rule {
	struct txgbe_hw_fdir_mask mask;
	struct txgbe_atr_input input; /* key of fdir filter */
	bool b_spec; /* If TRUE, input, fdirflags, queue have meaning. */
	bool b_mask; /* If TRUE, mask has meaning. */
	enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
	uint32_t fdirflags; /* drop or forward */
	uint32_t soft_id; /* a unique value for this rule */
	uint8_t queue; /* assigned rx queue */
	uint8_t flex_bytes_offset;
};

struct txgbe_hw_fdir_info {
	struct txgbe_hw_fdir_mask mask;
	uint8_t  flex_bytes_offset;
	uint16_t collision;
	uint16_t free;
	uint16_t maxhash;
	uint8_t  maxlen;
	uint64_t add;
	uint64_t remove;
	uint64_t f_add;
	uint64_t f_remove;
	struct txgbe_fdir_filter_list fdir_list; /* filter list */
	/* store the pointers of the filters, index is the hash value. */
	struct txgbe_fdir_filter **hash_map;
	struct rte_hash *hash_handle; /* cuckoo hash handler */
	bool mask_added; /* If already got mask from consistent filter */
};

struct txgbe_rte_flow_rss_conf {
	struct rte_flow_action_rss conf; /**< RSS parameters. */
	uint8_t key[TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /**< Hash key. */
	uint16_t queue[TXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
};

/* structure for interrupt-related data */
struct txgbe_interrupt {
	uint32_t flags;
	uint32_t mask_misc;
	uint32_t mask_misc_orig; /* save mask during delayed handler */
	uint64_t mask;
	uint64_t mask_orig; /* save mask during delayed handler */
};

#define TXGBE_NB_STAT_MAPPING	32
#define QSM_REG_NB_BITS_PER_QMAP_FIELD	8
#define NB_QMAP_FIELDS_PER_QSM_REG	4
#define QMAP_FIELD_RESERVED_BITS_MASK	0x0f
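/*
 * Worked example (illustrative): each QSM register packs 4 fields of 8 bits.
 * Queue 10 therefore maps to register 10 / 4 = 2, field 10 % 4 = 2, i.e.
 * bits [23:16]; the stat index is masked with QMAP_FIELD_RESERVED_BITS_MASK
 * and shifted left by (10 % 4) * QSM_REG_NB_BITS_PER_QMAP_FIELD = 16.
 */
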
struct txgbe_stat_mappings {
	uint32_t tqsm[TXGBE_NB_STAT_MAPPING];
	uint32_t rqsm[TXGBE_NB_STAT_MAPPING];
};

struct txgbe_vfta {
	uint32_t vfta[TXGBE_VFTA_SIZE];
};

struct txgbe_hwstrip {
	uint32_t bitmap[TXGBE_HWSTRIP_BITMAP_SIZE];
};

/*
 * VF data which is used by the PF host only
 */
#define TXGBE_MAX_VF_MC_ENTRIES	30
#define TXGBE_MAX_UTA	128

struct txgbe_uta_info {
	uint8_t  uc_filter_type;
	uint16_t uta_in_use;
	uint32_t uta_shadow[TXGBE_MAX_UTA];
};

struct txgbe_vf_info {
	uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
	uint16_t vf_mc_hashes[TXGBE_MAX_VF_MC_ENTRIES];
	uint16_t num_vf_mc_hashes;
	bool clear_to_send;
	uint16_t tx_rate[TXGBE_MAX_QUEUE_NUM_PER_VF];
	uint16_t vlan_count;
	uint8_t api_version;
	uint16_t switch_domain_id;
	uint16_t xcast_mode;
	uint16_t mac_count;
};

TAILQ_HEAD(txgbe_5tuple_filter_list, txgbe_5tuple_filter);

struct txgbe_5tuple_filter_info {
	uint32_t dst_ip;
	uint32_t src_ip;
	uint16_t dst_port;
	uint16_t src_port;
	enum txgbe_5tuple_protocol proto; /* l4 protocol. */
	uint8_t priority;        /* seven levels (001b-111b), 111b is highest,
				  * used when more than one filter matches.
				  */
	uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
		src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
		dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
		src_port_mask:1, /* if mask is 1b, do not compare src port. */
		proto_mask:1;    /* if mask is 1b, do not compare protocol. */
};

/* 5tuple filter structure */
struct txgbe_5tuple_filter {
	TAILQ_ENTRY(txgbe_5tuple_filter) entries;
	uint16_t index; /* the index of 5tuple filter */
	struct txgbe_5tuple_filter_info filter_info;
	uint16_t queue; /* rx queue assigned to */
};

#define TXGBE_5TUPLE_ARRAY_SIZE \
	(RTE_ALIGN(TXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
	 (sizeof(uint32_t) * NBBY))
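
/*
 * Worked example (illustrative, assuming TXGBE_MAX_FTQF_FILTERS is 128):
 * 128 rounded up to a multiple of 32 bits is 128, so the mask array holds
 * 128 / 32 = 4 uint32_t words, one bit per 5tuple filter.
 */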

struct txgbe_ethertype_filter {
	uint16_t ethertype;
	uint32_t etqf;
	uint32_t etqs;
	/**
	 * If this filter is added by configuration,
	 * it should not be removed.
	 */
	bool conf;
};

/*
 * Structure to store filters' info.
 */
struct txgbe_filter_info {
	uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
	/* store used ethertype filters */
	struct txgbe_ethertype_filter ethertype_filters[TXGBE_ETF_ID_MAX];
	/* Bit mask for every used 5tuple filter */
	uint32_t fivetuple_mask[TXGBE_5TUPLE_ARRAY_SIZE];
	struct txgbe_5tuple_filter_list fivetuple_list;
	/* store the SYN filter info */
	uint32_t syn_info;
	/* store the rss filter info */
	struct txgbe_rte_flow_rss_conf rss_info;
};

struct txgbe_l2_tn_key {
	enum rte_eth_tunnel_type l2_tn_type;
	uint32_t tn_id;
};

struct txgbe_l2_tn_filter {
	TAILQ_ENTRY(txgbe_l2_tn_filter) entries;
	struct txgbe_l2_tn_key key;
	uint32_t pool;
};

TAILQ_HEAD(txgbe_l2_tn_filter_list, txgbe_l2_tn_filter);

struct txgbe_l2_tn_info {
	struct txgbe_l2_tn_filter_list l2_tn_list;
	struct txgbe_l2_tn_filter **hash_map;
	struct rte_hash *hash_handle;
	bool e_tag_en; /* e-tag enabled */
	bool e_tag_fwd_en; /* e-tag based forwarding enabled */
	uint16_t e_tag_ether_type; /* ether type for e-tag */
};

struct rte_flow {
	enum rte_filter_type filter_type;
	void *rule;
};

/* The configuration of bandwidth */
struct txgbe_bw_conf {
	uint8_t tc_num; /* Number of TCs. */
};

/* Struct to store Traffic Manager shaper profile. */
struct txgbe_tm_shaper_profile {
	TAILQ_ENTRY(txgbe_tm_shaper_profile) node;
	uint32_t shaper_profile_id;
	uint32_t reference_count;
	struct rte_tm_shaper_params profile;
};

TAILQ_HEAD(txgbe_shaper_profile_list, txgbe_tm_shaper_profile);

/* node type of Traffic Manager */
enum txgbe_tm_node_type {
	TXGBE_TM_NODE_TYPE_PORT,
	TXGBE_TM_NODE_TYPE_TC,
	TXGBE_TM_NODE_TYPE_QUEUE,
	TXGBE_TM_NODE_TYPE_MAX,
};

/* Struct to store Traffic Manager node configuration. */
struct txgbe_tm_node {
	TAILQ_ENTRY(txgbe_tm_node) node;
	uint32_t id;
	uint32_t priority;
	uint32_t weight;
	uint32_t reference_count;
	uint16_t no;
	struct txgbe_tm_node *parent;
	struct txgbe_tm_shaper_profile *shaper_profile;
	struct rte_tm_node_params params;
};

TAILQ_HEAD(txgbe_tm_node_list, txgbe_tm_node);

/* The configuration of Traffic Manager */
struct txgbe_tm_conf {
	struct txgbe_shaper_profile_list shaper_profile_list;
	struct txgbe_tm_node *root; /* root node - port */
	struct txgbe_tm_node_list tc_list; /* node list for all the TCs */
	struct txgbe_tm_node_list queue_list; /* node list for all the queues */
	/**
	 * The number of added TC nodes.
	 * It should be no more than the TC number of this port.
	 */
	uint32_t nb_tc_node;
	/**
	 * The number of added queue nodes.
	 * It should be no more than the queue number of this port.
	 */
	uint32_t nb_queue_node;
	/**
	 * This flag is used to check if the APP can change the TM node
	 * configuration.
	 * When it's true, the configuration has been applied to HW and
	 * the APP should not change it.
	 * As we don't support on-the-fly configuration, when starting
	 * the port, the APP should call the hierarchy_commit API to set
	 * this flag to true. When stopping the port, this flag should be
	 * set to false.
	 */
	bool committed;
};

/*
 * Structure to store private data for each driver instance (for each port).
 */
struct txgbe_adapter {
	struct txgbe_hw hw;
	struct txgbe_hw_stats stats;
	struct txgbe_hw_fdir_info fdir;
	struct txgbe_interrupt intr;
	struct txgbe_stat_mappings stat_mappings;
	struct txgbe_vfta shadow_vfta;
	struct txgbe_hwstrip hwstrip;
	struct txgbe_dcb_config dcb_config;
	struct txgbe_vf_info *vfdata;
	struct txgbe_uta_info uta_info;
	struct txgbe_filter_info filter;
	struct txgbe_l2_tn_info l2_tn;
	struct txgbe_bw_conf bw_conf;
#ifdef RTE_LIB_SECURITY
	struct txgbe_ipsec ipsec;
#endif
	bool rx_bulk_alloc_allowed;
	struct rte_timecounter systime_tc;
	struct rte_timecounter rx_tstamp_tc;
	struct rte_timecounter tx_tstamp_tc;
	struct txgbe_tm_conf tm_conf;

	/* For RSS reta table update */
	uint8_t rss_reta_updated;
};

#define TXGBE_DEV_ADAPTER(dev) \
	((struct txgbe_adapter *)(dev)->data->dev_private)

#define TXGBE_DEV_HW(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->hw)

#define TXGBE_DEV_STATS(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->stats)

#define TXGBE_DEV_INTR(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->intr)

#define TXGBE_DEV_FDIR(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->fdir)

#define TXGBE_DEV_STAT_MAPPINGS(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->stat_mappings)

#define TXGBE_DEV_VFTA(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->shadow_vfta)

#define TXGBE_DEV_HWSTRIP(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->hwstrip)

#define TXGBE_DEV_DCB_CONFIG(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->dcb_config)

#define TXGBE_DEV_VFDATA(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->vfdata)

#define TXGBE_DEV_MR_INFO(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->mr_data)

#define TXGBE_DEV_UTA_INFO(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->uta_info)

#define TXGBE_DEV_FILTER(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->filter)

#define TXGBE_DEV_L2_TN(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->l2_tn)

#define TXGBE_DEV_BW_CONF(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->bw_conf)

#define TXGBE_DEV_TM_CONF(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->tm_conf)

#define TXGBE_DEV_IPSEC(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->ipsec)
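
/*
 * Usage sketch (illustrative only): ops callbacks typically start by
 * fetching the needed sub-structures from dev_private, e.g.:
 *
 *	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 *	struct txgbe_hw_stats *stats = TXGBE_DEV_STATS(dev);
 */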

/*
 * RX/TX function prototypes
 */
void txgbe_dev_clear_queues(struct rte_eth_dev *dev);

void txgbe_dev_free_queues(struct rte_eth_dev *dev);

void txgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

void txgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);

int txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

int txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

uint32_t txgbe_dev_rx_queue_count(void *rx_queue);

int txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);

int txgbe_dev_rx_init(struct rte_eth_dev *dev);

void txgbe_dev_tx_init(struct rte_eth_dev *dev);

int txgbe_dev_rxtx_start(struct rte_eth_dev *dev);

void txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
void txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
void txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
void txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);

int txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);

int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);

int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

void txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo);

void txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo);

int txgbevf_dev_rx_init(struct rte_eth_dev *dev);

void txgbevf_dev_tx_init(struct rte_eth_dev *dev);

void txgbevf_dev_rxtx_start(struct rte_eth_dev *dev);

uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_recv_pkts_lro_single_alloc(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

uint16_t txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

uint16_t txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);

int txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf);

int txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf);

bool txgbe_rss_update_sp(enum txgbe_mac_type mac_type);

int txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter,
			bool add);
int txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add);
int txgbe_syn_filter_set(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter,
			bool add);

/**
 * l2 tunnel configuration.
 */
struct txgbe_l2_tunnel_conf {
	enum rte_eth_tunnel_type l2_tunnel_type;
	uint16_t ether_type; /* ether type in l2 header */
	uint32_t tunnel_id; /* port tag id for e-tag */
	uint16_t vf_id; /* VF id for tag insertion */
	uint32_t pool; /* destination pool for tag based forwarding */
};

int
txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel,
			       bool restore);
int
txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
			       struct txgbe_l2_tunnel_conf *l2_tunnel);
void txgbe_filterlist_init(void);
void txgbe_filterlist_flush(void);

void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
			uint8_t queue, uint8_t msix_vector);

/*
 * Flow director function prototypes
 */
int txgbe_fdir_configure(struct rte_eth_dev *dev);
int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
int txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
				    uint16_t offset);
int txgbe_fdir_filter_program(struct rte_eth_dev *dev,
			      struct txgbe_fdir_rule *rule,
			      bool del, bool update);

void txgbe_configure_pb(struct rte_eth_dev *dev);
void txgbe_configure_port(struct rte_eth_dev *dev);
void txgbe_configure_dcb(struct rte_eth_dev *dev);

int
txgbe_dev_link_update_share(struct rte_eth_dev *dev,
		int wait_to_complete);
int txgbe_pf_host_init(struct rte_eth_dev *eth_dev);

void txgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);

void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);

int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);

uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);

void txgbe_fdir_filter_restore(struct rte_eth_dev *dev);
int txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);

extern const struct rte_flow_ops txgbe_flow_ops;

void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);

int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			    uint16_t tx_rate, uint64_t q_msk);
int txgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
void txgbe_tm_conf_init(struct rte_eth_dev *dev);
void txgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
			       uint16_t tx_rate);
int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
			const struct rte_flow_action_rss *in);
int txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
			  const struct rte_flow_action_rss *with);
int txgbe_config_rss_filter(struct rte_eth_dev *dev,
			    struct txgbe_rte_flow_rss_conf *conf, bool add);

static inline int
txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info,
			      uint16_t ethertype)
{
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_filters[i].ethertype == ethertype &&
		    (filter_info->ethertype_mask & (1 << i)))
			return i;
	}
	return -1;
}

static inline int
txgbe_ethertype_filter_insert(struct txgbe_filter_info *filter_info,
			      struct txgbe_ethertype_filter *ethertype_filter)
{
	int i;

	for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
		if (filter_info->ethertype_mask & (1 << i))
			continue;

		filter_info->ethertype_mask |= 1 << i;
		filter_info->ethertype_filters[i].ethertype =
				ethertype_filter->ethertype;
		filter_info->ethertype_filters[i].etqf =
				ethertype_filter->etqf;
		filter_info->ethertype_filters[i].etqs =
				ethertype_filter->etqs;
		filter_info->ethertype_filters[i].conf =
				ethertype_filter->conf;
		break;
	}
	return (i < TXGBE_ETF_ID_MAX ? i : -1);
}

static inline void
txgbe_ethertype_filter_remove(struct txgbe_filter_info *filter_info,
			      uint8_t idx)
{
	if (idx >= TXGBE_ETF_ID_MAX)
		return;
	filter_info->ethertype_mask &= ~(1 << idx);
	filter_info->ethertype_filters[idx].ethertype = 0;
	filter_info->ethertype_filters[idx].etqf = 0;
	filter_info->ethertype_filters[idx].etqs = 0;
	filter_info->ethertype_filters[idx].conf = FALSE;
}
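
/*
 * Usage sketch (illustrative only): a typical add path first checks for a
 * duplicate entry, then claims a free slot; hypothetical locals shown:
 *
 *	int idx = txgbe_ethertype_filter_lookup(filter_info, ethertype);
 *	if (idx >= 0)
 *		return -EEXIST;
 *	idx = txgbe_ethertype_filter_insert(filter_info, &new_filter);
 *	if (idx < 0)
 *		return -ENOSPC;
 */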

#ifdef RTE_LIB_SECURITY
int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
#endif

/* High threshold controlling when to start sending XOFF frames. */
#define TXGBE_FC_XOFF_HITH	128 /* KB */
/* Low threshold controlling when to start sending XON frames. */
#define TXGBE_FC_XON_LOTH	64 /* KB */

/* Timer value included in XOFF frames. */
#define TXGBE_FC_PAUSE_TIME 0x680

#define TXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define TXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
#define TXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */

/*
 * Default values for RX/TX configuration
 */
#define TXGBE_DEFAULT_RX_FREE_THRESH	32
#define TXGBE_DEFAULT_RX_PTHRESH	8
#define TXGBE_DEFAULT_RX_HTHRESH	8
#define TXGBE_DEFAULT_RX_WTHRESH	0

#define TXGBE_DEFAULT_TX_FREE_THRESH	32
#define TXGBE_DEFAULT_TX_PTHRESH	32
#define TXGBE_DEFAULT_TX_HTHRESH	0
#define TXGBE_DEFAULT_TX_WTHRESH	0

/* Additional timesync values. */
#define NSEC_PER_SEC	1000000000L
#define TXGBE_INCVAL_10GB	0xCCCCCC
#define TXGBE_INCVAL_1GB	0x800000
#define TXGBE_INCVAL_100	0xA00000
#define TXGBE_INCVAL_10		0xC7F380
#define TXGBE_INCVAL_FPGA	0x800000
#define TXGBE_INCVAL_SHIFT_10GB	20
#define TXGBE_INCVAL_SHIFT_1GB	18
#define TXGBE_INCVAL_SHIFT_100	15
#define TXGBE_INCVAL_SHIFT_10	12
#define TXGBE_INCVAL_SHIFT_FPGA	17

#define TXGBE_CYCLECOUNTER_MASK	0xffffffffffffffffULL

/* store statistics names and their offsets in the stats structure */
struct rte_txgbe_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

const uint32_t *txgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
int txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			       struct rte_ether_addr *mc_addr_set,
			       uint32_t nb_mc_addr);
int txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
int txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size);
void txgbe_dev_setup_link_alarm_handler(void *param);
void txgbe_read_stats_registers(struct txgbe_hw *hw,
				struct txgbe_hw_stats *hw_stats);

void txgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
void txgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
void txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
void txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
		uint16_t queue, bool on);
void txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
		int mask);

#endif /* _TXGBE_ETHDEV_H_ */