1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2019 Hisilicon Limited.
5 #ifndef _HNS3_ETHDEV_H_
6 #define _HNS3_ETHDEV_H_
14 #include "hns3_fdir.h"
15 #include "hns3_stats.h"
/* PCI vendor/device IDs of the HiSilicon network engine (PF and VF). */
#define PCI_VENDOR_ID_HUAWEI 0x19e5
#define HNS3_DEV_ID_GE 0xA220
#define HNS3_DEV_ID_25GE 0xA221
#define HNS3_DEV_ID_25GE_RDMA 0xA222
#define HNS3_DEV_ID_50GE_RDMA 0xA224
#define HNS3_DEV_ID_100G_RDMA_MACSEC 0xA226
#define HNS3_DEV_ID_200G_RDMA 0xA228
/* 0xA22E/0xA22F are the VF variants. */
#define HNS3_DEV_ID_100G_VF 0xA22E
#define HNS3_DEV_ID_100G_RDMA_PFC_VF 0xA22F
30 /* PCI Config offsets */
31 #define HNS3_PCI_REVISION_ID 0x08
32 #define HNS3_PCI_REVISION_ID_LEN 1
34 #define PCI_REVISION_ID_HIP08_B 0x21
35 #define PCI_REVISION_ID_HIP09_A 0x30
37 #define HNS3_PF_FUNC_ID 0
38 #define HNS3_1ST_VF_FUNC_ID 1
40 #define HNS3_UC_MACADDR_NUM 128
41 #define HNS3_VF_UC_MACADDR_NUM 48
42 #define HNS3_MC_MACADDR_NUM 128
44 #define HNS3_MAX_BD_SIZE 65535
45 #define HNS3_MAX_NON_TSO_BD_PER_PKT 8
46 #define HNS3_MAX_TSO_BD_PER_PKT 63
47 #define HNS3_MAX_FRAME_LEN 9728
48 #define HNS3_VLAN_TAG_SIZE 4
49 #define HNS3_DEFAULT_RX_BUF_LEN 2048
50 #define HNS3_MAX_BD_PAYLEN (1024 * 1024 - 1)
51 #define HNS3_MAX_TSO_HDR_SIZE 512
52 #define HNS3_MAX_TSO_HDR_BD_NUM 3
53 #define HNS3_MAX_LRO_SIZE 64512
/*
 * L2 overhead added on top of the MTU: Ethernet header, CRC, and room for
 * two VLAN tags (QinQ).
 */
#define HNS3_ETH_OVERHEAD \
	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + HNS3_VLAN_TAG_SIZE * 2)
/* Convert an on-wire frame length into the corresponding MTU. */
#define HNS3_PKTLEN_TO_MTU(pktlen) ((pktlen) - HNS3_ETH_OVERHEAD)
#define HNS3_MAX_MTU (HNS3_MAX_FRAME_LEN - HNS3_ETH_OVERHEAD)
#define HNS3_DEFAULT_MTU 1500UL
#define HNS3_DEFAULT_FRAME_LEN (HNS3_DEFAULT_MTU + HNS3_ETH_OVERHEAD)
/* Minimum packet length; NOTE(review): presumably excludes CRC — confirm. */
#define HNS3_MIN_PKT_SIZE 60
66 #define HNS3_MAX_PF_NUM 8
67 #define HNS3_UMV_TBL_SIZE 3072
68 #define HNS3_DEFAULT_UMV_SPACE_PER_PF \
69 (HNS3_UMV_TBL_SIZE / HNS3_MAX_PF_NUM)
71 #define HNS3_PF_CFG_BLOCK_SIZE 32
72 #define HNS3_PF_CFG_DESC_NUM \
73 (HNS3_PF_CFG_BLOCK_SIZE / HNS3_CFG_RD_LEN_BYTES)
75 #define HNS3_DEFAULT_ENABLE_PFC_NUM 0
77 #define HNS3_INTR_UNREG_FAIL_RETRY_CNT 5
78 #define HNS3_INTR_UNREG_FAIL_DELAY_MS 500
80 #define HNS3_QUIT_RESET_CNT 10
81 #define HNS3_QUIT_RESET_DELAY_MS 100
83 #define HNS3_POLL_RESPONE_MS 1
85 #define HNS3_MAX_USER_PRIO 8
95 #define HNS3_SCH_MODE_SP 0
96 #define HNS3_SCH_MODE_DWRR 1
99 uint8_t pg_sch_mode; /* 0: sp; 1: dwrr */
102 uint8_t tc_dwrr[HNS3_MAX_TC_NUM];
105 struct hns3_tc_info {
107 uint8_t tc_sch_mode; /* 0: sp; 1: dwrr */
uint8_t up_to_tc_map; /* user priority mapping on the TC */
113 struct hns3_dcb_info {
115 uint8_t num_pg; /* It must be 1 if vNET-Base schd */
116 uint8_t pg_dwrr[HNS3_PG_NUM];
117 uint8_t prio_tc[HNS3_MAX_USER_PRIO];
118 struct hns3_pg_info pg_info[HNS3_PG_NUM];
119 struct hns3_tc_info tc_info[HNS3_MAX_TC_NUM];
120 uint8_t hw_pfc_map; /* Allow for packet drop or not on this TC */
121 uint8_t pfc_en; /* Pfc enabled or not for user priority */
124 enum hns3_fc_status {
126 HNS3_FC_STATUS_MAC_PAUSE,
130 struct hns3_tc_queue_info {
131 uint8_t tqp_offset; /* TQP offset from base TQP */
132 uint8_t tqp_count; /* Total TQPs */
133 uint8_t tc; /* TC index */
	bool enable;	/* Whether this TC is enabled */
138 uint8_t vmdq_vport_num;
140 uint16_t tqp_desc_num;
142 uint16_t rss_size_max;
145 uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
146 uint8_t default_speed;
147 uint32_t numa_node_map;
148 uint8_t speed_ability;
153 enum hns3_media_type {
154 HNS3_MEDIA_TYPE_UNKNOWN,
155 HNS3_MEDIA_TYPE_FIBER,
156 HNS3_MEDIA_TYPE_COPPER,
157 HNS3_MEDIA_TYPE_BACKPLANE,
158 HNS3_MEDIA_TYPE_NONE,
162 uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
bool default_addr_setted; /* whether the default addr (mac_addr) is set */
166 uint8_t link_duplex : 1; /* ETH_LINK_[HALF/FULL]_DUPLEX */
167 uint8_t link_autoneg : 1; /* ETH_LINK_[AUTONEG/FIXED] */
168 uint8_t link_status : 1; /* ETH_LINK_[DOWN/UP] */
169 uint32_t link_speed; /* ETH_SPEED_NUM_ */
172 struct hns3_fake_queue_data {
173 void **rx_queues; /* Array of pointers to fake RX queues. */
174 void **tx_queues; /* Array of pointers to fake TX queues. */
175 uint16_t nb_fake_rx_queues; /* Number of fake RX queues. */
176 uint16_t nb_fake_tx_queues; /* Number of fake TX queues. */
179 #define HNS3_PORT_BASE_VLAN_DISABLE 0
180 #define HNS3_PORT_BASE_VLAN_ENABLE 1
181 struct hns3_port_base_vlan_config {
186 /* Primary process maintains driver state in main thread.
189 * | UNINITIALIZED |<-----------+
190 * +---------------+ |
191 * |.eth_dev_init |.eth_dev_uninit
193 * +---------------+------------+
195 * +---------------+<-----------<---------------+
196 * |.dev_configure | |
198 * +---------------+------------+ |
200 * +---------------+----+ |
202 * | | +---------------+
204 * | | +---------------+
206 * V |.dev_configure |
207 * +---------------+----+ |.dev_close
208 * | CONFIGURED |----------------------------+
209 * +---------------+<-----------+
212 * +---------------+ |
213 * | STARTING |------------^
214 * +---------------+ failed |
216 * | +---------------+
218 * | +---------------+
221 * +---------------+------------+
225 enum hns3_adapter_state {
226 HNS3_NIC_UNINITIALIZED = 0,
227 HNS3_NIC_INITIALIZED,
228 HNS3_NIC_CONFIGURING,
239 /* Reset various stages, execute in order */
240 enum hns3_reset_stage {
241 /* Stop query services, stop transceiver, disable MAC */
243 /* Clear reset completion flags, disable send command */
245 /* Inform IMP to start resetting */
246 RESET_STAGE_REQ_HW_RESET,
247 /* Waiting for hardware reset to complete */
249 /* Reinitialize hardware */
250 RESET_STAGE_DEV_INIT,
251 /* Restore user settings and enable MAC */
253 /* Restart query services, start transceiver */
255 /* Not in reset state */
259 enum hns3_reset_level {
261 HNS3_VF_FUNC_RESET, /* A VF function reset */
263 * All VFs under a PF perform function reset.
264 * Kernel PF driver use mailbox to inform DPDK VF to do reset, the value
265 * of the reset level and the one defined in kernel driver should be
268 HNS3_VF_PF_FUNC_RESET = 2,
270 * All VFs under a PF perform FLR reset.
271 * Kernel PF driver use mailbox to inform DPDK VF to do reset, the value
272 * of the reset level and the one defined in kernel driver should be
275 HNS3_VF_FULL_RESET = 3,
276 HNS3_FLR_RESET, /* A VF perform FLR reset */
277 /* All VFs under the rootport perform a global or IMP reset */
279 HNS3_FUNC_RESET, /* A PF function reset */
280 /* All PFs under the rootport perform a global reset */
282 HNS3_IMP_RESET, /* All PFs under the rootport perform a IMP reset */
286 enum hns3_wait_result {
293 #define HNS3_RESET_SYNC_US 100000
295 struct hns3_reset_stats {
296 uint64_t request_cnt; /* Total request reset times */
297 uint64_t global_cnt; /* Total GLOBAL reset times */
298 uint64_t imp_cnt; /* Total IMP reset times */
299 uint64_t exec_cnt; /* Total reset executive times */
300 uint64_t success_cnt; /* Total reset successful times */
301 uint64_t fail_cnt; /* Total reset failed times */
302 uint64_t merge_cnt; /* Total merged in high reset times */
305 typedef bool (*check_completion_func)(struct hns3_hw *hw);
307 struct hns3_wait_data {
312 enum hns3_wait_result result;
313 check_completion_func check_completion;
316 struct hns3_reset_ops {
317 void (*reset_service)(void *arg);
318 int (*stop_service)(struct hns3_adapter *hns);
319 int (*prepare_reset)(struct hns3_adapter *hns);
320 int (*wait_hardware_ready)(struct hns3_adapter *hns);
321 int (*reinit_dev)(struct hns3_adapter *hns);
322 int (*restore_conf)(struct hns3_adapter *hns);
323 int (*start_service)(struct hns3_adapter *hns);
333 struct hns3_reset_data {
334 enum hns3_reset_stage stage;
335 rte_atomic16_t schedule;
336 /* Reset flag, covering the entire reset process */
337 rte_atomic16_t resetting;
338 /* Used to disable sending cmds during reset */
339 rte_atomic16_t disable_cmd;
340 /* The reset level being processed */
341 enum hns3_reset_level level;
342 /* Reset level set, each bit represents a reset level */
344 /* Request reset level set, from interrupt or mailbox */
346 int attempts; /* Reset failure retry */
347 int retries; /* Timeout failure retry in reset_post */
 * At the time of global or IMP reset, the command cannot be sent to
 * stop the tx/rx queues. Tx/Rx queues may still access mbufs during the
 * reset process, so the mbufs must be released after the reset is
 * completed. The mbuf_deferred_free flag marks whether the mbufs need
 * to be released.
355 bool mbuf_deferred_free;
356 struct timeval start_time;
357 struct hns3_reset_stats stats;
358 const struct hns3_reset_ops *ops;
359 struct hns3_wait_data *wait_data;
362 #define HNS3_INTR_MAPPING_VEC_RSV_ONE 0
363 #define HNS3_INTR_MAPPING_VEC_ALL 1
365 #define HNS3_INTR_COALESCE_NON_QL 0
366 #define HNS3_INTR_COALESCE_QL 1
368 #define HNS3_INTR_COALESCE_GL_UINT_2US 0
369 #define HNS3_INTR_COALESCE_GL_UINT_1US 1
371 struct hns3_queue_intr {
373 * interrupt mapping mode.
375 * HNS3_INTR_MAPPING_VEC_RSV_ONE/HNS3_INTR_MAPPING_VEC_ALL
377 * - HNS3_INTR_MAPPING_VEC_RSV_ONE
378 * For some versions of hardware network engine, because of the
379 * hardware constraint, we need implement clearing the mapping
380 * relationship configurations by binding all queues to the last
381 * interrupt vector and reserving the last interrupt vector. This
382 * method results in a decrease of the maximum queues when upper
383 * applications call the rte_eth_dev_configure API function to
384 * enable Rx interrupt.
386 * - HNS3_INTR_MAPPING_VEC_ALL
 * PMD driver can map/unmap all interrupt vectors with queues when
 * Rx interrupt is enabled.
390 uint8_t mapping_mode;
392 * interrupt coalesce mode.
394 * HNS3_INTR_COALESCE_NON_QL/HNS3_INTR_COALESCE_QL
396 * - HNS3_INTR_COALESCE_NON_QL
397 * For some versions of hardware network engine, hardware doesn't
 * support QL(quantity limiter) algorithm for interrupt coalesce
399 * of queue's interrupt.
401 * - HNS3_INTR_COALESCE_QL
 * In this mode, hardware supports QL(quantity limiter) algorithm for
403 * interrupt coalesce of queue's interrupt.
405 uint8_t coalesce_mode;
407 * The unit of GL(gap limiter) configuration for interrupt coalesce of
410 * HNS3_INTR_COALESCE_GL_UINT_2US/HNS3_INTR_COALESCE_GL_UINT_1US
416 struct rte_eth_dev_data *data;
418 uint8_t revision; /* PCI revision, low byte of class word */
420 struct hns3_mbx_resp_status mbx_resp; /* mailbox response */
421 struct hns3_mbx_arq_ring arq; /* mailbox async rx queue */
422 pthread_t irq_thread_id;
424 unsigned int secondary_cnt; /* Number of secondary processes init'd. */
425 struct hns3_tqp_stats tqp_stats;
426 /* Include Mac stats | Rx stats | Tx stats */
427 struct hns3_mac_stats mac_stats;
431 uint16_t total_tqps_num; /* total task queue pairs of this PF */
432 uint16_t tqps_num; /* num task queue pairs of this function */
433 uint16_t intr_tqps_num; /* num queue pairs mapping interrupt */
434 uint16_t rss_size_max; /* HW defined max RSS task queue */
435 uint16_t num_tx_desc; /* desc num of per tx queue */
436 uint16_t num_rx_desc; /* desc num of per rx queue */
437 uint32_t mng_entry_num; /* number of manager table entry */
438 uint32_t mac_entry_num; /* number of mac-vlan table entry */
440 struct rte_ether_addr mc_addrs[HNS3_MC_MACADDR_NUM];
441 int mc_addrs_num; /* Multicast mac addresses number */
443 /* The configuration info of RSS */
444 struct hns3_rss_conf rss_info;
445 bool rss_dis_flag; /* disable rss flag. true: disable, false: enable */
446 uint16_t rss_ind_tbl_size;
447 uint16_t rss_key_size;
449 uint8_t num_tc; /* Total number of enabled TCs */
451 enum hns3_fc_mode current_mode;
452 enum hns3_fc_mode requested_mode;
453 struct hns3_dcb_info dcb_info;
454 enum hns3_fc_status current_fc_status; /* current flow control status */
455 struct hns3_tc_queue_info tc_queue[HNS3_MAX_TC_NUM];
456 uint16_t used_rx_queues;
457 uint16_t used_tx_queues;
459 /* Config max queue numbers between rx and tx queues from user */
460 uint16_t cfg_max_queues;
461 struct hns3_fake_queue_data fkq_data; /* fake queue data */
462 uint16_t alloc_rss_size; /* RX queue number per TC */
463 uint16_t tx_qnum_per_tc; /* TX queue number per TC */
466 uint32_t max_tm_rate;
468 struct hns3_queue_intr intr;
470 uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
472 struct hns3_port_base_vlan_config port_base_vlan_cfg;
474 * PMD setup and configuration is not thread safe. Since it is not
475 * performance sensitive, it is better to guarantee thread-safety
476 * and add device level lock. Adapter control operations which
477 * change its state should acquire the lock.
480 enum hns3_adapter_state adapter_state;
481 struct hns3_reset_data reset;
484 #define HNS3_FLAG_TC_BASE_SCH_MODE 1
485 #define HNS3_FLAG_VNET_BASE_SCH_MODE 2
487 struct hns3_err_msix_intr_stats {
488 uint64_t mac_afifo_tnl_intr_cnt;
489 uint64_t ppu_mpf_abnormal_intr_st2_cnt;
490 uint64_t ssu_port_based_pf_intr_cnt;
491 uint64_t ppp_pf_abnormal_intr_cnt;
492 uint64_t ppu_pf_abnormal_intr_cnt;
495 /* vlan entry information. */
496 struct hns3_user_vlan_table {
497 LIST_ENTRY(hns3_user_vlan_table) next;
502 /* Vlan tag configuration for RX direction */
503 struct hns3_rx_vtag_cfg {
504 uint8_t rx_vlan_offload_en; /* Whether enable rx vlan offload */
505 uint8_t strip_tag1_en; /* Whether strip inner vlan tag */
506 uint8_t strip_tag2_en; /* Whether strip outer vlan tag */
507 uint8_t vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */
508 uint8_t vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */
511 /* Vlan tag configuration for TX direction */
512 struct hns3_tx_vtag_cfg {
513 bool accept_tag1; /* Whether accept tag1 packet from host */
514 bool accept_untag1; /* Whether accept untag1 packet from host */
517 bool insert_tag1_en; /* Whether insert inner vlan tag */
518 bool insert_tag2_en; /* Whether insert outer vlan tag */
519 uint16_t default_tag1; /* The default inner vlan tag to insert */
520 uint16_t default_tag2; /* The default outer vlan tag to insert */
523 struct hns3_vtag_cfg {
524 struct hns3_rx_vtag_cfg rx_vcfg;
525 struct hns3_tx_vtag_cfg tx_vcfg;
528 /* Request types for IPC. */
529 enum hns3_mp_req_type {
530 HNS3_MP_REQ_START_RXTX = 1,
531 HNS3_MP_REQ_STOP_RXTX,
/* Parameters for IPC. */
536 struct hns3_mp_param {
537 enum hns3_mp_req_type type;
542 /* Request timeout for IPC. */
543 #define HNS3_MP_REQ_TIMEOUT_SEC 5
545 /* Key string for IPC. */
546 #define HNS3_MP_NAME "net_hns3_mp"
549 struct hns3_adapter *adapter;
551 uint16_t func_num; /* num functions of this pf, include pf and vfs */
553 uint32_t pkt_buf_size; /* Total pf buf size for tx/rx */
554 uint32_t tx_buf_size; /* Tx buffer size for each TC */
555 uint32_t dv_buf_size; /* Dv buffer size for each TC */
557 uint16_t mps; /* Max packet size */
560 uint8_t tc_max; /* max number of tc driver supported */
561 uint8_t local_max_tc; /* max number of local tc */
563 uint8_t prio_tc[HNS3_MAX_USER_PRIO]; /* TC indexed by prio */
565 bool support_fc_autoneg; /* support FC autonegotiate */
567 uint16_t wanted_umv_size;
568 uint16_t max_umv_size;
569 uint16_t used_umv_size;
571 /* Statistics information for abnormal interrupt */
572 struct hns3_err_msix_intr_stats abn_int_stats;
574 bool support_sfp_query;
576 struct hns3_vtag_cfg vtag_config;
577 LIST_HEAD(vlan_tbl, hns3_user_vlan_table) vlan_list;
579 struct hns3_fdir_info fdir; /* flow director info */
580 LIST_HEAD(counters, hns3_flow_counter) flow_counters;
584 struct hns3_adapter *adapter;
587 struct hns3_adapter {
590 /* Specific for PF or VF */
591 bool is_vf; /* false - PF, true - VF */
/* Bit indexes into hw->capability describing optional hardware features. */
#define HNS3_DEV_SUPPORT_DCB_B 0x0
#define HNS3_DEV_SUPPORT_COPPER_B 0x1
#define HNS3_DEV_SUPPORT_UDP_GSO_B 0x2
#define HNS3_DEV_SUPPORT_ADQ_B 0x3
#define HNS3_DEV_SUPPORT_PTP_B 0x4
#define HNS3_DEV_SUPPORT_TX_PUSH_B 0x5
#define HNS3_DEV_SUPPORT_INDEP_TXRX_B 0x6
#define HNS3_DEV_SUPPORT_STASH_B 0x7
/* Support DCB (Data Center Bridging) */
#define hns3_dev_dcb_supported(hw) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_DCB_B)

/* Support copper media type */
#define hns3_dev_copper_supported(hw) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_COPPER_B)

/* Support UDP GSO offload */
#define hns3_dev_udp_gso_supported(hw) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_UDP_GSO_B)

/* Support Application Device Queue */
#define hns3_dev_adq_supported(hw) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_ADQ_B)

/* Support PTP timestamp offload */
#define hns3_dev_ptp_supported(hw) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_PTP_B)

/* Support Tx push mode */
#define hns3_dev_tx_push_supported(hw) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TX_PUSH_B)

/* Support to Independently enable/disable/reset Tx or Rx queues */
#define hns3_dev_indep_txrx_supported(hw) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B)

/* Support device stash */
#define hns3_dev_stash_supported(hw) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_STASH_B)
/* Helpers to reach driver-internal structures from ethdev private data. */
#define HNS3_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct hns3_adapter *)adapter)->hw)
#define HNS3_DEV_PRIVATE_TO_ADAPTER(adapter) \
	((struct hns3_adapter *)adapter)
#define HNS3_DEV_PRIVATE_TO_PF(adapter) \
	(&((struct hns3_adapter *)adapter)->pf)
#define HNS3VF_DEV_PRIVATE_TO_VF(adapter) \
	(&((struct hns3_adapter *)adapter)->vf)
/* Recover the enclosing hns3_adapter from an embedded hns3_hw pointer. */
#define HNS3_DEV_HW_TO_ADAPTER(hw) \
	container_of(hw, struct hns3_adapter, hw)
647 #define hns3_set_field(origin, mask, shift, val) \
649 (origin) &= (~(mask)); \
650 (origin) |= ((val) << (shift)) & (mask); \
/* Extract the field selected by @mask (a pre-shifted mask) from @origin. */
#define hns3_get_field(origin, mask, shift) \
	(((origin) & (mask)) >> (shift))
/* Write bit @shift of @origin to @val (0 or 1). */
#define hns3_set_bit(origin, shift, val) \
	hns3_set_field((origin), (0x1UL << (shift)), (shift), (val))
/* Read bit @shift of @origin. */
#define hns3_get_bit(origin, shift) \
	hns3_get_field((origin), (0x1UL << (shift)), (shift))
/*
 * upper_32_bits - return bits 32-63 of a number.
 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * only 32 bits wide: two 16-bit shifts never exceed the type's width.
 */
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

/* lower_32_bits - return bits 0-31 of a number */
#define lower_32_bits(n) ((uint32_t)(n))

/* Single-bit mask with bit @nr set (unsigned long arithmetic). */
#define BIT(nr) (1UL << (nr))
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
/* Contiguous bitmask covering bits h..l inclusive (requires h >= l). */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

/* Round x up/down to a multiple of y; y must be non-zero. */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define rounddown(x, y) ((x) - ((x) % (y)))

/* Integer ceiling of n / d. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
681 #define max_t(type, x, y) ({ \
684 __max1 > __max2 ? __max1 : __max2; })
686 static inline void hns3_write_reg(void *base, uint32_t reg, uint32_t value)
688 rte_write32(value, (volatile void *)((char *)base + reg));
691 static inline uint32_t hns3_read_reg(void *base, uint32_t reg)
693 return rte_read32((volatile void *)((char *)base + reg));
/* Register access relative to the device's mapped I/O base (io_base). */
#define hns3_write_dev(a, reg, value) \
	hns3_write_reg((a)->io_base, (reg), (value))

#define hns3_read_dev(a, reg) \
	hns3_read_reg((a)->io_base, (reg))

/* Element count of a true array; not valid on a pointer. */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
704 #define NEXT_ITEM_OF_ACTION(act, actions, index) \
706 act = (actions) + (index); \
707 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
709 act = actions + index; \
713 #define MSEC_PER_SEC 1000L
714 #define USEC_PER_MSEC 1000L
716 static inline uint64_t
717 get_timeofday_ms(void)
721 (void)gettimeofday(&tv, NULL);
723 return (uint64_t)tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC;
726 static inline uint64_t
727 hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr)
731 res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0;
736 hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr)
738 __atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED);
742 hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr)
744 __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
747 static inline int64_t
748 hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr)
750 uint64_t mask = (1UL << nr);
752 return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask;
/* Implementations live in the PF/VF .c files; bodies not visible here. */
int hns3_buffer_alloc(struct hns3_hw *hw);
/* Generic filter control entry, dispatched on filter_type/filter_op. */
int hns3_dev_filter_ctrl(struct rte_eth_dev *dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op, void *arg);
/* PF/VF variants queried by is_reset_pending() below. */
bool hns3_is_reset_pending(struct hns3_adapter *hns);
bool hns3vf_is_reset_pending(struct hns3_adapter *hns);
void hns3_update_link_status(struct hns3_hw *hw);
764 is_reset_pending(struct hns3_adapter *hns)
768 ret = hns3vf_is_reset_pending(hns);
770 ret = hns3_is_reset_pending(hns);
774 static inline uint64_t
775 hns3_txvlan_cap_get(struct hns3_hw *hw)
777 if (hw->port_base_vlan_cfg.state)
778 return DEV_TX_OFFLOAD_VLAN_INSERT;
780 return DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT;
783 #endif /* _HNS3_ETHDEV_H_ */