/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 */

#include <rte_kvargs.h>
#include <rte_bus_pci.h>
#include <ethdev_pci.h>

#include "hns3_common.h"
#include "hns3_logs.h"
#include "hns3_regs.h"
#include "hns3_rxtx.h"
int
hns3_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
		    size_t fw_size)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint32_t version = hw->fw_version;
	int ret;

	ret = snprintf(fw_version, fw_size, "%lu.%lu.%lu.%lu",
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
				      HNS3_FW_VERSION_BYTE3_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE2_M,
				      HNS3_FW_VERSION_BYTE2_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE1_M,
				      HNS3_FW_VERSION_BYTE1_S),
		       hns3_get_field(version, HNS3_FW_VERSION_BYTE0_M,
				      HNS3_FW_VERSION_BYTE0_S));
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}
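
/*
 * Example (a sketch, assuming the usual one-byte-per-field layout of
 * hw->fw_version): the 32-bit firmware version packs four byte-wide
 * fields, so a value of 0x01020304 is formatted as "1.2.3.4". The
 * HNS3_FW_VERSION_BYTE*_M/_S masks and shifts each select one byte;
 * the equivalent stand-alone decode would be:
 *
 *	uint32_t v = 0x01020304;
 *	printf("%u.%u.%u.%u\n", (v >> 24) & 0xff, (v >> 16) & 0xff,
 *	       (v >> 8) & 0xff, v & 0xff);
 */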
int
hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
{
	struct hns3_adapter *hns = eth_dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint16_t queue_num = hw->tqps_num;

	/*
	 * In interrupt mode, 'max_rx_queues' is set based on the number of
	 * MSI-X interrupt resources of the hardware.
	 */
	if (hw->data->dev_conf.intr_conf.rxq == 1)
		queue_num = hw->intr_tqps_num;

	info->max_rx_queues = queue_num;
	info->max_tx_queues = hw->tqps_num;
	info->max_rx_pktlen = HNS3_MAX_FRAME_LEN; /* CRC included */
	info->min_rx_bufsize = HNS3_MIN_BD_BUF_SIZE;
	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
	info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
				 RTE_ETH_RX_OFFLOAD_SCATTER |
				 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
				 RTE_ETH_RX_OFFLOAD_RSS_HASH |
				 RTE_ETH_RX_OFFLOAD_TCP_LRO);
	info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
				 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
				 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
				 RTE_ETH_TX_OFFLOAD_TCP_TSO |
				 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
				 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
				 RTE_ETH_TX_OFFLOAD_VLAN_INSERT);

	if (!hw->port_base_vlan_cfg.state)
		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;

	if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM))
		info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;

	if (hns3_dev_get_support(hw, INDEP_TXRX))
		info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
				 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
	info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;

	if (hns3_dev_get_support(hw, PTP))
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;

	info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
	};

	info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = HNS3_MAX_RING_DESC,
		.nb_min = HNS3_MIN_RING_DESC,
		.nb_align = HNS3_ALIGN_RING_DESC,
		.nb_seg_max = HNS3_MAX_TSO_BD_PER_PKT,
		.nb_mtu_seg_max = hw->max_non_tso_bd_num,
	};

	info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
		/*
		 * If there are no available Rx buffer descriptors, incoming
		 * packets are always dropped by the hns3 hardware.
		 */
		.rx_drop_en = 1,
		.offloads = 0,
	};
	info->default_txconf = (struct rte_eth_txconf) {
		.tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
		.offloads = 0,
	};

	info->reta_size = hw->rss_ind_tbl_size;
	info->hash_key_size = HNS3_RSS_KEY_SIZE;
	info->flow_type_rss_offloads = HNS3_ETH_RSS_SUPPORT;

	info->default_rxportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_txportconf.burst_size = HNS3_DEFAULT_PORT_CONF_BURST_SIZE;
	info->default_rxportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_txportconf.nb_queues = HNS3_DEFAULT_PORT_CONF_QUEUES_NUM;
	info->default_rxportconf.ring_size = HNS3_DEFAULT_RING_DESC;
	info->default_txportconf.ring_size = HNS3_DEFAULT_RING_DESC;

	/*
	 * Next is the PF/VF difference section.
	 */
	if (!hns->is_vf) {
		info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
		info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
		info->speed_capa = hns3_get_speed_capa(hw);
	} else {
		info->max_mac_addrs = HNS3_VF_UC_MACADDR_NUM;
	}

	return 0;
}
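
/*
 * Usage sketch (port id is hypothetical): applications read the limits
 * and capability flags filled in above through the generic ethdev API,
 * which dispatches to this callback:
 *
 *	struct rte_eth_dev_info dev_info;
 *	if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
 *	    (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO))
 *		enable_lro_in_port_conf();
 *
 * enable_lro_in_port_conf() stands in for application logic.
 */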
static int
hns3_parse_io_hint_func(const char *key, const char *value, void *extra_args)
{
	uint32_t hint = HNS3_IO_FUNC_HINT_NONE;

	RTE_SET_USED(key);

	if (strcmp(value, "vec") == 0)
		hint = HNS3_IO_FUNC_HINT_VEC;
	else if (strcmp(value, "sve") == 0)
		hint = HNS3_IO_FUNC_HINT_SVE;
	else if (strcmp(value, "simple") == 0)
		hint = HNS3_IO_FUNC_HINT_SIMPLE;
	else if (strcmp(value, "common") == 0)
		hint = HNS3_IO_FUNC_HINT_COMMON;

	/* If the hint is valid then update output parameters */
	if (hint != HNS3_IO_FUNC_HINT_NONE)
		*(uint32_t *)extra_args = hint;

	return 0;
}
static const char *
hns3_get_io_hint_func_name(uint32_t hint)
{
	switch (hint) {
	case HNS3_IO_FUNC_HINT_VEC:
		return "vec";
	case HNS3_IO_FUNC_HINT_SVE:
		return "sve";
	case HNS3_IO_FUNC_HINT_SIMPLE:
		return "simple";
	case HNS3_IO_FUNC_HINT_COMMON:
		return "common";
	default:
		return "unknown";
	}
}
static int
hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args)
{
	uint64_t val;

	RTE_SET_USED(key);

	val = strtoull(value, NULL, HNS3_CONVERT_TO_HEXADECIMAL);
	*(uint64_t *)extra_args = val;

	return 0;
}
static int
hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args)
{
	uint32_t val;

	RTE_SET_USED(key);

	val = strtoul(value, NULL, HNS3_CONVERT_TO_DECIMAL);

	/*
	 * 500ms is an empirical value in the process of mailbox communication.
	 * If the delay value is set lower than the empirical value, mailbox
	 * communication may fail.
	 */
	if (val > HNS3_MBX_DEF_TIME_LIMIT_MS && val <= UINT16_MAX)
		*(uint16_t *)extra_args = val;

	return 0;
}
void
hns3_parse_devargs(struct rte_eth_dev *dev)
{
	uint16_t mbx_time_limit_ms = HNS3_MBX_DEF_TIME_LIMIT_MS;
	struct hns3_adapter *hns = dev->data->dev_private;
	uint32_t rx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	uint32_t tx_func_hint = HNS3_IO_FUNC_HINT_NONE;
	struct hns3_hw *hw = &hns->hw;
	uint64_t dev_caps_mask = 0;
	struct rte_kvargs *kvlist;

	if (dev->device->devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(dev->device->devargs->args, NULL);
	if (kvlist == NULL)
		return;

	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_RX_FUNC_HINT,
				 &hns3_parse_io_hint_func, &rx_func_hint);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_TX_FUNC_HINT,
				 &hns3_parse_io_hint_func, &tx_func_hint);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_DEV_CAPS_MASK,
				 &hns3_parse_dev_caps_mask, &dev_caps_mask);
	(void)rte_kvargs_process(kvlist, HNS3_DEVARG_MBX_TIME_LIMIT_MS,
				 &hns3_parse_mbx_time_limit, &mbx_time_limit_ms);
	rte_kvargs_free(kvlist);

	if (rx_func_hint != HNS3_IO_FUNC_HINT_NONE)
		hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_RX_FUNC_HINT,
			  hns3_get_io_hint_func_name(rx_func_hint));
	hns->rx_func_hint = rx_func_hint;
	if (tx_func_hint != HNS3_IO_FUNC_HINT_NONE)
		hns3_warn(hw, "parsed %s = %s.", HNS3_DEVARG_TX_FUNC_HINT,
			  hns3_get_io_hint_func_name(tx_func_hint));
	hns->tx_func_hint = tx_func_hint;

	if (dev_caps_mask != 0)
		hns3_warn(hw, "parsed %s = 0x%" PRIx64 ".",
			  HNS3_DEVARG_DEV_CAPS_MASK, dev_caps_mask);
	hns->dev_caps_mask = dev_caps_mask;

	if (mbx_time_limit_ms != HNS3_MBX_DEF_TIME_LIMIT_MS)
		hns3_warn(hw, "parsed %s = %u.", HNS3_DEVARG_MBX_TIME_LIMIT_MS,
			  mbx_time_limit_ms);
	hns->mbx_time_limit_ms = mbx_time_limit_ms;
}
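
/*
 * Usage sketch (device address and values are examples): the devargs
 * parsed above are passed per device on the EAL command line, e.g.
 *
 *	dpdk-testpmd -a 0000:7d:00.0,rx_func_hint=vec,mbx_time_limit_ms=600
 *
 * rte_kvargs_parse() splits the comma-separated key=value pairs and
 * rte_kvargs_process() feeds each value to the matching callback above.
 */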
void
hns3_clock_gettime(struct timeval *tv)
{
#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
#define CLOCK_TYPE CLOCK_MONOTONIC_RAW
#else
#define CLOCK_TYPE CLOCK_MONOTONIC
#endif
#define NSEC_TO_USEC_DIV 1000
	struct timespec spec;
	(void)clock_gettime(CLOCK_TYPE, &spec);

	tv->tv_sec = spec.tv_sec;
	tv->tv_usec = spec.tv_nsec / NSEC_TO_USEC_DIV;
}

uint64_t
hns3_clock_calctime_ms(struct timeval *tv)
{
	return (uint64_t)tv->tv_sec * MSEC_PER_SEC +
	       tv->tv_usec / USEC_PER_MSEC;
}

uint64_t
hns3_clock_gettime_ms(void)
{
	struct timeval tv;

	hns3_clock_gettime(&tv);
	return hns3_clock_calctime_ms(&tv);
}
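
/*
 * Worked example: for a raw reading of tv_sec = 2, tv_nsec = 345678901,
 * hns3_clock_gettime() yields tv_usec = 345678901 / 1000 = 345678, and
 * hns3_clock_calctime_ms() returns 2 * 1000 + 345678 / 1000 = 2345 ms.
 */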
void hns3_ether_format_addr(char *buf, uint16_t size,
			    const struct rte_ether_addr *ether_addr)
{
	(void)snprintf(buf, size, "%02X:**:**:**:%02X:%02X",
		       ether_addr->addr_bytes[0],
		       ether_addr->addr_bytes[4],
		       ether_addr->addr_bytes[5]);
}
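
/*
 * Example: bytes 1~3 are masked so that complete MAC addresses do not
 * leak into logs; 28:DE:E5:01:02:03 is printed as "28:**:**:**:02:03".
 */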
static int
hns3_set_mc_addr_chk_param(struct hns3_hw *hw,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	uint16_t mac_addrs_capa;
	uint32_t i;
	uint32_t j;

	if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
		hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
			 "invalid. valid range: 0~%d",
			 nb_mc_addr, HNS3_MC_MACADDR_NUM);
		return -EINVAL;
	}

	/* Check if input mac addresses are valid */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		if (!rte_is_multicast_ether_addr(addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw,
				 "failed to set mc mac addr, addr(%s) invalid.",
				 mac_str);
			return -EINVAL;
		}

		/* Check if there are duplicate addresses */
		for (j = i + 1; j < nb_mc_addr; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) {
				hns3_ether_format_addr(mac_str,
						       RTE_ETHER_ADDR_FMT_SIZE,
						       addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addrs invalid. two same addrs(%s).",
					 mac_str);
				return -EINVAL;
			}
		}

		/*
		 * Check if there are duplicate addresses between mac_addrs
		 * and mc_addr_set
		 */
		mac_addrs_capa = hns->is_vf ? HNS3_VF_UC_MACADDR_NUM :
					      HNS3_UC_MACADDR_NUM;
		for (j = 0; j < mac_addrs_capa; j++) {
			if (rte_is_same_ether_addr(addr,
						   &hw->data->mac_addrs[j])) {
				hns3_ether_format_addr(mac_str,
						       RTE_ETHER_ADDR_FMT_SIZE,
						       addr);
				hns3_err(hw, "failed to set mc mac addr, "
					 "addrs invalid. addrs(%s) has already "
					 "configured in mac_addr add API",
					 mac_str);
				return -EINVAL;
			}
		}
	}

	return 0;
}
int
hns3_set_mc_mac_addr_list(struct rte_eth_dev *dev,
			  struct rte_ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_ether_addr *addr;
	int cur_addr_num;
	int set_addr_num;
	int num;
	int ret;
	int i;

	/* Check if input parameters are valid */
	ret = hns3_set_mc_addr_chk_param(hw, mc_addr_set, nb_mc_addr);
	if (ret)
		return ret;

	rte_spinlock_lock(&hw->lock);
	cur_addr_num = hw->mc_addrs_num;
	for (i = 0; i < cur_addr_num; i++) {
		num = cur_addr_num - i - 1;
		addr = &hw->mc_addrs[num];
		ret = hw->ops.del_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		hw->mc_addrs_num--;
	}

	set_addr_num = (int)nb_mc_addr;
	for (i = 0; i < set_addr_num; i++) {
		addr = &mc_addr_set[i];
		ret = hw->ops.add_mc_mac_addr(hw, addr);
		if (ret) {
			rte_spinlock_unlock(&hw->lock);
			return ret;
		}

		rte_ether_addr_copy(addr, &hw->mc_addrs[hw->mc_addrs_num]);
		hw->mc_addrs_num++;
	}
	rte_spinlock_unlock(&hw->lock);

	return 0;
}
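
/*
 * Usage sketch (port id and list are hypothetical): this is the
 * .set_mc_addr_list ethdev callback, reached via
 *
 *	struct rte_ether_addr mc_list[2];
 *	ret = rte_eth_dev_set_mc_addr_list(port_id, mc_list, 2);
 *
 * The whole previous list is removed before the new one is written, so
 * the call replaces, rather than appends to, the multicast filter set.
 */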
int
hns3_configure_all_mc_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct rte_ether_addr *addr;
	int ret = 0;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		if (!rte_is_multicast_ether_addr(addr))
			continue;
		if (del)
			ret = hw->ops.del_mc_mac_addr(hw, addr);
		else
			ret = hw->ops.add_mc_mac_addr(hw, addr);
		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_dbg(hw, "failed to %s mc mac addr: %s ret = %d",
				 del ? "remove" : "restore", mac_str, ret);
		}
	}

	return ret;
}
int
hns3_configure_all_mac_addr(struct hns3_adapter *hns, bool del)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct hns3_hw *hw = &hns->hw;
	struct hns3_hw_ops *ops = &hw->ops;
	struct rte_ether_addr *addr;
	uint16_t mac_addrs_capa;
	int ret = 0;
	int i;

	mac_addrs_capa =
		hns->is_vf ? HNS3_VF_UC_MACADDR_NUM : HNS3_UC_MACADDR_NUM;
	for (i = 0; i < mac_addrs_capa; i++) {
		addr = &hw->data->mac_addrs[i];
		if (rte_is_zero_ether_addr(addr))
			continue;
		if (rte_is_multicast_ether_addr(addr))
			ret = del ? ops->del_mc_mac_addr(hw, addr) :
			      ops->add_mc_mac_addr(hw, addr);
		else
			ret = del ? ops->del_uc_mac_addr(hw, addr) :
			      ops->add_uc_mac_addr(hw, addr);

		if (ret) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to %s mac addr(%s) index:%d ret = %d.",
				 del ? "remove" : "restore", mac_str, i, ret);
		}
	}

	return ret;
}
static bool
hns3_find_duplicate_mc_addr(struct hns3_hw *hw, struct rte_ether_addr *mc_addr)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	int i;

	for (i = 0; i < hw->mc_addrs_num; i++) {
		addr = &hw->mc_addrs[i];
		/* Check if there are duplicate addresses in mc_addrs[] */
		if (rte_is_same_ether_addr(addr, mc_addr)) {
			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
					       addr);
			hns3_err(hw, "failed to add mc mac addr, same addrs"
				 "(%s) is added by the set_mc_mac_addr_list "
				 "API", mac_str);
			return true;
		}
	}

	return false;
}
int
hns3_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		  __rte_unused uint32_t idx, __rte_unused uint32_t pool)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	/*
	 * The hns3 network engine adds UC and MC MAC addresses with different
	 * firmware commands, so we must determine whether the input address is
	 * a UC or a MC address before issuing the command. Note that it is
	 * recommended to set MC MAC addresses with the
	 * rte_eth_dev_set_mc_addr_list API function, because setting them with
	 * the rte_eth_dev_mac_addr_add API function may affect the number of
	 * UC MAC addresses that can be configured.
	 */
	if (rte_is_multicast_ether_addr(mac_addr)) {
		if (hns3_find_duplicate_mc_addr(hw, mac_addr)) {
			rte_spinlock_unlock(&hw->lock);
			return -EINVAL;
		}
		ret = hw->ops.add_mc_mac_addr(hw, mac_addr);
	} else {
		ret = hw->ops.add_uc_mac_addr(hw, mac_addr);
	}
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to add mac addr(%s), ret = %d", mac_str,
			 ret);
	}

	return ret;
}
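
/*
 * Usage sketch (values are hypothetical): this is the .mac_addr_add
 * ethdev callback, reached via e.g.
 *
 *	struct rte_ether_addr addr = {
 *		.addr_bytes = { 0x28, 0xde, 0xe5, 0x01, 0x02, 0x03 } };
 *	ret = rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 */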
void
hns3_remove_mac_addr(struct rte_eth_dev *dev, uint32_t idx)
{
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* index will be checked by upper level rte interface */
	struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[idx];
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	int ret;

	rte_spinlock_lock(&hw->lock);

	if (rte_is_multicast_ether_addr(mac_addr))
		ret = hw->ops.del_mc_mac_addr(hw, mac_addr);
	else
		ret = hw->ops.del_uc_mac_addr(hw, mac_addr);
	rte_spinlock_unlock(&hw->lock);
	if (ret) {
		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
				       mac_addr);
		hns3_err(hw, "failed to remove mac addr(%s), ret = %d", mac_str,
			 ret);
	}
}
int
hns3_init_ring_with_vector(struct hns3_hw *hw)
{
	uint16_t vec;
	int ret;
	int i;

	/*
	 * In hns3 network engine, vector 0 is always the misc interrupt of
	 * this function, and vector 1~N can be used respectively for the
	 * queues of the function. Tx and Rx queues with the same number share
	 * the interrupt vector. During initialization, all hardware mappings
	 * between queues and interrupt vectors need to be cleared so that
	 * errors caused by residual configuration, such as an unexpected Tx
	 * interrupt, can be avoided.
	 */
	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
	if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
		vec = vec - 1; /* the last interrupt is reserved */
	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
	for (i = 0; i < hw->intr_tqps_num; i++) {
		/*
		 * Set gap limiter/rate limiter/quantity limiter algorithm
		 * configuration for interrupt coalesce of queue's interrupt.
		 */
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
				       HNS3_TQP_INTR_GL_DEFAULT);
		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
		/*
		 * QL (quantity limiter) is not used currently, just set 0 to
		 * close it.
		 */
		hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);

		ret = hw->ops.bind_ring_with_vector(hw, vec, false,
						    HNS3_RING_TYPE_TX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "fail to unbind TX ring(%d) with "
					  "vector: %u, ret=%d", i, vec, ret);
			return ret;
		}

		ret = hw->ops.bind_ring_with_vector(hw, vec, false,
						    HNS3_RING_TYPE_RX, i);
		if (ret) {
			PMD_INIT_LOG(ERR, "fail to unbind RX ring(%d) with "
					  "vector: %u, ret=%d", i, vec, ret);
			return ret;
		}
	}

	return 0;
}
static int
hns3_map_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint32_t intr_vector;
	uint16_t q_id;
	int ret;

	/*
	 * hns3 needs a separate interrupt to be used as the event interrupt,
	 * which cannot be shared with task queue pairs, so the kernel driver
	 * needs to support multiple interrupt vectors.
	 */
	if (dev->data->dev_conf.intr_conf.rxq == 0 ||
	    !rte_intr_cap_multiple(intr_handle))
		return 0;

	rte_intr_disable(intr_handle);
	intr_vector = hw->used_rx_queues;
	/* creates event fd for each intr vector when MSIX is used */
	if (rte_intr_efd_enable(intr_handle, intr_vector))
		return -EINVAL;

	/* Allocate vector list */
	if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
				    hw->used_rx_queues)) {
		hns3_err(hw, "failed to allocate %u rx_queues intr_vec",
			 hw->used_rx_queues);
		ret = -ENOMEM;
		goto alloc_intr_vec_error;
	}

	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}

	for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
		ret = hw->ops.bind_ring_with_vector(hw, vec, true,
						    HNS3_RING_TYPE_RX, q_id);
		if (ret)
			goto bind_vector_error;

		if (rte_intr_vec_list_index_set(intr_handle, q_id, vec)) {
			ret = -EINVAL;
			goto bind_vector_error;
		}
		/*
		 * If there are not enough efds (e.g. not enough interrupts),
		 * the remaining queues will be bound to the last interrupt.
		 */
		if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
			vec++;
	}
	rte_intr_enable(intr_handle);
	return 0;

bind_vector_error:
	rte_intr_vec_list_free(intr_handle);
alloc_intr_vec_error:
	rte_intr_efd_disable(intr_handle);
	return ret;
}
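
/*
 * Usage note (a sketch; the queue must also be registered with an epoll
 * instance via rte_eth_dev_rx_intr_ctl_q()): once queues are mapped, an
 * application can enable per-queue Rx interrupts and sleep on the event
 * fds instead of busy polling, e.g.
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, events, 1, timeout_ms);
 */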
static void
hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint8_t base = RTE_INTR_VEC_ZERO_OFFSET;
	uint8_t vec = RTE_INTR_VEC_ZERO_OFFSET;
	uint16_t q_id;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return;

	/* unmap the ring with vector */
	if (rte_intr_allow_others(intr_handle)) {
		vec = RTE_INTR_VEC_RXTX_OFFSET;
		base = RTE_INTR_VEC_RXTX_OFFSET;
	}
	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			(void)hw->ops.bind_ring_with_vector(hw, vec, false,
							    HNS3_RING_TYPE_RX,
							    q_id);
			if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
				vec++;
		}
	}
	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);
}
int
hns3_restore_rx_interrupt(struct hns3_hw *hw)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint16_t q_id;
	int ret;

	if (dev->data->dev_conf.intr_conf.rxq == 0)
		return 0;

	if (rte_intr_dp_is_en(intr_handle)) {
		for (q_id = 0; q_id < hw->used_rx_queues; q_id++) {
			ret = hw->ops.bind_ring_with_vector(hw,
					rte_intr_vec_list_index_get(intr_handle,
								    q_id),
					true, HNS3_RING_TYPE_RX, q_id);
			if (ret)
				return ret;
		}
	}

	return 0;
}