/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_ethdev_pci.h>

#include "base/ice_sched.h"
#include "ice_ethdev.h"

#define ICE_MAX_QP_NUM "max_queue_pair_num"
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
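/*
 * Illustrative devargs usage for the key defined above (the PCI address is
 * only an example): -w 0000:18:00.0,max_queue_pair_num=4 caps this PF at 4
 * queue pairs; the value is parsed by ice_config_max_queue_pair_num() below.
 */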
int ice_logtype_driver;
static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static void ice_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type,
			     uint16_t tpid);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
			       uint16_t vlan_id, int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr,
			   __rte_unused uint32_t index,
			   uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
			     uint16_t pvid, int on);
static int ice_get_eeprom_length(struct rte_eth_dev *dev);
static int ice_get_eeprom(struct rte_eth_dev *dev,
			  struct rte_dev_eeprom_info *eeprom);
static int ice_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats);
static void ice_stats_reset(struct rte_eth_dev *dev);
static int ice_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats, unsigned int n);
static int ice_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int limit);
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure = ice_dev_configure,
	.dev_start = ice_dev_start,
	.dev_stop = ice_dev_stop,
	.dev_close = ice_dev_close,
	.dev_reset = ice_dev_reset,
	.rx_queue_start = ice_rx_queue_start,
	.rx_queue_stop = ice_rx_queue_stop,
	.tx_queue_start = ice_tx_queue_start,
	.tx_queue_stop = ice_tx_queue_stop,
	.rx_queue_setup = ice_rx_queue_setup,
	.rx_queue_release = ice_rx_queue_release,
	.tx_queue_setup = ice_tx_queue_setup,
	.tx_queue_release = ice_tx_queue_release,
	.dev_infos_get = ice_dev_info_get,
	.dev_supported_ptypes_get = ice_dev_supported_ptypes_get,
	.link_update = ice_link_update,
	.mtu_set = ice_mtu_set,
	.mac_addr_set = ice_macaddr_set,
	.mac_addr_add = ice_macaddr_add,
	.mac_addr_remove = ice_macaddr_remove,
	.vlan_filter_set = ice_vlan_filter_set,
	.vlan_offload_set = ice_vlan_offload_set,
	.vlan_tpid_set = ice_vlan_tpid_set,
	.reta_update = ice_rss_reta_update,
	.reta_query = ice_rss_reta_query,
	.rss_hash_update = ice_rss_hash_update,
	.rss_hash_conf_get = ice_rss_hash_conf_get,
	.rx_queue_intr_enable = ice_rx_queue_intr_enable,
	.rx_queue_intr_disable = ice_rx_queue_intr_disable,
	.fw_version_get = ice_fw_version_get,
	.vlan_pvid_set = ice_vlan_pvid_set,
	.rxq_info_get = ice_rxq_info_get,
	.txq_info_get = ice_txq_info_get,
	.get_eeprom_length = ice_get_eeprom_length,
	.get_eeprom = ice_get_eeprom,
	.rx_queue_count = ice_rx_queue_count,
	.rx_descriptor_status = ice_rx_descriptor_status,
	.tx_descriptor_status = ice_tx_descriptor_status,
	.stats_get = ice_stats_get,
	.stats_reset = ice_stats_reset,
	.xstats_get = ice_xstats_get,
	.xstats_get_names = ice_xstats_get_names,
	.xstats_reset = ice_stats_reset,
};
/* store statistics names and their offsets in the stats structure */
struct ice_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};
static const struct ice_xstats_name_off ice_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct ice_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct ice_eth_stats, tx_discards)},
};

#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
		sizeof(ice_stats_strings[0]))
static const struct ice_xstats_name_off ice_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
		mac_remote_faults)},
	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
		rx_len_errors)},
	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
		mac_short_pkt_dropped)},
	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		tx_size_big)},
};

#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
		sizeof(ice_hw_port_strings[0]))
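/*
 * These name/offset tables let the xstats callbacks declared above read each
 * counter generically: a value lives at (uint8_t *)&stats + entry->offset,
 * so adding a new xstat only needs a new name/offsetof() pair in the table.
 */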
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq, DPDK used as PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}
static int
ice_check_qp_num(const char *key, const char *qp_value,
		 __rte_unused void *opaque)
{
	char *end = NULL;
	int num = 0;

	while (isblank(*qp_value))
		qp_value++;

	num = strtoul(qp_value, &end, 10);

	if (!num || (*end == '-') || errno) {
		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			    "value must be > 0",
			    qp_value, key);
		return -1;
	}

	return num;
}
static int
ice_config_max_queue_pair_num(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *queue_num_key = ICE_MAX_QP_NUM;
	int ret;

	if (!devargs)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (!kvlist)
		return 0;

	if (!rte_kvargs_count(kvlist, queue_num_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	if (rte_kvargs_process(kvlist, queue_num_key,
			       ice_check_qp_num, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	ret = rte_kvargs_process(kvlist, queue_num_key,
				 ice_check_qp_num, NULL);
	rte_kvargs_free(kvlist);

	return ret;
}
static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
		  uint32_t num)
{
	struct pool_entry *entry;

	if (!pool || !num)
		return -EINVAL;

	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
	if (!entry) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}
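/*
 * Allocation below is best-fit: scan the free list for the smallest entry
 * that still holds 'num' queues, split it when it is larger than needed,
 * and park the handed-out piece on alloc_list. The returned value is the
 * absolute base, i.e. pool->base plus the entry-relative base.
 */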
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Look up the free list and find the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* Find the best one */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry found to satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}

	/* The entry has exactly the requested number of queues;
	 * remove it from the free list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/* The entry has more queues than requested: carve out a new
		 * entry to hand out and adjust the base and length of the
		 * remainder in the free list.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (!entry) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
	struct pool_entry *entry, *next_entry;

	if (!pool)
		return;

	for (entry = LIST_FIRST(&pool->alloc_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	for (entry = LIST_FIRST(&pool->free_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set Hash scheme */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;

	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* default tc 0 now. Multi-TC support needs to be added later.
	 * Configure TC and queue mapping parameters, for enabled TC,
	 * allocate qpnum_per_tc queues to this traffic.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return ICE_ERR_PARAM;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for UP translate table. Now just set it to 1:1 map by default
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);

	return ICE_SUCCESS;
}
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_unicast_ether_addr
		((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
			(struct ether_addr *)hw->port_info[0].mac.perm_addr);

	dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it to dev data */
	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
			&dev->data->mac_addrs[0]);

	return 0;
}
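/*
 * The following helpers keep a software mirror of what is programmed in
 * hardware: every MAC/VLAN filter added through the AQ is also tracked in
 * vsi->mac_list/vsi->vlan_list, so duplicates are detected locally and all
 * filters can be flushed on VSI release without querying firmware.
 */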
/* Find out specific MAC filter */
static struct ice_mac_filter *
ice_find_mac_filter(struct ice_vsi *vsi, struct ether_addr *macaddr)
{
	struct ice_mac_filter *f;

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
			return f;
	}

	return NULL;
}
static int
ice_add_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* If it's added and configured, return */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (f) {
		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
		return 0;
	}

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* Add the mac */
	ret = ice_add_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Add the mac addr into mac list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = 0;

DONE:
	rte_free(m_list_itr);
	return ret;
}
static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* Can't find it, return an error */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* remove the mac filter */
	ret = ice_remove_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the mac addr from mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = 0;
DONE:
	rte_free(m_list_itr);
	return ret;
}
/* Find out specific VLAN filter */
static struct ice_vlan_filter *
ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_vlan_filter *f;

	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
		if (vlan_id == f->vlan_info.vlan_id)
			return f;
	}

	return NULL;
}
static int
ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)
		return -EINVAL;

	/* If it's added and configured, return. */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (f) {
		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
		return 0;
	}

	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
		return 0;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* Add the vlan */
	ret = ice_add_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Add vlan into vlan list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	f->vlan_info.vlan_id = vlan_id;
	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
	vsi->vlan_num++;

	ret = 0;

DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/**
	 * Vlan 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)
		return -EINVAL;

	/* Can't find it, return an error */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		      ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}

	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* remove the vlan filter */
	ret = ice_remove_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the vlan id from vlan list */
	TAILQ_REMOVE(&vsi->vlan_list, f, next);
	rte_free(f);
	vsi->vlan_num--;

	ret = 0;
DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
{
	struct ice_mac_filter *m_f;
	struct ice_vlan_filter *v_f;
	int ret = 0;

	if (!vsi || !vsi->mac_num)
		return -EINVAL;

	TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
		if (ret != ICE_SUCCESS) {
			ret = -EIO;
			goto DONE;
		}
	}

	if (vsi->vlan_num == 0)
		return 0;

	TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
		ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
		if (ret != ICE_SUCCESS) {
			ret = -EIO;
			goto DONE;
		}
	}

DONE:
	return ret;
}
static int
ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
	    rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
			    ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
				return 0; /* already on */
		} else {
			if (!(vsi->info.outer_tag_flags &
			      ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
	else
		qinq_flags = 0;
	/* clear global insertion and use per packet insertion */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use default outer vlan type 0x9100 (ICE_DFLT_OUTER_TAG_TYPE) */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq insertion",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}
static int
ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
	    rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_COPY)
				return 0; /* already on */
		} else {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
	else
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use default outer vlan type 0x9100 (ICE_DFLT_OUTER_TAG_TYPE) */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}
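/*
 * QinQ (double VLAN) is toggled as a pair: outer-tag stripping and outer-tag
 * insertion are enabled or disabled together for the VSI.
 */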
static int
ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
{
	int ret;

	ret = ice_vsi_config_qinq_stripping(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);

	ret = ice_vsi_config_qinq_insertion(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);

	return ret;
}
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}
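/*
 * Note on GLINT_DYN_CTL usage in this file: INTENA|CLEARPBA arms a vector and
 * clears its pending-bit-array bit, while writing WB_ON_ITR alone (as in the
 * disable paths below) keeps descriptor write-back working with the vector
 * masked.
 */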
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			if (!ret)
				_rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
#ifdef ICE_LSE_SPT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SPT
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ice_link_update(dev, 0);
	}
#endif

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_enable(dev->intr_handle);
}
/* Initialize SW parameters of PF */
static void
ice_pf_sw_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
		pf->lan_nb_qp_max =
			ice_config_max_queue_pair_num(dev->device->devargs);
	else
		pf->lan_nb_qp_max =
			(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
					  hw->func_caps.common_cap.num_rxq);

	pf->lan_nb_qps = pf->lan_nb_qp_max;
}
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	/* base_queue is used in the queue mapping of the VSI add/update
	 * command. Suppose vsi->base_queue is 0 now; don't consider SRIOV
	 * and VMDQ cases in the first stage. Only Main VSI.
	 */
	vsi->base_queue = 0;
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store VSI information in the SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	/* MAC configuration */
	rte_memcpy(pf->dev_addr.addr_bytes,
		   hw->port_info->mac.perm_addr,
		   ETH_ADDR_LEN);

	rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

	rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add MAC filter");

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the Tx queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
static int
ice_pf_setup(struct ice_pf *pf)
{
	struct ice_vsi *vsi;

	/* Clear all stats counters */
	pf->offset_loaded = FALSE;
	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
	if (!vsi) {
		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
		return -EINVAL;
	}

	pf->main_vsi = vsi;

	return 0;
}
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	ice_vsi_config_double_vlan(vsi, FALSE);

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return ret;
}
static int
ice_release_vsi(struct ice_vsi *vsi)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx vsi_ctx;
	enum ice_status ret;

	if (!vsi)
		return 0;

	hw = ICE_VSI_TO_HW(vsi);

	ice_remove_all_mac_vlan_filters(vsi);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));

	vsi_ctx.vsi_num = vsi->vsi_id;
	vsi_ctx.info = vsi->info;
	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
		rte_free(vsi);
		return -1;
	}

	rte_free(vsi);
	return 0;
}
static void
ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	/* disable interrupt and also clear all the existing config */
	for (i = 0; i < vsi->nb_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
		rte_wmb();
	}

	if (rte_intr_allow_others(intr_handle))
		/* vfio-pci */
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	else
		/* igb_uio */
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
}
static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *main_vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* disable all queue interrupts */
	ice_vsi_disable_queues_intr(main_vsi);

	/* Clear all queues and release mbufs */
	ice_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
}
static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ice_dev_stop(dev);

	/* release all queue resource */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);

	ice_shutdown_all_ctrlq(hw);
}
static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);

	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return 0;
}
static int
ice_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Initialize to TRUE. If any Rx queue doesn't meet the
	 * bulk allocation or vector Rx preconditions we will reset it.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	return 0;
}
static int ice_init_rss(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct rte_eth_rss_conf *rss_conf;
	struct ice_aqc_get_set_rss_keys key;
	uint16_t i, nb_q;
	int ret = 0;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;
	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
	vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;

	if (!vsi->rss_key)
		vsi->rss_key = rte_zmalloc(NULL,
					   vsi->rss_key_size, 0);
	if (!vsi->rss_lut)
		vsi->rss_lut = rte_zmalloc(NULL,
					   vsi->rss_lut_size, 0);

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key */
		for (i = 0; i < vsi->rss_key_size; i++)
			vsi->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vsi->rss_key_size));
	}
	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
	if (ret)
		return -EINVAL;

	/* init RSS LUT table: spread entries round-robin across Rx queues */
	for (i = 0; i < vsi->rss_lut_size; i++)
		vsi->rss_lut[i] = i % nb_q;

	ret = ice_aq_set_rss_lut(hw, vsi->idx,
				 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
				 vsi->rss_lut, vsi->rss_lut_size);
	if (ret)
		return -EINVAL;

	return 0;
}
static void
__vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
		       int base_queue, int nb_queue)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint32_t val, val_tx;
	int i;

	for (i = 0; i < nb_queue; i++) {
		/* do actual bind */
		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
		      (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
		val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
			 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;

		PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
			    base_queue + i, msix_vect);
		/* set ITR0 value */
		ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
		ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
		ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
	}
}
static void
ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
	uint16_t queue_idx = 0;
	int record = 0;
	int i;

	/* clear Rx/Tx queue interrupt */
	for (i = 0; i < vsi->nb_used_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
	}

	/* PF bind interrupt */
	if (rte_intr_dp_is_en(intr_handle)) {
		queue_idx = 0;
		record = 1;
	}

	for (i = 0; i < vsi->nb_used_qps; i++) {
		if (nb_msix <= 1) {
			if (!rte_intr_allow_others(intr_handle))
				msix_vect = ICE_MISC_VEC_ID;

			/* uio mapping all queue to one msix_vect */
			__vsi_queues_bind_intr(vsi, msix_vect,
					       vsi->base_queue + i,
					       vsi->nb_used_qps - i);

			for (; !!record && i < vsi->nb_used_qps; i++)
				intr_handle->intr_vec[queue_idx + i] =
					msix_vect;
			break;
		}

		/* vfio 1:1 queue/msix_vect mapping */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue + i, 1);

		if (!!record)
			intr_handle->intr_vec[queue_idx + i] = msix_vect;

		msix_vect++;
		nb_msix--;
	}
}
static void
ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	if (rte_intr_allow_others(intr_handle))
		for (i = 0; i < vsi->nb_used_qps; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_INTENA_M |
				      GLINT_DYN_CTL_CLEARPBA_M |
				      GLINT_DYN_CTL_ITR_INDX_M |
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	else
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
			      GLINT_DYN_CTL_INTENA_M |
			      GLINT_DYN_CTL_CLEARPBA_M |
			      GLINT_DYN_CTL_ITR_INDX_M |
			      GLINT_DYN_CTL_WB_ON_ITR_M);
}
static int
ice_rxq_intr_setup(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_vsi *vsi = pf->main_vsi;
	uint32_t intr_vector = 0;

	rte_intr_disable(intr_handle);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
				    ICE_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
				    0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR,
				    "Failed to allocate %d rx_queues intr_vec",
				    dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* Map queues with MSIX interrupt */
	vsi->nb_used_qps = dev->data->nb_rx_queues;
	ice_vsi_queues_bind_intr(vsi);

	/* Enable interrupts for all the queues */
	ice_vsi_enable_queues_intr(vsi);

	rte_intr_enable(intr_handle);

	return 0;
}
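/*
 * Interrupt mapping recap (see the bind helpers above): with vfio-pci and
 * enough MSI-X vectors each Rx queue gets its own vector (1:1 binding);
 * with uio/igb_uio all queues are mapped onto the single misc vector.
 */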
static int
ice_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t nb_rxq = 0;
	uint16_t nb_txq, i;
	int ret;

	/* program Tx queues' context in hardware */
	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
		ret = ice_tx_queue_start(dev, nb_txq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
			goto tx_err;
		}
	}

	/* program Rx queues' context in hardware*/
	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
		ret = ice_rx_queue_start(dev, nb_rxq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
			goto rx_err;
		}
	}

	ret = ice_init_rss(pf);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
		goto rx_err;
	}

	ice_set_rx_function(dev);

	/* enable Rx interrupt and map Rx queues to interrupt vectors */
	if (ice_rxq_intr_setup(dev))
		return -EIO;

	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
				    NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(WARNING, "Fail to set phy mask");

	/* Call get_link_info aq command to enable/disable LSE */
	ice_link_update(dev, 0);

	pf->adapter_stopped = false;

	return 0;

	/* stop the started queues if failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_tx_queue_stop(dev, i);

	return -EIO;
}
static int
ice_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = ice_dev_uninit(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
		return -ENXIO;
	}

	ret = ice_dev_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
		return -ENXIO;
	}

	return 0;
}
static void
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_VLAN_FILTER;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M |
			       ETH_LINK_SPEED_100M |
			       ETH_LINK_SPEED_1G |
			       ETH_LINK_SPEED_2_5G |
			       ETH_LINK_SPEED_5G |
			       ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_20G |
			       ETH_LINK_SPEED_25G |
			       ETH_LINK_SPEED_40G;

	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
}
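/*
 * struct rte_eth_link fits in 64 bits, so a single rte_atomic64_cmpset() is
 * enough to snapshot or publish the whole link status atomically in the two
 * helpers below.
 */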
static inline int
ice_atomic_read_link_status(struct rte_eth_dev *dev,
			    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
ice_atomic_write_link_status(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
static int
ice_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_link_status link_status;
	struct rte_eth_link link, old;
	enum ice_status status;
	unsigned int rep_cnt = MAX_REPEAT_TIME;
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	ice_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = ice_aq_get_link_info(hw->port_info, enable_lse,
					      &link_status, NULL);
		if (status != ICE_SUCCESS) {
			link.link_speed = ETH_SPEED_NUM_100M;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
		}

		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
		if (!wait_to_complete || link.link_status)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (--rep_cnt);

	if (!link.link_status)
		goto out;

	/* Full-duplex operation at all supported speeds */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case ICE_AQ_LINK_SPEED_10MB:
		link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		link.link_speed = ETH_SPEED_NUM_40G;
		break;
	case ICE_AQ_LINK_SPEED_UNKNOWN:
	default:
		PMD_DRV_LOG(ERR, "Unknown link speed");
		link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);

out:
	ice_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
static int
ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = pf->dev_data;
	uint32_t frame_size = mtu + ETHER_HDR_LEN
			      + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE;

	/* check if mtu is within the allowed range */
	if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
		return -EINVAL;

	/* MTU setting is forbidden while the port is started */
	if (dev_data->dev_started) {
		PMD_DRV_LOG(ERR,
			    "port %d must be stopped before configuration",
			    dev_data->port_id);
		return -EBUSY;
	}

	if (frame_size > ETHER_MAX_LEN)
		dev_data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev_data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return 0;
}
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct ice_mac_filter *f;
	uint8_t flags = 0;
	int ret;

	if (!is_valid_assigned_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
		return -EINVAL;
	}

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
			break;
	}

	if (!f) {
		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
		return -EIO;
	}

	ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
		return -EIO;
	}
	ret = ice_add_mac_filter(vsi, mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add mac filter");
		return -EIO;
	}
	memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);

	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
	ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to set manage mac");

	return 0;
}
/* Add a MAC address, and update filters */
static int
ice_macaddr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		__rte_unused uint32_t index,
		__rte_unused uint32_t pool)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	int ret;

	ret = ice_add_mac_filter(vsi, mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		return -EINVAL;
	}

	return ICE_SUCCESS;
}
/* Remove a MAC address, and update filters */
static void
ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;
	int ret;

	macaddr = &data->mac_addrs[index];
	ret = ice_remove_mac_filter(vsi, macaddr);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
}
static int
ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (on) {
		ret = ice_add_vlan_filter(vsi, vlan_id);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
			return -EINVAL;
		}
	} else {
		ret = ice_remove_vlan_filter(vsi, vlan_id);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
			return -EINVAL;
		}
	}

	return 0;
}
/* Configure vlan filter on or off */
static int
ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t sec_flags, sw_flags2;
	int ret = 0;

	sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
		    ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
	sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;

	if (on) {
		vsi->info.sec_flags |= sec_flags;
		vsi->info.sw_flags2 |= sw_flags2;
	} else {
		vsi->info.sec_flags &= ~sec_flags;
		vsi->info.sw_flags2 &= ~sw_flags2;
	}
	vsi->info.sw_id = hw->port_info->sw_id;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
				 ICE_AQ_VSI_PROP_SECURITY_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
				 ICE_AQ_VSI_PROP_SECURITY_VALID);

	return ret;
}
static int
ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
	    rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
		if (on) {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
				return 0; /* already on */
		} else {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
	vsi->info.vlan_flags |= vlan_flags;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
static int
ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			ice_vsi_config_vlan_filter(vsi, TRUE);
		else
			ice_vsi_config_vlan_filter(vsi, FALSE);
	}

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			ice_vsi_config_vlan_stripping(vsi, TRUE);
		else
			ice_vsi_config_vlan_stripping(vsi, FALSE);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			ice_vsi_config_double_vlan(vsi, TRUE);
		else
			ice_vsi_config_double_vlan(vsi, FALSE);
	}

	return 0;
}
static int
ice_vlan_tpid_set(struct rte_eth_dev *dev,
		  enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t reg_r = 0, reg_w = 0;
	uint16_t reg_id = 0;
	int qinq = dev->data->dev_conf.rxmode.offloads &
		   DEV_RX_OFFLOAD_VLAN_EXTEND;

	switch (vlan_type) {
	case ETH_VLAN_TYPE_OUTER:
		if (qinq)
			reg_id = 3;
		else
			reg_id = 5;
		break;
	case ETH_VLAN_TYPE_INNER:
		if (qinq) {
			reg_id = 5;
		} else {
			PMD_DRV_LOG(ERR,
				    "Unsupported vlan type in single vlan.");
			return -EINVAL;
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
		return -EINVAL;
	}
	reg_r = ICE_READ_REG(hw, GL_SWT_L2TAGCTRL(reg_id));
	PMD_DRV_LOG(DEBUG, "Debug read from ICE GL_SWT_L2TAGCTRL[%d]: "
		    "0x%08"PRIx64"", reg_id, reg_r);

	reg_w = reg_r & (~(GL_SWT_L2TAGCTRL_ETHERTYPE_M));
	reg_w |= ((uint64_t)tpid << GL_SWT_L2TAGCTRL_ETHERTYPE_S);
	if (reg_r == reg_w) {
		PMD_DRV_LOG(DEBUG, "No need to write");
		return 0;
	}

	ICE_WRITE_REG(hw, GL_SWT_L2TAGCTRL(reg_id), reg_w);
	PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
		    "ICE GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);

	return 0;
}
static int
ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret;

	if (!lut)
		return -EINVAL;

	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
		ret = ice_aq_get_rss_lut(hw, vsi->idx, TRUE,
					 lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
			return -EINVAL;
		}
	} else {
		/* PFQF_HLUT registers are 32 bits wide, so walk the table
		 * in 32-bit words; this copies exactly lut_size bytes.
		 */
		uint32_t *lut_dw = (uint32_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
	}

	return 0;
}
static int
ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret;

	if (!vsi || !lut)
		return -EINVAL;

	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
		ret = ice_aq_set_rss_lut(hw, vsi->idx, TRUE,
					 lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
			return -EINVAL;
		}
	} else {
		uint32_t *lut_dw = (uint32_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
	}

	return 0;
}
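/* Layout note, inferred from the access pattern above rather than stated in
 * this file: each 32-bit PFQF_HLUT register packs four one-byte LUT entries
 * (queue indices), so a 512-entry redirection table occupies 512 / 4 = 128
 * registers. That is why both register paths walk lut_size / 4 words.
 */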
static int
ice_rss_reta_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
	uint16_t idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != lut_size ||
	    reta_size > ETH_RSS_RETA_SIZE_512) {
		PMD_DRV_LOG(ERR,
			    "The size of hash lookup table configured (%d) "
			    "doesn't match the number hardware can "
			    "support (%d)",
			    reta_size, lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc(NULL, reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
	if (ret)
		goto out;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			lut[i] = reta_conf[idx].reta[shift];
	}
	ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);

out:
	rte_free(lut);

	return ret;
}
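/* Worked example of the mask/reta indexing used above: with the usual
 * 64-entry groups, RETA entry i = 70 lives in group idx = 70 / 64 = 1 at
 * slot shift = 70 % 64 = 6, so lut[70] is updated only when bit 6 of
 * reta_conf[1].mask is set. Unmasked entries keep the value read back
 * from hardware.
 */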
static int
ice_rss_reta_query(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
	uint16_t idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != lut_size ||
	    reta_size > ETH_RSS_RETA_SIZE_512) {
		PMD_DRV_LOG(ERR,
			    "The size of hash lookup table configured (%d) "
			    "doesn't match the number hardware can "
			    "support (%d)",
			    reta_size, lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc(NULL, reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
	if (ret)
		goto out;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = lut[i];
	}

out:
	rte_free(lut);

	return ret;
}
static int
ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	if (!key || key_len == 0) {
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
		   sizeof(uint32_t)) {
		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
		return -EINVAL;
	}

	struct ice_aqc_get_set_rss_keys *key_dw =
		(struct ice_aqc_get_set_rss_keys *)key;

	ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
		ret = -EINVAL;
	}

	return ret;
}
static int
ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret;

	if (!key || !key_len)
		return -EINVAL;

	ret = ice_aq_get_rss_key
		(hw, vsi->idx,
		 (struct ice_aqc_get_set_rss_keys *)key);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
		return -EINVAL;
	}
	*key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	return 0;
}
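/* Size note: the key length reported above is (VSIQF_HKEY_MAX_INDEX + 1)
 * 32-bit words. Assuming VSIQF_HKEY_MAX_INDEX is 12 on this hardware
 * generation, that works out to 13 * 4 = 52 bytes, matching the length
 * check in ice_set_rss_key().
 */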
static int
ice_rss_hash_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_conf *rss_conf)
{
	enum ice_status status = ICE_SUCCESS;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;

	/* set hash key */
	status = ice_set_rss_key(vsi, rss_conf->rss_key,
				 rss_conf->rss_key_len);
	if (status)
		return status;

	/* TODO: hash enable config, ice_add_rss_cfg */

	return 0;
}
static int
ice_rss_hash_conf_get(struct rte_eth_dev *dev,
		      struct rte_eth_rss_conf *rss_conf)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;

	ice_get_rss_key(vsi, rss_conf->rss_key,
			&rss_conf->rss_key_len);

	/* TODO: default set to 0 as hf config is not supported now */
	rss_conf->rss_hf = 0;

	return 0;
}
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t val;
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];

	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      GLINT_DYN_CTL_ITR_INDX_M;
	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
	rte_intr_enable(&pci_dev->intr_handle);

	return 0;
}
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);

	return 0;
}
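/* Design note: "disable" here does not mask the MSI-X vector. The register
 * is rewritten with only GLINT_DYN_CTL_WB_ON_ITR_M set, so INTENA is cleared
 * (no interrupt fires) while write-back on ITR expiry keeps descriptor
 * write-back going for a polling caller.
 */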
static int
ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	ret = snprintf(fw_version, fw_size, "%d.%d.%05d %d.%d",
		       hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		       hw->api_maj_ver, hw->api_min_ver);

	/* add the size of '\0' */
	ret += 1;
	if (fw_size < (u32)ret)
		return ret;
	else
		return 0;
}
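/* Usage sketch (application side): this follows the usual ethdev contract,
 * where a positive return value is the buffer size the caller should retry
 * with, e.g.
 *
 *	char fw[32];
 *	int len = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
 *	if (len > 0)
 *		// 'fw' was too small; retry with a 'len'-byte buffer
 */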
static int
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (!vsi || !info) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return -EINVAL;
	}

	if (info->on) {
		vsi->info.pvid = info->config.pvid;
		/**
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
			     ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	} else {
		vsi->info.pvid = 0;
		if (info->config.reject.tagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;

		if (info->config.reject.untagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	}
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
				  ICE_AQ_VSI_VLAN_MODE_M);
	vsi->info.vlan_flags |= vlan_flags;
	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	hw = ICE_VSI_TO_HW(vsi);
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "update VSI for VLAN insert failed, err %d",
			    ret);
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return 0;
}
static int
ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = pf->dev_data;
	struct ice_vsi_vlan_pvid_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	info.on = on;
	if (info.on) {
		info.config.pvid = pvid;
	} else {
		info.config.reject.tagged =
			data->dev_conf.txmode.hw_vlan_reject_tagged;
		info.config.reject.untagged =
			data->dev_conf.txmode.hw_vlan_reject_untagged;
	}

	ret = ice_vsi_vlan_pvid_set(vsi, &info);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set pvid.");
		return -EINVAL;
	}

	return 0;
}
static int
ice_get_eeprom_length(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Convert word count to byte count */
	return hw->nvm.sr_words << 1;
}
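/* Note on the conversion above: the NVM shadow RAM is addressed in 16-bit
 * words, so hw->nvm.sr_words is shifted left by one to produce the byte
 * count the ethdev EEPROM API expects. ice_get_eeprom() below performs the
 * inverse shift on the caller's byte offsets.
 */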
static int
ice_get_eeprom(struct rte_eth_dev *dev,
	       struct rte_dev_eeprom_info *eeprom)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t *data = eeprom->data;
	uint16_t offset, length, i;
	enum ice_status ret_code = ICE_SUCCESS;

	offset = eeprom->offset >> 1;
	length = eeprom->length >> 1;

	if (offset > hw->nvm.sr_words ||
	    offset + length > hw->nvm.sr_words) {
		PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
		return -EINVAL;
	}

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	for (i = 0; i < length; i++) {
		ret_code = ice_read_sr_word(hw, offset + i, &data[i]);
		if (ret_code != ICE_SUCCESS) {
			PMD_DRV_LOG(ERR, "EEPROM read failed.");
			return -EIO;
		}
	}

	return 0;
}
static void
ice_stat_update_32(struct ice_hw *hw,
		   uint32_t reg,
		   bool offset_loaded,
		   uint64_t *offset,
		   uint64_t *stat)
{
	uint64_t new_data;

	new_data = (uint64_t)ICE_READ_REG(hw, reg);
	if (!offset_loaded)
		*offset = new_data;

	if (new_data >= *offset)
		*stat = (uint64_t)(new_data - *offset);
	else
		*stat = (uint64_t)((new_data +
				    ((uint64_t)1 << ICE_32_BIT_WIDTH))
				   - *offset);
}
static void
ice_stat_update_40(struct ice_hw *hw,
		   uint32_t hireg,
		   uint32_t loreg,
		   bool offset_loaded,
		   uint64_t *offset,
		   uint64_t *stat)
{
	uint64_t new_data;

	new_data = (uint64_t)ICE_READ_REG(hw, loreg);
	new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
		    ICE_32_BIT_WIDTH;

	if (!offset_loaded)
		*offset = new_data;

	if (new_data >= *offset)
		*stat = new_data - *offset;
	else
		*stat = (uint64_t)((new_data +
				    ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
				   *offset);

	*stat &= ICE_40_BIT_MASK;
}
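/* Worked example of the rollover handling above: the hardware exposes a
 * 40-bit counter as a 32-bit low register plus 8 valid bits in a high
 * register. If the recorded offset is 0xFF_FFFF_FFF0 and the register pair
 * now reads 0x10, the counter wrapped, so the delta is
 * (0x10 + 2^40) - 0xFF_FFFF_FFF0 = 0x20 units since the offset was taken;
 * the final mask keeps the result within 40 bits.
 */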
/* Get all the statistics of a VSI */
static void
ice_update_vsi_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *oes = &vsi->eth_stats_offset;
	struct ice_eth_stats *nes = &vsi->eth_stats;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int idx = rte_le_to_cpu_16(vsi->vsi_id);

	ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
			   vsi->offset_loaded, &oes->rx_bytes,
			   &nes->rx_bytes);
	ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
			   vsi->offset_loaded, &oes->rx_unicast,
			   &nes->rx_unicast);
	ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
			   vsi->offset_loaded, &oes->rx_multicast,
			   &nes->rx_multicast);
	ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
			   vsi->offset_loaded, &oes->rx_broadcast,
			   &nes->rx_broadcast);
	/* exclude CRC bytes */
	nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
			  nes->rx_broadcast) * ETHER_CRC_LEN;

	ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
			   &oes->rx_discards, &nes->rx_discards);
	/* GLV_REPC not supported */
	/* GLV_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
			   &oes->rx_unknown_protocol,
			   &nes->rx_unknown_protocol);
	ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
			   vsi->offset_loaded, &oes->tx_bytes,
			   &nes->tx_bytes);
	ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
			   vsi->offset_loaded, &oes->tx_unicast,
			   &nes->tx_unicast);
	ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
			   vsi->offset_loaded, &oes->tx_multicast,
			   &nes->tx_multicast);
	ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
			   vsi->offset_loaded, &oes->tx_broadcast,
			   &nes->tx_broadcast);
	/* GLV_TDPC not supported */
	ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
			   &oes->tx_errors, &nes->tx_errors);
	vsi->offset_loaded = true;

	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
		    vsi->vsi_id);
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    nes->rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
	PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
		    vsi->vsi_id);
}
static void
ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
{
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
	struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */

	/* Get statistics of struct ice_eth_stats */
	ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
			   GLPRT_GORCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_bytes,
			   &ns->eth.rx_bytes);
	ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
			   GLPRT_UPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_unicast,
			   &ns->eth.rx_unicast);
	ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
			   GLPRT_MPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_multicast,
			   &ns->eth.rx_multicast);
	ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
			   GLPRT_BPRCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.rx_broadcast,
			   &ns->eth.rx_broadcast);
	ice_stat_update_32(hw, PRTRPB_RDPC,
			   pf->offset_loaded, &os->eth.rx_discards,
			   &ns->eth.rx_discards);

	/* Workaround: CRC size should not be included in byte statistics,
	 * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
	 */
	ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
			     ns->eth.rx_broadcast) * ETHER_CRC_LEN;

	/* GLPRT_REPC not supported */
	/* GLPRT_RMPC not supported */
	ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
			   pf->offset_loaded,
			   &os->eth.rx_unknown_protocol,
			   &ns->eth.rx_unknown_protocol);
	ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
			   GLPRT_GOTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_bytes,
			   &ns->eth.tx_bytes);
	ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
			   GLPRT_UPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_unicast,
			   &ns->eth.tx_unicast);
	ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
			   GLPRT_MPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_multicast,
			   &ns->eth.tx_multicast);
	ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
			   GLPRT_BPTCL(hw->port_info->lport),
			   pf->offset_loaded, &os->eth.tx_broadcast,
			   &ns->eth.tx_broadcast);
	ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
			     ns->eth.tx_broadcast) * ETHER_CRC_LEN;

	/* GLPRT_TEPC not supported */

	/* additional port specific stats */
	ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_dropped_link_down,
			   &ns->tx_dropped_link_down);
	ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
			   pf->offset_loaded, &os->crc_errors,
			   &ns->crc_errors);
	ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
			   pf->offset_loaded, &os->illegal_bytes,
			   &ns->illegal_bytes);
	/* GLPRT_ERRBC not supported */
	ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_local_faults,
			   &ns->mac_local_faults);
	ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
			   pf->offset_loaded, &os->mac_remote_faults,
			   &ns->mac_remote_faults);

	ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_len_errors,
			   &ns->rx_len_errors);

	ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_rx,
			   &ns->link_xon_rx);
	ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_rx,
			   &ns->link_xoff_rx);
	ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xon_tx,
			   &ns->link_xon_tx);
	ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
			   pf->offset_loaded, &os->link_xoff_tx,
			   &ns->link_xoff_tx);
	ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
			   GLPRT_PRC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_64,
			   &ns->rx_size_64);
	ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
			   GLPRT_PRC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_127,
			   &ns->rx_size_127);
	ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
			   GLPRT_PRC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_255,
			   &ns->rx_size_255);
	ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
			   GLPRT_PRC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_511,
			   &ns->rx_size_511);
	ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
			   GLPRT_PRC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1023,
			   &ns->rx_size_1023);
	ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
			   GLPRT_PRC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_1522,
			   &ns->rx_size_1522);
	ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
			   GLPRT_PRC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_size_big,
			   &ns->rx_size_big);
	ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_undersize,
			   &ns->rx_undersize);
	ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_fragments,
			   &ns->rx_fragments);
	ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_oversize,
			   &ns->rx_oversize);
	ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
			   pf->offset_loaded, &os->rx_jabber,
			   &ns->rx_jabber);
	ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
			   GLPRT_PTC64L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_64,
			   &ns->tx_size_64);
	ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
			   GLPRT_PTC127L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_127,
			   &ns->tx_size_127);
	ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
			   GLPRT_PTC255L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_255,
			   &ns->tx_size_255);
	ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
			   GLPRT_PTC511L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_511,
			   &ns->tx_size_511);
	ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
			   GLPRT_PTC1023L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1023,
			   &ns->tx_size_1023);
	ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
			   GLPRT_PTC1522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_1522,
			   &ns->tx_size_1522);
	ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
			   GLPRT_PTC9522L(hw->port_info->lport),
			   pf->offset_loaded, &os->tx_size_big,
			   &ns->tx_size_big);

	/* GLPRT_MSPDC not supported */
	/* GLPRT_XEC not supported */

	pf->offset_loaded = true;

	if (pf->main_vsi)
		ice_update_vsi_stats(pf->main_vsi);
}
/* Get all statistics of a port */
static int
ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_hw_port_stats *ns = &pf->stats; /* new stats */

	/* call read registers - updates values, now write them to struct */
	ice_read_stats_registers(pf, hw);

	stats->ipackets = ns->eth.rx_unicast +
			  ns->eth.rx_multicast +
			  ns->eth.rx_broadcast -
			  ns->eth.rx_discards -
			  pf->main_vsi->eth_stats.rx_discards;
	stats->opackets = ns->eth.tx_unicast +
			  ns->eth.tx_multicast +
			  ns->eth.tx_broadcast;
	stats->ibytes = ns->eth.rx_bytes;
	stats->obytes = ns->eth.tx_bytes;
	stats->oerrors = ns->eth.tx_errors +
			 pf->main_vsi->eth_stats.tx_errors;

	/* Rx Errors */
	stats->imissed = ns->eth.rx_discards +
			 pf->main_vsi->eth_stats.rx_discards;
	stats->ierrors = ns->crc_errors +
			 ns->rx_undersize +
			 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;

	PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
	PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
	PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
	PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
	PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
	PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
	PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
		    pf->main_vsi->eth_stats.rx_discards);
	PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
		    ns->eth.rx_unknown_protocol);
	PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
	PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
	PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
	PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
	PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
	PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
		    pf->main_vsi->eth_stats.tx_discards);
	PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);

	PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
		    ns->tx_dropped_link_down);
	PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
	PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
		    ns->illegal_bytes);
	PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
	PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
		    ns->mac_local_faults);
	PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
		    ns->mac_remote_faults);
	PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
	PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
	PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
	PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
	PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
	PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
	PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
	PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
	PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
	PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
	PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
	PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
	PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
	PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
	PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
	PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
	PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
	PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
	PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
	PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
	PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
	PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors);
	PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");

	return 0;
}
/* Reset the statistics */
static void
ice_stats_reset(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Mark PF and VSI stats to update the offset, aka "reset" */
	pf->offset_loaded = false;
	if (pf->main_vsi)
		pf->main_vsi->offset_loaded = false;

	/* read the stats, reading current register values into offset */
	ice_read_stats_registers(pf, hw);
}
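/* Design note: the hardware counters are never cleared here; "reset" just
 * forces the next ice_read_stats_registers() pass to reload the offset
 * snapshots, so subsequent reads report deltas relative to this point. This
 * is the same offset-based scheme ice_stat_update_32/40 implement above.
 */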
static uint32_t
ice_xstats_calc_num(void)
{
	uint32_t num;

	num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;

	return num;
}
static int
ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int n)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int i;
	unsigned int count;
	struct ice_hw_port_stats *hw_stats = &pf->stats;

	count = ice_xstats_calc_num();
	if (n < count)
		return count;

	ice_read_stats_registers(pf, hw);

	if (!xstats)
		return 0;

	count = 0;

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)&hw_stats->eth +
				      ice_stats_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		xstats[count].value =
			*(uint64_t *)((char *)hw_stats +
				      ice_hw_port_strings[i].offset);
		xstats[count].id = count;
		count++;
	}

	return count;
}
static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				__rte_unused unsigned int limit)
{
	unsigned int count = 0;
	unsigned int i;

	if (!xstats_names)
		return ice_xstats_calc_num();

	/* Note: limit checked in rte_eth_xstats_names() */

	/* Get stats from ice_eth_stats struct */
	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "%s", ice_stats_strings[i].name);
		count++;
	}

	/* Get individual stats from ice_hw_port struct */
	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
		snprintf(xstats_names[count].name,
			 sizeof(xstats_names[count].name),
			 "%s", ice_hw_port_strings[i].name);
		count++;
	}

	return count;
}
static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_adapter),
					     ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}
static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};

/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_MAX_QP_NUM "=<int>");

RTE_INIT(ice_init_log)
{
	ice_logtype_init = rte_log_register("pmd.net.ice.init");
	if (ice_logtype_init >= 0)
		rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
	ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
	if (ice_logtype_driver >= 0)
		rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
}