1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
13 #include "base/ice_sched.h"
14 #include "base/ice_flow.h"
15 #include "base/ice_dcb.h"
16 #include "ice_ethdev.h"
19 #define ICE_MAX_QP_NUM "max_queue_pair_num"
20 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
21 #define ICE_DFLT_PKG_FILE "/lib/firmware/intel/ice/ddp/ice.pkg"
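/*
 * Illustrative usage note (not part of the driver): "max_queue_pair_num" is a
 * per-device devarg, so an application can cap the number of queue pairs from
 * the EAL command line, e.g. with a hypothetical device address:
 *
 *   testpmd -w 0000:18:00.0,max_queue_pair_num=8 -- -i
 *
 * The value is parsed by ice_check_qp_num()/ice_config_max_queue_pair_num()
 * below; a value that does not parse as a positive integer is flagged with a
 * warning.
 */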
24 int ice_logtype_driver;
26 static int ice_dev_configure(struct rte_eth_dev *dev);
27 static int ice_dev_start(struct rte_eth_dev *dev);
28 static void ice_dev_stop(struct rte_eth_dev *dev);
29 static void ice_dev_close(struct rte_eth_dev *dev);
30 static int ice_dev_reset(struct rte_eth_dev *dev);
31 static void ice_dev_info_get(struct rte_eth_dev *dev,
32 struct rte_eth_dev_info *dev_info);
33 static int ice_link_update(struct rte_eth_dev *dev,
34 int wait_to_complete);
35 static int ice_dev_set_link_up(struct rte_eth_dev *dev);
36 static int ice_dev_set_link_down(struct rte_eth_dev *dev);
38 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
39 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
40 static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
41 enum rte_vlan_type vlan_type,
43 static int ice_rss_reta_update(struct rte_eth_dev *dev,
44 struct rte_eth_rss_reta_entry64 *reta_conf,
46 static int ice_rss_reta_query(struct rte_eth_dev *dev,
47 struct rte_eth_rss_reta_entry64 *reta_conf,
49 static int ice_rss_hash_update(struct rte_eth_dev *dev,
50 struct rte_eth_rss_conf *rss_conf);
51 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
52 struct rte_eth_rss_conf *rss_conf);
53 static void ice_promisc_enable(struct rte_eth_dev *dev);
54 static void ice_promisc_disable(struct rte_eth_dev *dev);
55 static void ice_allmulti_enable(struct rte_eth_dev *dev);
56 static void ice_allmulti_disable(struct rte_eth_dev *dev);
57 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
60 static int ice_macaddr_set(struct rte_eth_dev *dev,
61 struct rte_ether_addr *mac_addr);
62 static int ice_macaddr_add(struct rte_eth_dev *dev,
63 struct rte_ether_addr *mac_addr,
64 __rte_unused uint32_t index,
66 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
67 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
69 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
71 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
73 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
74 uint16_t pvid, int on);
75 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
76 static int ice_get_eeprom(struct rte_eth_dev *dev,
77 struct rte_dev_eeprom_info *eeprom);
78 static int ice_stats_get(struct rte_eth_dev *dev,
79 struct rte_eth_stats *stats);
80 static void ice_stats_reset(struct rte_eth_dev *dev);
81 static int ice_xstats_get(struct rte_eth_dev *dev,
82 struct rte_eth_xstat *xstats, unsigned int n);
83 static int ice_xstats_get_names(struct rte_eth_dev *dev,
84 struct rte_eth_xstat_name *xstats_names,
87 static const struct rte_pci_id pci_id_ice_map[] = {
88 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
89 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
90 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
91 { .vendor_id = 0, /* sentinel */ },
94 static const struct eth_dev_ops ice_eth_dev_ops = {
95 .dev_configure = ice_dev_configure,
96 .dev_start = ice_dev_start,
97 .dev_stop = ice_dev_stop,
98 .dev_close = ice_dev_close,
99 .dev_reset = ice_dev_reset,
100 .dev_set_link_up = ice_dev_set_link_up,
101 .dev_set_link_down = ice_dev_set_link_down,
102 .rx_queue_start = ice_rx_queue_start,
103 .rx_queue_stop = ice_rx_queue_stop,
104 .tx_queue_start = ice_tx_queue_start,
105 .tx_queue_stop = ice_tx_queue_stop,
106 .rx_queue_setup = ice_rx_queue_setup,
107 .rx_queue_release = ice_rx_queue_release,
108 .tx_queue_setup = ice_tx_queue_setup,
109 .tx_queue_release = ice_tx_queue_release,
110 .dev_infos_get = ice_dev_info_get,
111 .dev_supported_ptypes_get = ice_dev_supported_ptypes_get,
112 .link_update = ice_link_update,
113 .mtu_set = ice_mtu_set,
114 .mac_addr_set = ice_macaddr_set,
115 .mac_addr_add = ice_macaddr_add,
116 .mac_addr_remove = ice_macaddr_remove,
117 .vlan_filter_set = ice_vlan_filter_set,
118 .vlan_offload_set = ice_vlan_offload_set,
119 .vlan_tpid_set = ice_vlan_tpid_set,
120 .reta_update = ice_rss_reta_update,
121 .reta_query = ice_rss_reta_query,
122 .rss_hash_update = ice_rss_hash_update,
123 .rss_hash_conf_get = ice_rss_hash_conf_get,
124 .promiscuous_enable = ice_promisc_enable,
125 .promiscuous_disable = ice_promisc_disable,
126 .allmulticast_enable = ice_allmulti_enable,
127 .allmulticast_disable = ice_allmulti_disable,
128 .rx_queue_intr_enable = ice_rx_queue_intr_enable,
129 .rx_queue_intr_disable = ice_rx_queue_intr_disable,
130 .fw_version_get = ice_fw_version_get,
131 .vlan_pvid_set = ice_vlan_pvid_set,
132 .rxq_info_get = ice_rxq_info_get,
133 .txq_info_get = ice_txq_info_get,
134 .get_eeprom_length = ice_get_eeprom_length,
135 .get_eeprom = ice_get_eeprom,
136 .rx_queue_count = ice_rx_queue_count,
137 .rx_descriptor_status = ice_rx_descriptor_status,
138 .tx_descriptor_status = ice_tx_descriptor_status,
139 .stats_get = ice_stats_get,
140 .stats_reset = ice_stats_reset,
141 .xstats_get = ice_xstats_get,
142 .xstats_get_names = ice_xstats_get_names,
143 .xstats_reset = ice_stats_reset,
146 /* store the statistics names and their offsets in the stats structure */
147 struct ice_xstats_name_off {
148 char name[RTE_ETH_XSTATS_NAME_SIZE];
152 static const struct ice_xstats_name_off ice_stats_strings[] = {
153 {"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
154 {"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
155 {"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
156 {"rx_dropped", offsetof(struct ice_eth_stats, rx_discards)},
157 {"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
158 rx_unknown_protocol)},
159 {"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
160 {"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
161 {"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
162 {"tx_dropped", offsetof(struct ice_eth_stats, tx_discards)},
165 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
166 sizeof(ice_stats_strings[0]))
168 static const struct ice_xstats_name_off ice_hw_port_strings[] = {
169 {"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
170 tx_dropped_link_down)},
171 {"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
172 {"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
174 {"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
175 {"mac_local_errors", offsetof(struct ice_hw_port_stats,
177 {"mac_remote_errors", offsetof(struct ice_hw_port_stats,
179 {"rx_len_errors", offsetof(struct ice_hw_port_stats,
181 {"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
182 {"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
183 {"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
184 {"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
185 {"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
186 {"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
188 {"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
190 {"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
192 {"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
194 {"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
196 {"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
198 {"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
200 {"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
202 {"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
203 mac_short_pkt_dropped)},
204 {"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
206 {"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
207 {"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
208 {"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
210 {"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
212 {"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
214 {"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
216 {"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
218 {"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
222 #define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
223 sizeof(ice_hw_port_strings[0]))
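/*
 * Sketch (illustrative only): the xstats callbacks declared above consume
 * these name/offset tables generically. Assuming a populated
 * struct ice_hw_port_stats *hps, reading table entry i amounts to roughly
 *
 *   uint64_t val =
 *       *(uint64_t *)((char *)hps + ice_hw_port_strings[i].offset);
 *
 * which is why every field referenced in the table must be a 64-bit counter.
 */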
226 ice_init_controlq_parameter(struct ice_hw *hw)
228 /* fields for adminq */
229 hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
230 hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
231 hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
232 hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
234 /* fields for mailboxq; DPDK acts as the PF host */
235 hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
236 hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
237 hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
238 hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
242 ice_check_qp_num(const char *key, const char *qp_value,
243 __rte_unused void *opaque)
248 while (isblank(*qp_value))
251 num = strtoul(qp_value, &end, 10);
253 if (!num || (*end == '-') || errno) {
254 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
264 ice_config_max_queue_pair_num(struct rte_devargs *devargs)
266 struct rte_kvargs *kvlist;
267 const char *queue_num_key = ICE_MAX_QP_NUM;
273 kvlist = rte_kvargs_parse(devargs->args, NULL);
277 if (!rte_kvargs_count(kvlist, queue_num_key)) {
278 rte_kvargs_free(kvlist);
282 if (rte_kvargs_process(kvlist, queue_num_key,
283 ice_check_qp_num, NULL) < 0) {
284 rte_kvargs_free(kvlist);
287 ret = rte_kvargs_process(kvlist, queue_num_key,
288 ice_check_qp_num, NULL);
289 rte_kvargs_free(kvlist);
295 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
298 struct pool_entry *entry;
303 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
306 "Failed to allocate memory for resource pool");
310 /* initialize the queue heap */
311 pool->num_free = num;
314 LIST_INIT(&pool->alloc_list);
315 LIST_INIT(&pool->free_list);
317 /* Initialize element */
321 LIST_INSERT_HEAD(&pool->free_list, entry, next);
326 ice_res_pool_alloc(struct ice_res_pool_info *pool,
329 struct pool_entry *entry, *valid_entry;
332 PMD_INIT_LOG(ERR, "Invalid parameter");
336 if (pool->num_free < num) {
337 PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
338 num, pool->num_free);
343 /* Look through the free list and find the best-fit entry */
344 LIST_FOREACH(entry, &pool->free_list, next) {
345 if (entry->len >= num) {
347 if (entry->len == num) {
352 valid_entry->len > entry->len)
357 /* No entry found that can satisfy the request, return */
359 PMD_INIT_LOG(ERR, "No valid entry found");
363 * The entry has exactly the requested number of queues;
364 * remove it from the free list.
366 if (valid_entry->len == num) {
367 LIST_REMOVE(valid_entry, next);
370 * The entry has more queues than requested;
371 * create a new entry for the alloc list and adjust the
372 * base and length of the entry left in the free list.
374 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
377 "Failed to allocate memory for "
381 entry->base = valid_entry->base;
383 valid_entry->base += num;
384 valid_entry->len -= num;
388 /* Insert it into alloc list, not sorted */
389 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
391 pool->num_free -= valid_entry->len;
392 pool->num_alloc += valid_entry->len;
394 return valid_entry->base + pool->base;
398 ice_res_pool_destroy(struct ice_res_pool_info *pool)
400 struct pool_entry *entry, *next_entry;
405 for (entry = LIST_FIRST(&pool->alloc_list);
406 entry && (next_entry = LIST_NEXT(entry, next), 1);
407 entry = next_entry) {
408 LIST_REMOVE(entry, next);
412 for (entry = LIST_FIRST(&pool->free_list);
413 entry && (next_entry = LIST_NEXT(entry, next), 1);
414 entry = next_entry) {
415 LIST_REMOVE(entry, next);
422 LIST_INIT(&pool->alloc_list);
423 LIST_INIT(&pool->free_list);
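/*
 * Typical lifecycle of this pool, as used elsewhere in this file: on init,
 * ice_res_pool_init(&pf->msix_pool, 1, num_msix_vectors - 1) seeds the free
 * list with one contiguous range; each VSI then grabs a block of vectors via
 * ice_res_pool_alloc(&pf->msix_pool, nb_msix); ice_res_pool_destroy() frees
 * everything again on close/uninit.
 */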
427 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
429 /* Set VSI LUT selection */
430 info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
431 ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
432 /* Set Hash scheme */
433 info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
434 ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
436 info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
439 static enum ice_status
440 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
441 struct ice_aqc_vsi_props *info,
442 uint8_t enabled_tcmap)
444 uint16_t bsf, qp_idx;
446 /* Only TC0 is supported for now; multi-TC support will be added later.
447  * Configure the TC and queue mapping parameters: for each enabled TC,
448  * allocate qpnum_per_tc queues to that traffic class.
450 if (enabled_tcmap != 0x01) {
451 PMD_INIT_LOG(ERR, "only TC0 is supported");
455 vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
456 bsf = rte_bsf32(vsi->nb_qps);
457 /* Adjust the queue number to the actual number of queues that can be applied */
458 vsi->nb_qps = 0x1 << bsf;
461 /* Set tc and queue mapping with VSI */
462 info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
463 ICE_AQ_VSI_TC_Q_OFFSET_S) |
464 (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
466 /* Associate queue number with VSI */
467 info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
468 info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
469 info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
470 info->valid_sections |=
471 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
472 /* Set info.ingress_table and info.egress_table
473  * for the UP translation table. For now just use a 1:1 map by default:
474  * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
476 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
477 info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
478 info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
479 info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
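/*
 * Worked example (illustrative): with vsi->nb_qps = 8 and vsi->base_queue = 0,
 * bsf = rte_bsf32(8) = 3, so tc_mapping[0] encodes queue offset 0 with
 * 2^3 = 8 queues for TC0, and q_mapping[] advertises the contiguous queue
 * range [0, 8) to the VSI.
 */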
484 ice_init_mac_address(struct rte_eth_dev *dev)
486 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
488 if (!rte_is_unicast_ether_addr
489 ((struct rte_ether_addr *)hw->port_info[0].mac.lan_addr)) {
490 PMD_INIT_LOG(ERR, "Invalid MAC address");
495 (struct rte_ether_addr *)hw->port_info[0].mac.lan_addr,
496 (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr);
498 dev->data->mac_addrs =
499 rte_zmalloc(NULL, sizeof(struct rte_ether_addr), 0);
500 if (!dev->data->mac_addrs) {
502 "Failed to allocate memory to store mac address");
505 /* store it to dev data */
507 (struct rte_ether_addr *)hw->port_info[0].mac.perm_addr,
508 &dev->data->mac_addrs[0]);
512 /* Find a specific MAC filter */
513 static struct ice_mac_filter *
514 ice_find_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *macaddr)
516 struct ice_mac_filter *f;
518 TAILQ_FOREACH(f, &vsi->mac_list, next) {
519 if (rte_is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
527 ice_add_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
529 struct ice_fltr_list_entry *m_list_itr = NULL;
530 struct ice_mac_filter *f;
531 struct LIST_HEAD_TYPE list_head;
532 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
535 /* If it's added and configured, return */
536 f = ice_find_mac_filter(vsi, mac_addr);
538 PMD_DRV_LOG(INFO, "This MAC filter already exists.");
542 INIT_LIST_HEAD(&list_head);
544 m_list_itr = (struct ice_fltr_list_entry *)
545 ice_malloc(hw, sizeof(*m_list_itr));
550 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
551 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
552 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
553 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
554 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
555 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
556 m_list_itr->fltr_info.vsi_handle = vsi->idx;
558 LIST_ADD(&m_list_itr->list_entry, &list_head);
561 ret = ice_add_mac(hw, &list_head);
562 if (ret != ICE_SUCCESS) {
563 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
567 /* Add the mac addr into mac list */
568 f = rte_zmalloc(NULL, sizeof(*f), 0);
570 PMD_DRV_LOG(ERR, "failed to allocate memory");
574 rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
575 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
581 rte_free(m_list_itr);
586 ice_remove_mac_filter(struct ice_vsi *vsi, struct rte_ether_addr *mac_addr)
588 struct ice_fltr_list_entry *m_list_itr = NULL;
589 struct ice_mac_filter *f;
590 struct LIST_HEAD_TYPE list_head;
591 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
594 /* Can't find it, return an error */
595 f = ice_find_mac_filter(vsi, mac_addr);
599 INIT_LIST_HEAD(&list_head);
601 m_list_itr = (struct ice_fltr_list_entry *)
602 ice_malloc(hw, sizeof(*m_list_itr));
607 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
608 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
609 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
610 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
611 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
612 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
613 m_list_itr->fltr_info.vsi_handle = vsi->idx;
615 LIST_ADD(&m_list_itr->list_entry, &list_head);
617 /* remove the mac filter */
618 ret = ice_remove_mac(hw, &list_head);
619 if (ret != ICE_SUCCESS) {
620 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
625 /* Remove the mac addr from mac list */
626 TAILQ_REMOVE(&vsi->mac_list, f, next);
632 rte_free(m_list_itr);
636 /* Find a specific VLAN filter */
637 static struct ice_vlan_filter *
638 ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
640 struct ice_vlan_filter *f;
642 TAILQ_FOREACH(f, &vsi->vlan_list, next) {
643 if (vlan_id == f->vlan_info.vlan_id)
651 ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
653 struct ice_fltr_list_entry *v_list_itr = NULL;
654 struct ice_vlan_filter *f;
655 struct LIST_HEAD_TYPE list_head;
659 if (!vsi || vlan_id > RTE_ETHER_MAX_VLAN_ID)
662 hw = ICE_VSI_TO_HW(vsi);
664 /* If it's added and configured, return. */
665 f = ice_find_vlan_filter(vsi, vlan_id);
667 PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
671 if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
674 INIT_LIST_HEAD(&list_head);
676 v_list_itr = (struct ice_fltr_list_entry *)
677 ice_malloc(hw, sizeof(*v_list_itr));
682 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
683 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
684 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
685 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
686 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
687 v_list_itr->fltr_info.vsi_handle = vsi->idx;
689 LIST_ADD(&v_list_itr->list_entry, &list_head);
692 ret = ice_add_vlan(hw, &list_head);
693 if (ret != ICE_SUCCESS) {
694 PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
699 /* Add vlan into vlan list */
700 f = rte_zmalloc(NULL, sizeof(*f), 0);
702 PMD_DRV_LOG(ERR, "failed to allocate memory");
706 f->vlan_info.vlan_id = vlan_id;
707 TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
713 rte_free(v_list_itr);
718 ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
720 struct ice_fltr_list_entry *v_list_itr = NULL;
721 struct ice_vlan_filter *f;
722 struct LIST_HEAD_TYPE list_head;
727 * Vlan 0 is the generic filter for untagged packets
728 * and can't be removed.
730 if (!vsi || vlan_id == 0 || vlan_id > RTE_ETHER_MAX_VLAN_ID)
733 hw = ICE_VSI_TO_HW(vsi);
735 /* Can't find it, return an error */
736 f = ice_find_vlan_filter(vsi, vlan_id);
740 INIT_LIST_HEAD(&list_head);
742 v_list_itr = (struct ice_fltr_list_entry *)
743 ice_malloc(hw, sizeof(*v_list_itr));
749 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
750 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
751 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
752 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
753 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
754 v_list_itr->fltr_info.vsi_handle = vsi->idx;
756 LIST_ADD(&v_list_itr->list_entry, &list_head);
758 /* remove the vlan filter */
759 ret = ice_remove_vlan(hw, &list_head);
760 if (ret != ICE_SUCCESS) {
761 PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
766 /* Remove the vlan id from vlan list */
767 TAILQ_REMOVE(&vsi->vlan_list, f, next);
773 rte_free(v_list_itr);
778 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
780 struct ice_mac_filter *m_f;
781 struct ice_vlan_filter *v_f;
784 if (!vsi || !vsi->mac_num)
787 TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
788 ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
789 if (ret != ICE_SUCCESS) {
795 if (vsi->vlan_num == 0)
798 TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
799 ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
800 if (ret != ICE_SUCCESS) {
811 ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
813 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
814 struct ice_vsi_ctx ctxt;
818 /* Check whether it is already on or off */
819 if (vsi->info.valid_sections &
820 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
822 if ((vsi->info.outer_tag_flags &
823 ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
824 ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
825 return 0; /* already on */
827 if (!(vsi->info.outer_tag_flags &
828 ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
829 return 0; /* already off */
834 qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
837 /* clear global insertion and use per packet insertion */
838 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
839 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
840 vsi->info.outer_tag_flags |= qinq_flags;
841 /* use the default outer VLAN type (ICE_DFLT_OUTER_TAG_TYPE, 0x9100) */
842 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
843 vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
844 ICE_AQ_VSI_OUTER_TAG_TYPE_S;
845 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
846 ctxt.info.valid_sections =
847 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
848 ctxt.vsi_num = vsi->vsi_id;
849 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
852 "Update VSI failed to %s qinq stripping",
853 on ? "enable" : "disable");
857 vsi->info.valid_sections |=
858 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
864 ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
866 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
867 struct ice_vsi_ctx ctxt;
871 /* Check whether it is already on or off */
872 if (vsi->info.valid_sections &
873 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
875 if ((vsi->info.outer_tag_flags &
876 ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
877 ICE_AQ_VSI_OUTER_TAG_COPY)
878 return 0; /* already on */
880 if ((vsi->info.outer_tag_flags &
881 ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
882 ICE_AQ_VSI_OUTER_TAG_NOTHING)
883 return 0; /* already off */
888 qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
890 qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
891 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
892 vsi->info.outer_tag_flags |= qinq_flags;
893 /* use the default outer VLAN type (ICE_DFLT_OUTER_TAG_TYPE, 0x9100) */
894 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
895 vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
896 ICE_AQ_VSI_OUTER_TAG_TYPE_S;
897 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
898 ctxt.info.valid_sections =
899 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
900 ctxt.vsi_num = vsi->vsi_id;
901 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
904 "Update VSI failed to %s qinq stripping",
905 on ? "enable" : "disable");
909 vsi->info.valid_sections |=
910 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
916 ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
920 ret = ice_vsi_config_qinq_stripping(vsi, on);
922 PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);
924 ret = ice_vsi_config_qinq_insertion(vsi, on);
926 PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);
933 ice_pf_enable_irq0(struct ice_hw *hw)
935 /* reset the registers */
936 ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
937 ICE_READ_REG(hw, PFINT_OICR);
940 ICE_WRITE_REG(hw, PFINT_OICR_ENA,
941 (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
942 (~PFINT_OICR_LINK_STAT_CHANGE_M)));
944 ICE_WRITE_REG(hw, PFINT_OICR_CTL,
945 (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
946 ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
947 PFINT_OICR_CTL_ITR_INDX_M) |
948 PFINT_OICR_CTL_CAUSE_ENA_M);
950 ICE_WRITE_REG(hw, PFINT_FW_CTL,
951 (0 & PFINT_FW_CTL_MSIX_INDX_M) |
952 ((0 << PFINT_FW_CTL_ITR_INDX_S) &
953 PFINT_FW_CTL_ITR_INDX_M) |
954 PFINT_FW_CTL_CAUSE_ENA_M);
956 ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
959 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
960 GLINT_DYN_CTL_INTENA_M |
961 GLINT_DYN_CTL_CLEARPBA_M |
962 GLINT_DYN_CTL_ITR_INDX_M);
969 ice_pf_disable_irq0(struct ice_hw *hw)
971 /* Disable all interrupt types */
972 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
978 ice_handle_aq_msg(struct rte_eth_dev *dev)
980 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
981 struct ice_ctl_q_info *cq = &hw->adminq;
982 struct ice_rq_event_info event;
983 uint16_t pending, opcode;
986 event.buf_len = ICE_AQ_MAX_BUF_LEN;
987 event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
988 if (!event.msg_buf) {
989 PMD_DRV_LOG(ERR, "Failed to allocate mem");
995 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
997 if (ret != ICE_SUCCESS) {
999 "Failed to read msg from AdminQ, "
1001 hw->adminq.sq_last_status);
1004 opcode = rte_le_to_cpu_16(event.desc.opcode);
1007 case ice_aqc_opc_get_link_status:
1008 ret = ice_link_update(dev, 0);
1010 _rte_eth_dev_callback_process
1011 (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1014 PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
1019 rte_free(event.msg_buf);
1024 * Interrupt handler triggered by the NIC for handling
1025 * a specific interrupt.
1028 * Pointer to interrupt handle.
1030 * The address of the parameter (struct rte_eth_dev *) registered before.
1036 ice_interrupt_handler(void *param)
1038 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1039 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1046 uint32_t int_fw_ctl;
1049 /* Disable interrupt */
1050 ice_pf_disable_irq0(hw);
1052 /* read out interrupt causes */
1053 oicr = ICE_READ_REG(hw, PFINT_OICR);
1055 int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
1058 /* No interrupt event indicated */
1059 if (!(oicr & PFINT_OICR_INTEVENT_M)) {
1060 PMD_DRV_LOG(INFO, "No interrupt event");
1065 if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
1066 PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
1067 ice_handle_aq_msg(dev);
1070 if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
1071 PMD_DRV_LOG(INFO, "OICR: link state change event");
1072 ice_link_update(dev, 0);
1076 if (oicr & PFINT_OICR_MAL_DETECT_M) {
1077 PMD_DRV_LOG(WARNING, "OICR: MDD event");
1078 reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
1079 if (reg & GL_MDET_TX_PQM_VALID_M) {
1080 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1081 GL_MDET_TX_PQM_PF_NUM_S;
1082 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1083 GL_MDET_TX_PQM_MAL_TYPE_S;
1084 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
1085 GL_MDET_TX_PQM_QNUM_S;
1087 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1088 "%d by PQM on TX queue %d PF# %d",
1089 event, queue, pf_num);
1092 reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
1093 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1094 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1095 GL_MDET_TX_TCLAN_PF_NUM_S;
1096 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1097 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1098 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1099 GL_MDET_TX_TCLAN_QNUM_S;
1101 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1102 "%d by TCLAN on TX queue %d PF# %d",
1103 event, queue, pf_num);
1107 /* Enable interrupt */
1108 ice_pf_enable_irq0(hw);
1109 rte_intr_enable(dev->intr_handle);
1112 /* Initialize SW parameters of PF */
1114 ice_pf_sw_init(struct rte_eth_dev *dev)
1116 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1117 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1119 if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
1121 ice_config_max_queue_pair_num(dev->device->devargs);
1124 (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1125 hw->func_caps.common_cap.num_rxq);
1127 pf->lan_nb_qps = pf->lan_nb_qp_max;
1132 static struct ice_vsi *
1133 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1135 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1136 struct ice_vsi *vsi = NULL;
1137 struct ice_vsi_ctx vsi_ctx;
1139 struct rte_ether_addr broadcast = {
1140 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1141 struct rte_ether_addr mac_addr;
1142 uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1143 uint8_t tc_bitmap = 0x1;
1145 /* hw->num_lports = 1 in NIC mode */
1146 vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1150 vsi->idx = pf->next_vsi_idx;
1153 vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1154 vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1155 vsi->vlan_anti_spoof_on = 0;
1156 vsi->vlan_filter_on = 1;
1157 TAILQ_INIT(&vsi->mac_list);
1158 TAILQ_INIT(&vsi->vlan_list);
1160 /* Keep in sync with the ETH_RSS_RETA_SIZE_x maximum value definition */
1161 pf->hash_lut_size = hw->func_caps.common_cap.rss_table_size >
1162 ETH_RSS_RETA_SIZE_512 ? ETH_RSS_RETA_SIZE_512 :
1163 hw->func_caps.common_cap.rss_table_size;
1164 pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE;
1166 memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1167 /* base_queue is used in the queue mapping of the VSI add/update command.
1168  * Assume vsi->base_queue is 0 for now; SRIOV and VMDQ cases are not
1169  * considered in this first stage, only the main VSI.
1171 vsi->base_queue = 0;
1174 vsi->nb_qps = pf->lan_nb_qps;
1175 ice_vsi_config_default_rss(&vsi_ctx.info);
1176 vsi_ctx.alloc_from_pool = true;
1177 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1178 /* switch_id is queried by get_switch_config aq, which is done
1181 vsi_ctx.info.sw_id = hw->port_info->sw_id;
1182 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1183 /* Allow all untagged or tagged packets */
1184 vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
1185 vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
1186 vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1187 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1188 /* Enable VLAN/UP trip */
1189 ret = ice_vsi_config_tc_queue_mapping(vsi,
1194 "tc queue mapping with vsi failed, "
1202 /* for other types of VSI */
1203 PMD_INIT_LOG(ERR, "other types of VSI not supported");
1207 /* VF has MSIX interrupt in VF range, don't allocate here */
1208 if (type == ICE_VSI_PF) {
1209 ret = ice_res_pool_alloc(&pf->msix_pool,
1210 RTE_MIN(vsi->nb_qps,
1211 RTE_MAX_RXTX_INTR_VEC_ID));
1213 PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1216 vsi->msix_intr = ret;
1217 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1222 ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1223 if (ret != ICE_SUCCESS) {
1224 PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
1227 /* store VSI information in the SW structure */
1228 vsi->vsi_id = vsi_ctx.vsi_num;
1229 vsi->info = vsi_ctx.info;
1230 pf->vsis_allocated = vsi_ctx.vsis_allocd;
1231 pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1233 /* MAC configuration */
1234 rte_memcpy(pf->dev_addr.addr_bytes,
1235 hw->port_info->mac.perm_addr,
1238 rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN);
1239 ret = ice_add_mac_filter(vsi, &mac_addr);
1240 if (ret != ICE_SUCCESS)
1241 PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1243 rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN);
1244 ret = ice_add_mac_filter(vsi, &mac_addr);
1245 if (ret != ICE_SUCCESS)
1246 PMD_INIT_LOG(ERR, "Failed to add MAC filter");
1248 /* At the beginning, only TC0 is used. */
1249 /* What we need here is the maximum number of Tx queues;
1250  * currently vsi->nb_qps holds it.
1251  * Correct this if that ever changes.
1253 max_txqs[0] = vsi->nb_qps;
1254 ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1255 tc_bitmap, max_txqs);
1256 if (ret != ICE_SUCCESS)
1257 PMD_INIT_LOG(ERR, "Failed to config vsi sched");
1267 ice_send_driver_ver(struct ice_hw *hw)
1269 struct ice_driver_ver dv;
1271 /* we don't have a driver version yet, use 0 as a dummy */
1275 dv.subbuild_ver = 0;
1276 strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
1278 return ice_aq_send_driver_ver(hw, &dv, NULL);
1282 ice_pf_setup(struct ice_pf *pf)
1284 struct ice_vsi *vsi;
1286 /* Clear all stats counters */
1287 pf->offset_loaded = FALSE;
1288 memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1289 memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1290 memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1291 memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1293 vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1295 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1304 static int ice_load_pkg(struct rte_eth_dev *dev)
1306 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1307 const char *pkg_file = ICE_DFLT_PKG_FILE;
1314 file = fopen(pkg_file, "rb");
1316 PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
1320 err = stat(pkg_file, &fstat);
1322 PMD_INIT_LOG(ERR, "failed to get file stats\n");
1327 buf_len = fstat.st_size;
1328 buf = rte_malloc(NULL, buf_len, 0);
1331 PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
1337 err = fread(buf, buf_len, 1, file);
1339 PMD_INIT_LOG(ERR, "failed to read package data\n");
1347 err = ice_copy_and_init_pkg(hw, buf, buf_len);
1349 PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err);
1352 err = ice_init_hw_tbls(hw);
1354 PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
1355 goto fail_init_tbls;
1361 rte_free(hw->pkg_copy);
1368 ice_dev_init(struct rte_eth_dev *dev)
1370 struct rte_pci_device *pci_dev;
1371 struct rte_intr_handle *intr_handle;
1372 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1373 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1374 struct ice_adapter *ad =
1375 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1376 struct ice_vsi *vsi;
1379 dev->dev_ops = &ice_eth_dev_ops;
1380 dev->rx_pkt_burst = ice_recv_pkts;
1381 dev->tx_pkt_burst = ice_xmit_pkts;
1382 dev->tx_pkt_prepare = ice_prep_pkts;
1384 ice_set_default_ptype_table(dev);
1385 pci_dev = RTE_DEV_TO_PCI(dev->device);
1386 intr_handle = &pci_dev->intr_handle;
1388 pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1389 pf->adapter->eth_dev = dev;
1390 pf->dev_data = dev->data;
1391 hw->back = pf->adapter;
1392 hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
1393 hw->vendor_id = pci_dev->id.vendor_id;
1394 hw->device_id = pci_dev->id.device_id;
1395 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1396 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1397 hw->bus.device = pci_dev->addr.devid;
1398 hw->bus.func = pci_dev->addr.function;
1400 ice_init_controlq_parameter(hw);
1402 ret = ice_init_hw(hw);
1404 PMD_INIT_LOG(ERR, "Failed to initialize HW");
1408 ret = ice_load_pkg(dev);
1410 PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
1411 "Entering Safe Mode");
1412 ad->is_safe_mode = 1;
1415 PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
1416 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
1417 hw->api_maj_ver, hw->api_min_ver);
1419 ice_pf_sw_init(dev);
1420 ret = ice_init_mac_address(dev);
1422 PMD_INIT_LOG(ERR, "Failed to initialize mac address");
1426 ret = ice_res_pool_init(&pf->msix_pool, 1,
1427 hw->func_caps.common_cap.num_msix_vectors - 1);
1429 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1430 goto err_msix_pool_init;
1433 ret = ice_pf_setup(pf);
1435 PMD_INIT_LOG(ERR, "Failed to setup PF");
1439 ret = ice_send_driver_ver(hw);
1441 PMD_INIT_LOG(ERR, "Failed to send driver version");
1447 /* Disable double vlan by default */
1448 ice_vsi_config_double_vlan(vsi, FALSE);
1450 ret = ice_aq_stop_lldp(hw, TRUE, NULL);
1451 if (ret != ICE_SUCCESS)
1452 PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
1454 /* register callback func to eal lib */
1455 rte_intr_callback_register(intr_handle,
1456 ice_interrupt_handler, dev);
1458 ice_pf_enable_irq0(hw);
1460 /* enable uio intr after callback register */
1461 rte_intr_enable(intr_handle);
1466 ice_res_pool_destroy(&pf->msix_pool);
1468 rte_free(dev->data->mac_addrs);
1470 ice_sched_cleanup_all(hw);
1471 rte_free(hw->port_info);
1472 ice_shutdown_all_ctrlq(hw);
1478 ice_release_vsi(struct ice_vsi *vsi)
1481 struct ice_vsi_ctx vsi_ctx;
1482 enum ice_status ret;
1487 hw = ICE_VSI_TO_HW(vsi);
1489 ice_remove_all_mac_vlan_filters(vsi);
1491 memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1493 vsi_ctx.vsi_num = vsi->vsi_id;
1494 vsi_ctx.info = vsi->info;
1495 ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
1496 if (ret != ICE_SUCCESS) {
1497 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
1507 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
1509 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1510 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1511 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1512 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1513 uint16_t msix_intr, i;
1515 /* disable interrupts and also clear all the existing config */
1516 for (i = 0; i < vsi->nb_qps; i++) {
1517 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
1518 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
1522 if (rte_intr_allow_others(intr_handle))
1524 for (i = 0; i < vsi->nb_msix; i++) {
1525 msix_intr = vsi->msix_intr + i;
1526 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
1527 GLINT_DYN_CTL_WB_ON_ITR_M);
1531 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1535 ice_dev_stop(struct rte_eth_dev *dev)
1537 struct rte_eth_dev_data *data = dev->data;
1538 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1539 struct ice_vsi *main_vsi = pf->main_vsi;
1540 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1541 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1544 /* avoid stopping again */
1545 if (pf->adapter_stopped)
1548 /* stop and clear all Rx queues */
1549 for (i = 0; i < data->nb_rx_queues; i++)
1550 ice_rx_queue_stop(dev, i);
1552 /* stop and clear all Tx queues */
1553 for (i = 0; i < data->nb_tx_queues; i++)
1554 ice_tx_queue_stop(dev, i);
1556 /* disable all queue interrupts */
1557 ice_vsi_disable_queues_intr(main_vsi);
1559 /* Clear all queues and release mbufs */
1560 ice_clear_queues(dev);
1562 ice_dev_set_link_down(dev);
1564 /* Clean datapath event and queue/vec mapping */
1565 rte_intr_efd_disable(intr_handle);
1566 if (intr_handle->intr_vec) {
1567 rte_free(intr_handle->intr_vec);
1568 intr_handle->intr_vec = NULL;
1571 pf->adapter_stopped = true;
1575 ice_dev_close(struct rte_eth_dev *dev)
1577 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1578 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1580 /* Since stop will bring the link down, a link event will be
1581  * triggered; disable the irq first to avoid the port_info and other
1582  * resources deallocation causing the interrupt service thread
1585 ice_pf_disable_irq0(hw);
1589 /* release all queue resources */
1590 ice_free_queues(dev);
1592 ice_res_pool_destroy(&pf->msix_pool);
1593 ice_release_vsi(pf->main_vsi);
1594 ice_sched_cleanup_all(hw);
1595 rte_free(hw->port_info);
1596 hw->port_info = NULL;
1597 ice_shutdown_all_ctrlq(hw);
1601 ice_dev_uninit(struct rte_eth_dev *dev)
1603 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1604 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1608 dev->dev_ops = NULL;
1609 dev->rx_pkt_burst = NULL;
1610 dev->tx_pkt_burst = NULL;
1612 rte_free(dev->data->mac_addrs);
1613 dev->data->mac_addrs = NULL;
1615 /* disable uio intr before callback unregister */
1616 rte_intr_disable(intr_handle);
1618 /* unregister callback func from eal lib */
1619 rte_intr_callback_unregister(intr_handle,
1620 ice_interrupt_handler, dev);
1626 ice_dev_configure(struct rte_eth_dev *dev)
1628 struct ice_adapter *ad =
1629 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1631 /* Initialize to TRUE. If any Rx queue doesn't meet the
1632  * bulk allocation or vector Rx preconditions, we will reset it.
1634 ad->rx_bulk_alloc_allowed = true;
1635 ad->tx_simple_allowed = true;
1640 static int ice_init_rss(struct ice_pf *pf)
1642 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1643 struct ice_vsi *vsi = pf->main_vsi;
1644 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1645 struct rte_eth_rss_conf *rss_conf;
1646 struct ice_aqc_get_set_rss_keys key;
1649 bool is_safe_mode = pf->adapter->is_safe_mode;
1651 rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1652 nb_q = dev->data->nb_rx_queues;
1653 vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
1654 vsi->rss_lut_size = pf->hash_lut_size;
1657 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
1662 vsi->rss_key = rte_zmalloc(NULL,
1663 vsi->rss_key_size, 0);
1665 vsi->rss_lut = rte_zmalloc(NULL,
1666 vsi->rss_lut_size, 0);
1668 /* configure RSS key */
1669 if (!rss_conf->rss_key) {
1670 /* Calculate the default hash key */
1671 for (i = 0; i < vsi->rss_key_size; i++)
1672 vsi->rss_key[i] = (uint8_t)rte_rand();
1674 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
1675 RTE_MIN(rss_conf->rss_key_len,
1676 vsi->rss_key_size));
1678 rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
1679 ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
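/*
 * Application-side sketch (illustrative, hypothetical names): the key
 * programmed above comes from the rss_conf the application supplied at
 * configure time, e.g.
 *
 *   static uint8_t key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
 *   struct rte_eth_conf conf = { 0 };
 *   conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *   conf.rx_adv_conf.rss_conf.rss_key = key;
 *   conf.rx_adv_conf.rss_conf.rss_key_len = sizeof(key);
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * If rss_key is NULL, a random default key is generated below instead.
 */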
1683 /* init RSS LUT table */
1684 for (i = 0; i < vsi->rss_lut_size; i++)
1685 vsi->rss_lut[i] = i % nb_q;
1687 ret = ice_aq_set_rss_lut(hw, vsi->idx,
1688 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
1689 vsi->rss_lut, vsi->rss_lut_size);
1693 /* configure RSS for IPv4 with input set IPv4 src/dst */
1694 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
1695 ICE_FLOW_SEG_HDR_IPV4);
1697 PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d", __func__, ret);
1699 /* configure RSS for IPv6 with input set IPv6 src/dst */
1700 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
1701 ICE_FLOW_SEG_HDR_IPV6);
1703 PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d", __func__, ret);
1705 /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
1706 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
1707 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
1709 PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", __func__, ret);
1711 /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
1712 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
1713 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
1715 PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", __func__, ret);
1717 /* configure RSS for sctp6 with input set IPv6 src/dst */
1718 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
1719 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
1721 PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
1724 /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
1725 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
1726 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
1728 PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", __func__, ret);
1730 /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
1731 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
1732 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
1734 PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", __func__, ret);
1736 /* configure RSS for sctp4 with input set IP src/dst */
1737 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
1738 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
1740 PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
1747 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
1748 int base_queue, int nb_queue)
1750 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1751 uint32_t val, val_tx;
1754 for (i = 0; i < nb_queue; i++) {
1756 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
1757 (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
1758 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
1759 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
1761 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
1762 base_queue + i, msix_vect);
1763 /* set ITR0 value */
1764 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
1765 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
1766 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
1771 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
1773 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1774 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1775 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1776 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1777 uint16_t msix_vect = vsi->msix_intr;
1778 uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1779 uint16_t queue_idx = 0;
1783 /* clear Rx/Tx queue interrupt */
1784 for (i = 0; i < vsi->nb_used_qps; i++) {
1785 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
1786 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
1789 /* PF bind interrupt */
1790 if (rte_intr_dp_is_en(intr_handle)) {
1795 for (i = 0; i < vsi->nb_used_qps; i++) {
1797 if (!rte_intr_allow_others(intr_handle))
1798 msix_vect = ICE_MISC_VEC_ID;
1800 /* uio: map all queues to one msix_vect */
1801 __vsi_queues_bind_intr(vsi, msix_vect,
1802 vsi->base_queue + i,
1803 vsi->nb_used_qps - i);
1805 for (; !!record && i < vsi->nb_used_qps; i++)
1806 intr_handle->intr_vec[queue_idx + i] =
1811 /* vfio 1:1 queue/msix_vect mapping */
1812 __vsi_queues_bind_intr(vsi, msix_vect,
1813 vsi->base_queue + i, 1);
1816 intr_handle->intr_vec[queue_idx + i] = msix_vect;
1824 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
1826 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1827 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1828 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1829 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1830 uint16_t msix_intr, i;
1832 if (rte_intr_allow_others(intr_handle))
1833 for (i = 0; i < vsi->nb_used_qps; i++) {
1834 msix_intr = vsi->msix_intr + i;
1835 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
1836 GLINT_DYN_CTL_INTENA_M |
1837 GLINT_DYN_CTL_CLEARPBA_M |
1838 GLINT_DYN_CTL_ITR_INDX_M |
1839 GLINT_DYN_CTL_WB_ON_ITR_M);
1842 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1843 GLINT_DYN_CTL_INTENA_M |
1844 GLINT_DYN_CTL_CLEARPBA_M |
1845 GLINT_DYN_CTL_ITR_INDX_M |
1846 GLINT_DYN_CTL_WB_ON_ITR_M);
1850 ice_rxq_intr_setup(struct rte_eth_dev *dev)
1852 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1853 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1854 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1855 struct ice_vsi *vsi = pf->main_vsi;
1856 uint32_t intr_vector = 0;
1858 rte_intr_disable(intr_handle);
1860 /* check and configure queue intr-vector mapping */
1861 if ((rte_intr_cap_multiple(intr_handle) ||
1862 !RTE_ETH_DEV_SRIOV(dev).active) &&
1863 dev->data->dev_conf.intr_conf.rxq != 0) {
1864 intr_vector = dev->data->nb_rx_queues;
1865 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
1866 PMD_DRV_LOG(ERR, "At most %d intr queues supported",
1867 ICE_MAX_INTR_QUEUE_NUM);
1870 if (rte_intr_efd_enable(intr_handle, intr_vector))
1874 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1875 intr_handle->intr_vec =
1876 rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
1878 if (!intr_handle->intr_vec) {
1880 "Failed to allocate %d rx_queues intr_vec",
1881 dev->data->nb_rx_queues);
1886 /* Map queues with MSIX interrupt */
1887 vsi->nb_used_qps = dev->data->nb_rx_queues;
1888 ice_vsi_queues_bind_intr(vsi);
1890 /* Enable interrupts for all the queues */
1891 ice_vsi_enable_queues_intr(vsi);
1893 rte_intr_enable(intr_handle);
1899 ice_dev_start(struct rte_eth_dev *dev)
1901 struct rte_eth_dev_data *data = dev->data;
1902 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1903 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1904 struct ice_vsi *vsi = pf->main_vsi;
1905 uint16_t nb_rxq = 0;
1909 /* program Tx queues' context in hardware */
1910 for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
1911 ret = ice_tx_queue_start(dev, nb_txq);
1913 PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
1919 /* program Rx queues' context in hardware */
1919 for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
1920 ret = ice_rx_queue_start(dev, nb_rxq);
1922 PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
1927 ret = ice_init_rss(pf);
1929 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
1933 ice_set_rx_function(dev);
1934 ice_set_tx_function(dev);
1936 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1937 ETH_VLAN_EXTEND_MASK;
1938 ret = ice_vlan_offload_set(dev, mask);
1940 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1944 /* enable Rx interrupts and map Rx queues to interrupt vectors */
1945 if (ice_rxq_intr_setup(dev))
1948 /* Enable receiving broadcast packets and transmitting packets */
1949 ret = ice_set_vsi_promisc(hw, vsi->idx,
1950 ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
1951 ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
1953 if (ret != ICE_SUCCESS)
1954 PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
1956 ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
1957 ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
1958 ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
1959 ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
1960 ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
1961 ICE_AQ_LINK_EVENT_AN_COMPLETED |
1962 ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
1964 if (ret != ICE_SUCCESS)
1965 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
1967 ice_dev_set_link_up(dev);
1969 /* Call the get_link_info AQ command to enable/disable LSE */
1970 ice_link_update(dev, 0);
1972 pf->adapter_stopped = false;
1976 /* stop the queues that were started if we fail to start all queues */
1978 for (i = 0; i < nb_rxq; i++)
1979 ice_rx_queue_stop(dev, i);
1981 for (i = 0; i < nb_txq; i++)
1982 ice_tx_queue_stop(dev, i);
1988 ice_dev_reset(struct rte_eth_dev *dev)
1992 if (dev->data->sriov.active)
1995 ret = ice_dev_uninit(dev);
1997 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
2001 ret = ice_dev_init(dev);
2003 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
2011 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2013 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2014 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2015 struct ice_vsi *vsi = pf->main_vsi;
2016 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
2017 bool is_safe_mode = pf->adapter->is_safe_mode;
2021 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
2022 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
2023 dev_info->max_rx_queues = vsi->nb_qps;
2024 dev_info->max_tx_queues = vsi->nb_qps;
2025 dev_info->max_mac_addrs = vsi->max_macaddrs;
2026 dev_info->max_vfs = pci_dev->max_vfs;
2027 dev_info->max_mtu = dev_info->max_rx_pktlen - ICE_ETH_OVERHEAD;
2028 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2030 dev_info->rx_offload_capa =
2031 DEV_RX_OFFLOAD_VLAN_STRIP |
2032 DEV_RX_OFFLOAD_JUMBO_FRAME |
2033 DEV_RX_OFFLOAD_KEEP_CRC |
2034 DEV_RX_OFFLOAD_SCATTER |
2035 DEV_RX_OFFLOAD_VLAN_FILTER;
2036 dev_info->tx_offload_capa =
2037 DEV_TX_OFFLOAD_VLAN_INSERT |
2038 DEV_TX_OFFLOAD_TCP_TSO |
2039 DEV_TX_OFFLOAD_MULTI_SEGS |
2040 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
2041 dev_info->flow_type_rss_offloads = 0;
2043 if (!is_safe_mode) {
2044 dev_info->rx_offload_capa |=
2045 DEV_RX_OFFLOAD_IPV4_CKSUM |
2046 DEV_RX_OFFLOAD_UDP_CKSUM |
2047 DEV_RX_OFFLOAD_TCP_CKSUM |
2048 DEV_RX_OFFLOAD_QINQ_STRIP |
2049 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
2050 DEV_RX_OFFLOAD_VLAN_EXTEND;
2051 dev_info->tx_offload_capa |=
2052 DEV_TX_OFFLOAD_QINQ_INSERT |
2053 DEV_TX_OFFLOAD_IPV4_CKSUM |
2054 DEV_TX_OFFLOAD_UDP_CKSUM |
2055 DEV_TX_OFFLOAD_TCP_CKSUM |
2056 DEV_TX_OFFLOAD_SCTP_CKSUM |
2057 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
2058 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
2061 dev_info->rx_queue_offload_capa = 0;
2062 dev_info->tx_queue_offload_capa = 0;
2064 dev_info->reta_size = pf->hash_lut_size;
2065 dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2067 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2069 .pthresh = ICE_DEFAULT_RX_PTHRESH,
2070 .hthresh = ICE_DEFAULT_RX_HTHRESH,
2071 .wthresh = ICE_DEFAULT_RX_WTHRESH,
2073 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
2078 dev_info->default_txconf = (struct rte_eth_txconf) {
2080 .pthresh = ICE_DEFAULT_TX_PTHRESH,
2081 .hthresh = ICE_DEFAULT_TX_HTHRESH,
2082 .wthresh = ICE_DEFAULT_TX_WTHRESH,
2084 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
2085 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
2089 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2090 .nb_max = ICE_MAX_RING_DESC,
2091 .nb_min = ICE_MIN_RING_DESC,
2092 .nb_align = ICE_ALIGN_RING_DESC,
2095 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2096 .nb_max = ICE_MAX_RING_DESC,
2097 .nb_min = ICE_MIN_RING_DESC,
2098 .nb_align = ICE_ALIGN_RING_DESC,
2101 dev_info->speed_capa = ETH_LINK_SPEED_10M |
2102 ETH_LINK_SPEED_100M |
2104 ETH_LINK_SPEED_2_5G |
2106 ETH_LINK_SPEED_10G |
2107 ETH_LINK_SPEED_20G |
2110 phy_type_low = hw->port_info->phy.phy_type_low;
2111 phy_type_high = hw->port_info->phy.phy_type_high;
2113 if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
2114 dev_info->speed_capa |= ETH_LINK_SPEED_50G;
2116 if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
2117 ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
2118 dev_info->speed_capa |= ETH_LINK_SPEED_100G;
2120 dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2121 dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2123 dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
2124 dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
2125 dev_info->default_rxportconf.nb_queues = 1;
2126 dev_info->default_txportconf.nb_queues = 1;
2127 dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
2128 dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
2132 ice_atomic_read_link_status(struct rte_eth_dev *dev,
2133 struct rte_eth_link *link)
2135 struct rte_eth_link *dst = link;
2136 struct rte_eth_link *src = &dev->data->dev_link;
2138 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2139 *(uint64_t *)src) == 0)
2146 ice_atomic_write_link_status(struct rte_eth_dev *dev,
2147 struct rte_eth_link *link)
2149 struct rte_eth_link *dst = &dev->data->dev_link;
2150 struct rte_eth_link *src = link;
2152 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2153 *(uint64_t *)src) == 0)
2160 ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
2162 #define CHECK_INTERVAL 100 /* 100ms */
2163 #define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
2164 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2165 struct ice_link_status link_status;
2166 struct rte_eth_link link, old;
2168 unsigned int rep_cnt = MAX_REPEAT_TIME;
2169 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2171 memset(&link, 0, sizeof(link));
2172 memset(&old, 0, sizeof(old));
2173 memset(&link_status, 0, sizeof(link_status));
2174 ice_atomic_read_link_status(dev, &old);
2177 /* Get link status information from hardware */
2178 status = ice_aq_get_link_info(hw->port_info, enable_lse,
2179 &link_status, NULL);
2180 if (status != ICE_SUCCESS) {
2181 link.link_speed = ETH_SPEED_NUM_100M;
2182 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2183 PMD_DRV_LOG(ERR, "Failed to get link info");
2187 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
2188 if (!wait_to_complete || link.link_status)
2191 rte_delay_ms(CHECK_INTERVAL);
2192 } while (--rep_cnt);
2194 if (!link.link_status)
2197 /* Full-duplex operation at all supported speeds */
2198 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2200 /* Parse the link status */
2201 switch (link_status.link_speed) {
2202 case ICE_AQ_LINK_SPEED_10MB:
2203 link.link_speed = ETH_SPEED_NUM_10M;
2205 case ICE_AQ_LINK_SPEED_100MB:
2206 link.link_speed = ETH_SPEED_NUM_100M;
2208 case ICE_AQ_LINK_SPEED_1000MB:
2209 link.link_speed = ETH_SPEED_NUM_1G;
2211 case ICE_AQ_LINK_SPEED_2500MB:
2212 link.link_speed = ETH_SPEED_NUM_2_5G;
2214 case ICE_AQ_LINK_SPEED_5GB:
2215 link.link_speed = ETH_SPEED_NUM_5G;
2217 case ICE_AQ_LINK_SPEED_10GB:
2218 link.link_speed = ETH_SPEED_NUM_10G;
2220 case ICE_AQ_LINK_SPEED_20GB:
2221 link.link_speed = ETH_SPEED_NUM_20G;
2223 case ICE_AQ_LINK_SPEED_25GB:
2224 link.link_speed = ETH_SPEED_NUM_25G;
2226 case ICE_AQ_LINK_SPEED_40GB:
2227 link.link_speed = ETH_SPEED_NUM_40G;
2229 case ICE_AQ_LINK_SPEED_50GB:
2230 link.link_speed = ETH_SPEED_NUM_50G;
2232 case ICE_AQ_LINK_SPEED_100GB:
2233 link.link_speed = ETH_SPEED_NUM_100G;
2235 case ICE_AQ_LINK_SPEED_UNKNOWN:
2237 PMD_DRV_LOG(ERR, "Unknown link speed");
2238 link.link_speed = ETH_SPEED_NUM_NONE;
2242 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2243 ETH_LINK_SPEED_FIXED);
2246 ice_atomic_write_link_status(dev, &link);
2247 if (link.link_status == old.link_status)
2253 /* Force the physical link state by getting the current PHY capabilities from
2254 * hardware and setting the PHY config based on the determined capabilities. If
2255 * the link changes, a link event will be triggered because both the Enable Automatic
2256 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
2258 static enum ice_status
2259 ice_force_phys_link_state(struct ice_hw *hw, bool link_up)
2261 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2262 struct ice_aqc_get_phy_caps_data *pcaps;
2263 struct ice_port_info *pi;
2264 enum ice_status status;
2266 if (!hw || !hw->port_info)
2267 return ICE_ERR_PARAM;
2271 pcaps = (struct ice_aqc_get_phy_caps_data *)
2272 ice_malloc(hw, sizeof(*pcaps));
2274 return ICE_ERR_NO_MEMORY;
2276 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
2281 /* No change in link */
2282 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
2283 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
2286 cfg.phy_type_low = pcaps->phy_type_low;
2287 cfg.phy_type_high = pcaps->phy_type_high;
2288 cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
2289 cfg.low_power_ctrl = pcaps->low_power_ctrl;
2290 cfg.eee_cap = pcaps->eee_cap;
2291 cfg.eeer_value = pcaps->eeer_value;
2292 cfg.link_fec_opt = pcaps->link_fec_options;
2294 cfg.caps |= ICE_AQ_PHY_ENA_LINK;
2296 cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
2298 status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
2301 ice_free(hw, pcaps);
2306 ice_dev_set_link_up(struct rte_eth_dev *dev)
2308 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2310 return ice_force_phys_link_state(hw, true);
2314 ice_dev_set_link_down(struct rte_eth_dev *dev)
2316 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2318 return ice_force_phys_link_state(hw, false);
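/* Validate and apply a new MTU. The full frame size (MTU plus Ethernet
 * overhead) decides whether the jumbo-frame Rx offload flag is set, and
 * max_rx_pkt_len is updated to match. The port must be stopped first.
 */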
2322 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2324 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2325 struct rte_eth_dev_data *dev_data = pf->dev_data;
2326 uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
2328 /* check if mtu is within the allowed range */
2329 if (mtu < RTE_ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
2332 /* MTU setting is forbidden while the port is started */
2333 if (dev_data->dev_started) {
2335 "port %d must be stopped before configuration",
2340 if (frame_size > RTE_ETHER_MAX_LEN)
2341 dev_data->dev_conf.rxmode.offloads |=
2342 DEV_RX_OFFLOAD_JUMBO_FRAME;
2344 dev_data->dev_conf.rxmode.offloads &=
2345 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2347 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
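/* Replace the default MAC address: remove the old unicast filter, install a
 * filter for the new address, and program it into firmware as a locally
 * administered address via the manage-MAC-write admin queue command.
 */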
2352 static int ice_macaddr_set(struct rte_eth_dev *dev,
2353 struct rte_ether_addr *mac_addr)
2355 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2356 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2357 struct ice_vsi *vsi = pf->main_vsi;
2358 struct ice_mac_filter *f;
2362 if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
2363 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
2367 TAILQ_FOREACH(f, &vsi->mac_list, next) {
2368 if (rte_is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
2373 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
2377 ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
2378 if (ret != ICE_SUCCESS) {
2379 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
2382 ret = ice_add_mac_filter(vsi, mac_addr);
2383 if (ret != ICE_SUCCESS) {
2384 PMD_DRV_LOG(ERR, "Failed to add mac filter");
2387 memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
2389 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
2390 ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
2391 if (ret != ICE_SUCCESS)
2392 PMD_DRV_LOG(ERR, "Failed to set manage mac");
2397 /* Add a MAC address, and update filters */
2399 ice_macaddr_add(struct rte_eth_dev *dev,
2400 struct rte_ether_addr *mac_addr,
2401 __rte_unused uint32_t index,
2402 __rte_unused uint32_t pool)
2404 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2405 struct ice_vsi *vsi = pf->main_vsi;
2408 ret = ice_add_mac_filter(vsi, mac_addr);
2409 if (ret != ICE_SUCCESS) {
2410 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
2417 /* Remove a MAC address, and update filters */
2419 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
2421 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2422 struct ice_vsi *vsi = pf->main_vsi;
2423 struct rte_eth_dev_data *data = dev->data;
2424 struct rte_ether_addr *macaddr;
2427 macaddr = &data->mac_addrs[index];
2428 ret = ice_remove_mac_filter(vsi, macaddr);
2430 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
2436 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2438 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2439 struct ice_vsi *vsi = pf->main_vsi;
2442 PMD_INIT_FUNC_TRACE();
2445 ret = ice_add_vlan_filter(vsi, vlan_id);
2447 PMD_DRV_LOG(ERR, "Failed to add vlan filter");
2451 ret = ice_remove_vlan_filter(vsi, vlan_id);
2453 PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
2461 /* Configure vlan filter on or off */
2463 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
2465 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2466 struct ice_vsi_ctx ctxt;
2467 uint8_t sec_flags, sw_flags2;
2470 sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2471 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
2472 sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2475 vsi->info.sec_flags |= sec_flags;
2476 vsi->info.sw_flags2 |= sw_flags2;
2478 vsi->info.sec_flags &= ~sec_flags;
2479 vsi->info.sw_flags2 &= ~sw_flags2;
2481 vsi->info.sw_id = hw->port_info->sw_id;
2482 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2483 ctxt.info.valid_sections =
2484 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2485 ICE_AQ_VSI_PROP_SECURITY_VALID);
2486 ctxt.vsi_num = vsi->vsi_id;
2488 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
2490 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
2491 on ? "enable" : "disable");
2494 vsi->info.valid_sections |=
2495 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2496 ICE_AQ_VSI_PROP_SECURITY_VALID);
2499 /* To be consistent with other drivers, allow untagged packets when the VLAN filter is on */
2501 ret = ice_add_vlan_filter(vsi, 0);
2503 ret = ice_remove_vlan_filter(vsi, 0);
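/* Configure VLAN stripping on or off by updating the VLAN emulation mode
 * (EMOD) field of the VSI context: ICE_AQ_VSI_VLAN_EMOD_STR_BOTH enables
 * stripping, ICE_AQ_VSI_VLAN_EMOD_NOTHING disables it.
 */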
2509 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
2511 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2512 struct ice_vsi_ctx ctxt;
2516 /* Check if stripping is already in the requested state */
2517 if (vsi->info.valid_sections &
2518 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
2520 if ((vsi->info.vlan_flags &
2521 ICE_AQ_VSI_VLAN_EMOD_M) ==
2522 ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
2523 return 0; /* already on */
2525 if ((vsi->info.vlan_flags &
2526 ICE_AQ_VSI_VLAN_EMOD_M) ==
2527 ICE_AQ_VSI_VLAN_EMOD_NOTHING)
2528 return 0; /* already off */
2533 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
2535 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
2536 vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
2537 vsi->info.vlan_flags |= vlan_flags;
2538 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2539 ctxt.info.valid_sections =
2540 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
2541 ctxt.vsi_num = vsi->vsi_id;
2542 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
2544 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
2545 on ? "enable" : "disable");
2549 vsi->info.valid_sections |=
2550 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
2556 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2558 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2559 struct ice_vsi *vsi = pf->main_vsi;
2560 struct rte_eth_rxmode *rxmode;
2562 rxmode = &dev->data->dev_conf.rxmode;
2563 if (mask & ETH_VLAN_FILTER_MASK) {
2564 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2565 ice_vsi_config_vlan_filter(vsi, TRUE);
2567 ice_vsi_config_vlan_filter(vsi, FALSE);
2570 if (mask & ETH_VLAN_STRIP_MASK) {
2571 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2572 ice_vsi_config_vlan_stripping(vsi, TRUE);
2574 ice_vsi_config_vlan_stripping(vsi, FALSE);
2577 if (mask & ETH_VLAN_EXTEND_MASK) {
2578 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2579 ice_vsi_config_double_vlan(vsi, TRUE);
2581 ice_vsi_config_double_vlan(vsi, FALSE);
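/* Program the TPID (ethertype) of the selected VLAN tag by rewriting the
 * ETHERTYPE field of the GL_SWT_L2TAGCTRL register picked from the VLAN
 * type; the inner tag can only be selected when QinQ is enabled.
 */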
2588 ice_vlan_tpid_set(struct rte_eth_dev *dev,
2589 enum rte_vlan_type vlan_type,
2592 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2593 uint64_t reg_r = 0, reg_w = 0;
2594 uint16_t reg_id = 0;
2596 int qinq = dev->data->dev_conf.rxmode.offloads &
2597 DEV_RX_OFFLOAD_VLAN_EXTEND;
2599 switch (vlan_type) {
2600 case ETH_VLAN_TYPE_OUTER:
2606 case ETH_VLAN_TYPE_INNER:
2611 "Unsupported vlan type in single vlan.");
2616 PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
2619 reg_r = ICE_READ_REG(hw, GL_SWT_L2TAGCTRL(reg_id));
2620 PMD_DRV_LOG(DEBUG, "Debug read from ICE GL_SWT_L2TAGCTRL[%d]: "
2621 "0x%08"PRIx64"", reg_id, reg_r);
2623 reg_w = reg_r & (~(GL_SWT_L2TAGCTRL_ETHERTYPE_M));
2624 reg_w |= ((uint64_t)tpid << GL_SWT_L2TAGCTRL_ETHERTYPE_S);
2625 if (reg_r == reg_w) {
2626 PMD_DRV_LOG(DEBUG, "No need to write");
2630 ICE_WRITE_REG(hw, GL_SWT_L2TAGCTRL(reg_id), reg_w);
2631 PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
2632 "ICE GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
2638 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2640 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2641 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2647 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2648 ret = ice_aq_get_rss_lut(hw, vsi->idx, TRUE,
2651 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2655 uint32_t *lut_dw = (uint32_t *)lut;
2656 uint16_t i, lut_size_dw = lut_size / 4;
2658 for (i = 0; i < lut_size_dw; i++)
2659 lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
2666 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2675 pf = ICE_VSI_TO_PF(vsi);
2676 hw = ICE_VSI_TO_HW(vsi);
2678 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2679 ret = ice_aq_set_rss_lut(hw, vsi->idx, TRUE,
2682 PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2686 uint32_t *lut_dw = (uint32_t *)lut;
2687 uint16_t i, lut_size_dw = lut_size / 4;
2689 for (i = 0; i < lut_size_dw; i++)
2690 ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
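/* RETA update: fetch the current LUT at its current size, overwrite only
 * the entries whose bit is set in reta_conf[].mask, then write the table
 * back at reta_size, which may also change the LUT size in use.
 */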
2699 ice_rss_reta_update(struct rte_eth_dev *dev,
2700 struct rte_eth_rss_reta_entry64 *reta_conf,
2703 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2704 uint16_t i, lut_size = pf->hash_lut_size;
2705 uint16_t idx, shift;
2709 if (reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 &&
2710 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 &&
2711 reta_size != ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) {
2713 "The size of hash lookup table configured (%d)"
2714 "doesn't match the number hardware can "
2715 "supported (128, 512, 2048)",
2720 /* It MUST use the current LUT size to get the RSS lookup table,
2721 * otherwise it will fail with a -100 error code.
2723 lut = rte_zmalloc(NULL, RTE_MAX(reta_size, lut_size), 0);
2725 PMD_DRV_LOG(ERR, "Failed to allocate memory for the RSS lookup table");
2728 ret = ice_get_rss_lut(pf->main_vsi, lut, lut_size);
2732 for (i = 0; i < reta_size; i++) {
2733 idx = i / RTE_RETA_GROUP_SIZE;
2734 shift = i % RTE_RETA_GROUP_SIZE;
2735 if (reta_conf[idx].mask & (1ULL << shift))
2736 lut[i] = reta_conf[idx].reta[shift];
2738 ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
2739 if (ret == 0 && lut_size != reta_size) {
2741 "The size of hash lookup table is changed from (%d) to (%d)",
2742 lut_size, reta_size);
2743 pf->hash_lut_size = reta_size;
2753 ice_rss_reta_query(struct rte_eth_dev *dev,
2754 struct rte_eth_rss_reta_entry64 *reta_conf,
2757 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2758 uint16_t i, lut_size = pf->hash_lut_size;
2759 uint16_t idx, shift;
2763 if (reta_size != lut_size) {
2765 "The size of hash lookup table configured (%d)"
2766 "doesn't match the number hardware can "
2768 reta_size, lut_size);
2772 lut = rte_zmalloc(NULL, reta_size, 0);
2774 PMD_DRV_LOG(ERR, "Failed to allocate memory for the RSS lookup table");
2778 ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2782 for (i = 0; i < reta_size; i++) {
2783 idx = i / RTE_RETA_GROUP_SIZE;
2784 shift = i % RTE_RETA_GROUP_SIZE;
2785 if (reta_conf[idx].mask & (1ULL << shift))
2786 reta_conf[idx].reta[shift] = lut[i];
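/* Program the RSS hash key through the admin queue. The key must be exactly
 * (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t) bytes long; an empty key is
 * silently ignored and any other length is rejected.
 */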
2796 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
2798 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2801 if (!key || key_len == 0) {
2802 PMD_DRV_LOG(DEBUG, "No key to be configured");
2804 } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
2806 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
2810 struct ice_aqc_get_set_rss_keys *key_dw =
2811 (struct ice_aqc_get_set_rss_keys *)key;
2813 ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
2815 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
2823 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
2825 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2828 if (!key || !key_len)
2831 ret = ice_aq_get_rss_key
2833 (struct ice_aqc_get_set_rss_keys *)key);
2835 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
2838 *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2844 ice_rss_hash_update(struct rte_eth_dev *dev,
2845 struct rte_eth_rss_conf *rss_conf)
2847 enum ice_status status = ICE_SUCCESS;
2848 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2849 struct ice_vsi *vsi = pf->main_vsi;
2852 status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
2856 /* TODO: hash enable config, ice_add_rss_cfg */
2861 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
2862 struct rte_eth_rss_conf *rss_conf)
2864 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2865 struct ice_vsi *vsi = pf->main_vsi;
2867 ice_get_rss_key(vsi, rss_conf->rss_key,
2868 &rss_conf->rss_key_len);
2870 /* TODO: rss_hf reporting is not supported yet, so default to 0 */
2871 rss_conf->rss_hf = 0;
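/* Promiscuous and all-multicast modes both rely on the
 * ice_set_vsi_promisc()/ice_clear_vsi_promisc() switch API: promiscuous
 * mode sets the unicast and multicast Rx/Tx promisc bits, all-multicast
 * only the multicast ones, and multicast promisc is left untouched while
 * promiscuous mode is still enabled.
 */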
2876 ice_promisc_enable(struct rte_eth_dev *dev)
2878 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2879 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2880 struct ice_vsi *vsi = pf->main_vsi;
2881 enum ice_status status;
2884 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
2885 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2887 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
2888 if (status == ICE_ERR_ALREADY_EXISTS)
2889 PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
2890 else if (status != ICE_SUCCESS)
2891 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
2895 ice_promisc_disable(struct rte_eth_dev *dev)
2897 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2898 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2899 struct ice_vsi *vsi = pf->main_vsi;
2900 enum ice_status status;
2903 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
2904 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2906 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
2907 if (status != ICE_SUCCESS)
2908 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
2912 ice_allmulti_enable(struct rte_eth_dev *dev)
2914 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2915 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2916 struct ice_vsi *vsi = pf->main_vsi;
2917 enum ice_status status;
2920 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2922 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
2923 if (status != ICE_SUCCESS)
2924 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
2928 ice_allmulti_disable(struct rte_eth_dev *dev)
2930 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2931 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2932 struct ice_vsi *vsi = pf->main_vsi;
2933 enum ice_status status;
2936 if (dev->data->promiscuous == 1)
2937 return; /* must remain in all_multicast mode */
2939 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2941 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
2942 if (status != ICE_SUCCESS)
2943 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
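/* Per-queue Rx interrupt control: enabling writes INTENA and CLEARPBA to the
 * vector's GLINT_DYN_CTL register (with write-back-on-ITR cleared) and
 * re-arms the interrupt through the PCI interrupt handle; disabling switches
 * the vector to write-back-on-ITR so no interrupt is raised.
 */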
2946 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
2949 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2950 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2951 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2955 msix_intr = intr_handle->intr_vec[queue_id];
2957 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
2958 GLINT_DYN_CTL_ITR_INDX_M;
2959 val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
2961 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
2962 rte_intr_enable(&pci_dev->intr_handle);
2967 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
2970 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2971 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2972 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2975 msix_intr = intr_handle->intr_vec[queue_id];
2977 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
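/* Build the firmware version string from the NVM map version, the EETRACK
 * id and the OEM version word, which is split into major, build and patch
 * fields below.
 */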
2983 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2985 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2991 full_ver = hw->nvm.oem_ver;
2992 ver = (u8)(full_ver >> 24);
2993 build = (u16)((full_ver >> 8) & 0xffff);
2994 patch = (u8)(full_ver & 0xff);
2996 ret = snprintf(fw_version, fw_size,
2997 "%d.%d%d 0x%08x %d.%d.%d",
2998 ((hw->nvm.ver >> 12) & 0xf),
2999 ((hw->nvm.ver >> 4) & 0xff),
3000 (hw->nvm.ver & 0xf), hw->nvm.eetrack,
3003 /* add the size of '\0' */
3005 if (fw_size < (u32)ret)
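/* Update the VSI VLAN flags for port-VLAN (PVID) handling: enable or disable
 * PVID insertion and choose which of tagged/untagged packets are accepted,
 * then push the new VSI context through the admin queue.
 */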
3012 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
3015 struct ice_vsi_ctx ctxt;
3016 uint8_t vlan_flags = 0;
3019 if (!vsi || !info) {
3020 PMD_DRV_LOG(ERR, "invalid parameters");
3025 vsi->info.pvid = info->config.pvid;
3027 * If insert pvid is enabled, only tagged pkts are
3028 * allowed to be sent out.
3030 vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
3031 ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
3034 if (info->config.reject.tagged == 0)
3035 vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;
3037 if (info->config.reject.untagged == 0)
3038 vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
3040 vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
3041 ICE_AQ_VSI_VLAN_MODE_M);
3042 vsi->info.vlan_flags |= vlan_flags;
3043 memset(&ctxt, 0, sizeof(ctxt));
3044 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
3045 ctxt.info.valid_sections =
3046 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3047 ctxt.vsi_num = vsi->vsi_id;
3049 hw = ICE_VSI_TO_HW(vsi);
3050 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
3051 if (ret != ICE_SUCCESS) {
3053 "update VSI for VLAN insert failed, err %d",
3058 vsi->info.valid_sections |=
3059 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
3065 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
3067 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3068 struct ice_vsi *vsi = pf->main_vsi;
3069 struct rte_eth_dev_data *data = pf->dev_data;
3070 struct ice_vsi_vlan_pvid_info info;
3073 memset(&info, 0, sizeof(info));
3076 info.config.pvid = pvid;
3078 info.config.reject.tagged =
3079 data->dev_conf.txmode.hw_vlan_reject_tagged;
3080 info.config.reject.untagged =
3081 data->dev_conf.txmode.hw_vlan_reject_untagged;
3084 ret = ice_vsi_vlan_pvid_set(vsi, &info);
3086 PMD_DRV_LOG(ERR, "Failed to set pvid.");
3094 ice_get_eeprom_length(struct rte_eth_dev *dev)
3096 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3098 /* Convert word count to byte count */
3099 return hw->nvm.sr_words << 1;
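/* Read NVM (shadow RAM) contents for EEPROM dump requests. The byte offset
 * and length supplied by the caller are converted to 16-bit word indices
 * before calling ice_read_sr_buf().
 */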
3103 ice_get_eeprom(struct rte_eth_dev *dev,
3104 struct rte_dev_eeprom_info *eeprom)
3106 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3107 uint16_t *data = eeprom->data;
3108 uint16_t first_word, last_word, nwords;
3109 enum ice_status status = ICE_SUCCESS;
3111 first_word = eeprom->offset >> 1;
3112 last_word = (eeprom->offset + eeprom->length - 1) >> 1;
3113 nwords = last_word - first_word + 1;
3115 if (first_word >= hw->nvm.sr_words ||
3116 last_word >= hw->nvm.sr_words) {
3117 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
3121 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3123 status = ice_read_sr_buf(hw, first_word, &nwords, data);
3125 PMD_DRV_LOG(ERR, "EEPROM read failed.");
3126 eeprom->length = sizeof(uint16_t) * nwords;
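/* Hardware statistics counters are 32 or 40 bits wide and are never cleared
 * by the driver. The helpers below keep a per-counter offset (snapshot) and
 * report the delta, adding 2^32 or 2^40 whenever the raw counter has wrapped
 * since the last read.
 */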
3134 ice_stat_update_32(struct ice_hw *hw,
3142 new_data = (uint64_t)ICE_READ_REG(hw, reg);
3146 if (new_data >= *offset)
3147 *stat = (uint64_t)(new_data - *offset);
3149 *stat = (uint64_t)((new_data +
3150 ((uint64_t)1 << ICE_32_BIT_WIDTH))
3155 ice_stat_update_40(struct ice_hw *hw,
3164 new_data = (uint64_t)ICE_READ_REG(hw, loreg);
3165 new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
3171 if (new_data >= *offset)
3172 *stat = new_data - *offset;
3174 *stat = (uint64_t)((new_data +
3175 ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
3178 *stat &= ICE_40_BIT_MASK;
3181 /* Get all the statistics of a VSI */
3183 ice_update_vsi_stats(struct ice_vsi *vsi)
3185 struct ice_eth_stats *oes = &vsi->eth_stats_offset;
3186 struct ice_eth_stats *nes = &vsi->eth_stats;
3187 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3188 int idx = rte_le_to_cpu_16(vsi->vsi_id);
3190 ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
3191 vsi->offset_loaded, &oes->rx_bytes,
3193 ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
3194 vsi->offset_loaded, &oes->rx_unicast,
3196 ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
3197 vsi->offset_loaded, &oes->rx_multicast,
3198 &nes->rx_multicast);
3199 ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
3200 vsi->offset_loaded, &oes->rx_broadcast,
3201 &nes->rx_broadcast);
3202 /* exclude CRC bytes */
3203 nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
3204 nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
3206 ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
3207 &oes->rx_discards, &nes->rx_discards);
3208 /* GLV_REPC not supported */
3209 /* GLV_RMPC not supported */
3210 ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
3211 &oes->rx_unknown_protocol,
3212 &nes->rx_unknown_protocol);
3213 ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
3214 vsi->offset_loaded, &oes->tx_bytes,
3216 ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
3217 vsi->offset_loaded, &oes->tx_unicast,
3219 ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
3220 vsi->offset_loaded, &oes->tx_multicast,
3221 &nes->tx_multicast);
3222 ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
3223 vsi->offset_loaded, &oes->tx_broadcast,
3224 &nes->tx_broadcast);
3225 /* GLV_TDPC not supported */
3226 ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
3227 &oes->tx_errors, &nes->tx_errors);
3228 vsi->offset_loaded = true;
3230 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
3232 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
3233 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
3234 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
3235 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
3236 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
3237 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3238 nes->rx_unknown_protocol);
3239 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
3240 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
3241 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
3242 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
3243 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
3244 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
3245 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
3250 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
3252 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
3253 struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
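/* Snapshot every port-level counter into pf->stats: 40-bit counters are read
 * from their high/low register pair and deltas are computed against
 * pf->stats_offset, exactly as done for the VSI counters above.
 */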
3255 /* Get statistics of struct ice_eth_stats */
3256 ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
3257 GLPRT_GORCL(hw->port_info->lport),
3258 pf->offset_loaded, &os->eth.rx_bytes,
3260 ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
3261 GLPRT_UPRCL(hw->port_info->lport),
3262 pf->offset_loaded, &os->eth.rx_unicast,
3263 &ns->eth.rx_unicast);
3264 ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
3265 GLPRT_MPRCL(hw->port_info->lport),
3266 pf->offset_loaded, &os->eth.rx_multicast,
3267 &ns->eth.rx_multicast);
3268 ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
3269 GLPRT_BPRCL(hw->port_info->lport),
3270 pf->offset_loaded, &os->eth.rx_broadcast,
3271 &ns->eth.rx_broadcast);
3272 ice_stat_update_32(hw, PRTRPB_RDPC,
3273 pf->offset_loaded, &os->eth.rx_discards,
3274 &ns->eth.rx_discards);
3276 /* Workaround: CRC size should not be included in byte statistics,
3277 * so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
3280 ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3281 ns->eth.rx_broadcast) * RTE_ETHER_CRC_LEN;
3283 /* GLPRT_REPC not supported */
3284 /* GLPRT_RMPC not supported */
3285 ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
3287 &os->eth.rx_unknown_protocol,
3288 &ns->eth.rx_unknown_protocol);
3289 ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
3290 GLPRT_GOTCL(hw->port_info->lport),
3291 pf->offset_loaded, &os->eth.tx_bytes,
3293 ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
3294 GLPRT_UPTCL(hw->port_info->lport),
3295 pf->offset_loaded, &os->eth.tx_unicast,
3296 &ns->eth.tx_unicast);
3297 ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
3298 GLPRT_MPTCL(hw->port_info->lport),
3299 pf->offset_loaded, &os->eth.tx_multicast,
3300 &ns->eth.tx_multicast);
3301 ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
3302 GLPRT_BPTCL(hw->port_info->lport),
3303 pf->offset_loaded, &os->eth.tx_broadcast,
3304 &ns->eth.tx_broadcast);
3305 ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3306 ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;
3308 /* GLPRT_TEPC not supported */
3310 /* additional port specific stats */
3311 ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
3312 pf->offset_loaded, &os->tx_dropped_link_down,
3313 &ns->tx_dropped_link_down);
3314 ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
3315 pf->offset_loaded, &os->crc_errors,
3317 ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
3318 pf->offset_loaded, &os->illegal_bytes,
3319 &ns->illegal_bytes);
3320 /* GLPRT_ERRBC not supported */
3321 ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
3322 pf->offset_loaded, &os->mac_local_faults,
3323 &ns->mac_local_faults);
3324 ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
3325 pf->offset_loaded, &os->mac_remote_faults,
3326 &ns->mac_remote_faults);
3328 ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
3329 pf->offset_loaded, &os->rx_len_errors,
3330 &ns->rx_len_errors);
3332 ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
3333 pf->offset_loaded, &os->link_xon_rx,
3335 ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
3336 pf->offset_loaded, &os->link_xoff_rx,
3338 ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
3339 pf->offset_loaded, &os->link_xon_tx,
3341 ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
3342 pf->offset_loaded, &os->link_xoff_tx,
3344 ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
3345 GLPRT_PRC64L(hw->port_info->lport),
3346 pf->offset_loaded, &os->rx_size_64,
3348 ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
3349 GLPRT_PRC127L(hw->port_info->lport),
3350 pf->offset_loaded, &os->rx_size_127,
3352 ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
3353 GLPRT_PRC255L(hw->port_info->lport),
3354 pf->offset_loaded, &os->rx_size_255,
3356 ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
3357 GLPRT_PRC511L(hw->port_info->lport),
3358 pf->offset_loaded, &os->rx_size_511,
3360 ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
3361 GLPRT_PRC1023L(hw->port_info->lport),
3362 pf->offset_loaded, &os->rx_size_1023,
3364 ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
3365 GLPRT_PRC1522L(hw->port_info->lport),
3366 pf->offset_loaded, &os->rx_size_1522,
3368 ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
3369 GLPRT_PRC9522L(hw->port_info->lport),
3370 pf->offset_loaded, &os->rx_size_big,
3372 ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
3373 pf->offset_loaded, &os->rx_undersize,
3375 ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
3376 pf->offset_loaded, &os->rx_fragments,
3378 ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
3379 pf->offset_loaded, &os->rx_oversize,
3381 ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
3382 pf->offset_loaded, &os->rx_jabber,
3384 ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
3385 GLPRT_PTC64L(hw->port_info->lport),
3386 pf->offset_loaded, &os->tx_size_64,
3388 ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
3389 GLPRT_PTC127L(hw->port_info->lport),
3390 pf->offset_loaded, &os->tx_size_127,
3392 ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
3393 GLPRT_PTC255L(hw->port_info->lport),
3394 pf->offset_loaded, &os->tx_size_255,
3396 ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
3397 GLPRT_PTC511L(hw->port_info->lport),
3398 pf->offset_loaded, &os->tx_size_511,
3400 ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
3401 GLPRT_PTC1023L(hw->port_info->lport),
3402 pf->offset_loaded, &os->tx_size_1023,
3404 ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
3405 GLPRT_PTC1522L(hw->port_info->lport),
3406 pf->offset_loaded, &os->tx_size_1522,
3408 ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
3409 GLPRT_PTC9522L(hw->port_info->lport),
3410 pf->offset_loaded, &os->tx_size_big,
3413 /* GLPRT_MSPDC not supported */
3414 /* GLPRT_XEC not supported */
3416 pf->offset_loaded = true;
3419 ice_update_vsi_stats(pf->main_vsi);
3422 /* Get all statistics of a port */
3424 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3426 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3427 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3428 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
3430 /* refresh the hardware counters, then translate them into rte_eth_stats fields */
3431 ice_read_stats_registers(pf, hw);
3433 stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
3434 pf->main_vsi->eth_stats.rx_multicast +
3435 pf->main_vsi->eth_stats.rx_broadcast -
3436 pf->main_vsi->eth_stats.rx_discards;
3437 stats->opackets = ns->eth.tx_unicast +
3438 ns->eth.tx_multicast +
3439 ns->eth.tx_broadcast;
3440 stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
3441 stats->obytes = ns->eth.tx_bytes;
3442 stats->oerrors = ns->eth.tx_errors +
3443 pf->main_vsi->eth_stats.tx_errors;
3446 stats->imissed = ns->eth.rx_discards +
3447 pf->main_vsi->eth_stats.rx_discards;
3448 stats->ierrors = ns->crc_errors +
3450 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3452 PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
3453 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
3454 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
3455 PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
3456 PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
3457 PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
3458 PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
3459 pf->main_vsi->eth_stats.rx_discards);
3460 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3461 ns->eth.rx_unknown_protocol);
3462 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
3463 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
3464 PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
3465 PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
3466 PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
3467 PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
3468 pf->main_vsi->eth_stats.tx_discards);
3469 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);
3471 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
3472 ns->tx_dropped_link_down);
3473 PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
3474 PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
3476 PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
3477 PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
3478 ns->mac_local_faults);
3479 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
3480 ns->mac_remote_faults);
3481 PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
3482 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
3483 PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
3484 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
3485 PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
3486 PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
3487 PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
3488 PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
3489 PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
3490 PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
3491 PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
3492 PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
3493 PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
3494 PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
3495 PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
3496 PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
3497 PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
3498 PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
3499 PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
3500 PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
3501 PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
3502 PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
3503 PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors);
3504 PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
3508 /* Reset the statistics */
3510 ice_stats_reset(struct rte_eth_dev *dev)
3512 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3513 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3515 /* Mark PF and VSI stats to update the offset, aka "reset" */
3516 pf->offset_loaded = false;
3518 pf->main_vsi->offset_loaded = false;
3520 /* read the stats so that the current register values become the new offsets */
3521 ice_read_stats_registers(pf, hw);
3525 ice_xstats_calc_num(void)
3529 num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
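/* Extended stats are exposed in two groups: the generic ethernet counters
 * from struct ice_eth_stats followed by the port-specific counters from
 * struct ice_hw_port_stats. Values and names are emitted in the same order,
 * so the ids produced by ice_xstats_get() and ice_xstats_get_names() match.
 */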
3535 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3538 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3539 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3542 struct ice_hw_port_stats *hw_stats = &pf->stats;
3544 count = ice_xstats_calc_num();
3548 ice_read_stats_registers(pf, hw);
3555 /* Get stats from ice_eth_stats struct */
3556 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3557 xstats[count].value =
3558 *(uint64_t *)((char *)&hw_stats->eth +
3559 ice_stats_strings[i].offset);
3560 xstats[count].id = count;
3564 /* Get individual stats from ice_hw_port struct */
3565 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3566 xstats[count].value =
3567 *(uint64_t *)((char *)hw_stats +
3568 ice_hw_port_strings[i].offset);
3569 xstats[count].id = count;
3576 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3577 struct rte_eth_xstat_name *xstats_names,
3578 __rte_unused unsigned int limit)
3580 unsigned int count = 0;
3584 return ice_xstats_calc_num();
3586 /* Note: limit checked in rte_eth_xstats_names() */
3588 /* Get stats from ice_eth_stats struct */
3589 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3590 strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
3591 sizeof(xstats_names[count].name));
3595 /* Get individual stats from ice_hw_port struct */
3596 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3597 strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
3598 sizeof(xstats_names[count].name));
3606 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3607 struct rte_pci_device *pci_dev)
3609 return rte_eth_dev_pci_generic_probe(pci_dev,
3610 sizeof(struct ice_adapter),
3615 ice_pci_remove(struct rte_pci_device *pci_dev)
3617 return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
3620 static struct rte_pci_driver rte_ice_pmd = {
3621 .id_table = pci_id_ice_map,
3622 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3623 RTE_PCI_DRV_IOVA_AS_VA,
3624 .probe = ice_pci_probe,
3625 .remove = ice_pci_remove,
3629 * Driver initialization routine.
3630 * Invoked once at EAL init time.
3631 * Register itself as the [Poll Mode] Driver of PCI devices.
3633 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
3634 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
3635 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
3636 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
3637 ICE_MAX_QP_NUM "=<int>");
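/* Illustrative usage of the device argument registered above (the PCI
 * address below is only an example):
 *   testpmd -w 0000:18:00.0,max_queue_pair_num=4 ...
 * limits the number of queue pairs the PMD will use for that port.
 */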
3639 RTE_INIT(ice_init_log)
3641 ice_logtype_init = rte_log_register("pmd.net.ice.init");
3642 if (ice_logtype_init >= 0)
3643 rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
3644 ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
3645 if (ice_logtype_driver >= 0)
3646 rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);