1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
13 #include "base/ice_sched.h"
14 #include "base/ice_flow.h"
15 #include "base/ice_dcb.h"
16 #include "ice_ethdev.h"
19 #define ICE_MAX_QP_NUM "max_queue_pair_num"
20 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
21 #define ICE_DFLT_PKG_FILE "/lib/firmware/intel/ice/ddp/ice.pkg"
24 int ice_logtype_driver;
26 static int ice_dev_configure(struct rte_eth_dev *dev);
27 static int ice_dev_start(struct rte_eth_dev *dev);
28 static void ice_dev_stop(struct rte_eth_dev *dev);
29 static void ice_dev_close(struct rte_eth_dev *dev);
30 static int ice_dev_reset(struct rte_eth_dev *dev);
31 static void ice_dev_info_get(struct rte_eth_dev *dev,
32 struct rte_eth_dev_info *dev_info);
33 static int ice_link_update(struct rte_eth_dev *dev,
34 int wait_to_complete);
35 static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
36 static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
37 static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
38 enum rte_vlan_type vlan_type,
40 static int ice_rss_reta_update(struct rte_eth_dev *dev,
41 struct rte_eth_rss_reta_entry64 *reta_conf,
43 static int ice_rss_reta_query(struct rte_eth_dev *dev,
44 struct rte_eth_rss_reta_entry64 *reta_conf,
46 static int ice_rss_hash_update(struct rte_eth_dev *dev,
47 struct rte_eth_rss_conf *rss_conf);
48 static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
49 struct rte_eth_rss_conf *rss_conf);
50 static void ice_promisc_enable(struct rte_eth_dev *dev);
51 static void ice_promisc_disable(struct rte_eth_dev *dev);
52 static void ice_allmulti_enable(struct rte_eth_dev *dev);
53 static void ice_allmulti_disable(struct rte_eth_dev *dev);
54 static int ice_vlan_filter_set(struct rte_eth_dev *dev,
57 static int ice_macaddr_set(struct rte_eth_dev *dev,
58 struct ether_addr *mac_addr);
59 static int ice_macaddr_add(struct rte_eth_dev *dev,
60 struct ether_addr *mac_addr,
61 __rte_unused uint32_t index,
63 static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
64 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
66 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
68 static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
70 static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
71 uint16_t pvid, int on);
72 static int ice_get_eeprom_length(struct rte_eth_dev *dev);
73 static int ice_get_eeprom(struct rte_eth_dev *dev,
74 struct rte_dev_eeprom_info *eeprom);
75 static int ice_stats_get(struct rte_eth_dev *dev,
76 struct rte_eth_stats *stats);
77 static void ice_stats_reset(struct rte_eth_dev *dev);
78 static int ice_xstats_get(struct rte_eth_dev *dev,
79 struct rte_eth_xstat *xstats, unsigned int n);
80 static int ice_xstats_get_names(struct rte_eth_dev *dev,
81 struct rte_eth_xstat_name *xstats_names,
84 static const struct rte_pci_id pci_id_ice_map[] = {
85 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
86 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
87 { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
88 { .vendor_id = 0, /* sentinel */ },
91 static const struct eth_dev_ops ice_eth_dev_ops = {
92 .dev_configure = ice_dev_configure,
93 .dev_start = ice_dev_start,
94 .dev_stop = ice_dev_stop,
95 .dev_close = ice_dev_close,
96 .dev_reset = ice_dev_reset,
97 .rx_queue_start = ice_rx_queue_start,
98 .rx_queue_stop = ice_rx_queue_stop,
99 .tx_queue_start = ice_tx_queue_start,
100 .tx_queue_stop = ice_tx_queue_stop,
101 .rx_queue_setup = ice_rx_queue_setup,
102 .rx_queue_release = ice_rx_queue_release,
103 .tx_queue_setup = ice_tx_queue_setup,
104 .tx_queue_release = ice_tx_queue_release,
105 .dev_infos_get = ice_dev_info_get,
106 .dev_supported_ptypes_get = ice_dev_supported_ptypes_get,
107 .link_update = ice_link_update,
108 .mtu_set = ice_mtu_set,
109 .mac_addr_set = ice_macaddr_set,
110 .mac_addr_add = ice_macaddr_add,
111 .mac_addr_remove = ice_macaddr_remove,
112 .vlan_filter_set = ice_vlan_filter_set,
113 .vlan_offload_set = ice_vlan_offload_set,
114 .vlan_tpid_set = ice_vlan_tpid_set,
115 .reta_update = ice_rss_reta_update,
116 .reta_query = ice_rss_reta_query,
117 .rss_hash_update = ice_rss_hash_update,
118 .rss_hash_conf_get = ice_rss_hash_conf_get,
119 .promiscuous_enable = ice_promisc_enable,
120 .promiscuous_disable = ice_promisc_disable,
121 .allmulticast_enable = ice_allmulti_enable,
122 .allmulticast_disable = ice_allmulti_disable,
123 .rx_queue_intr_enable = ice_rx_queue_intr_enable,
124 .rx_queue_intr_disable = ice_rx_queue_intr_disable,
125 .fw_version_get = ice_fw_version_get,
126 .vlan_pvid_set = ice_vlan_pvid_set,
127 .rxq_info_get = ice_rxq_info_get,
128 .txq_info_get = ice_txq_info_get,
129 .get_eeprom_length = ice_get_eeprom_length,
130 .get_eeprom = ice_get_eeprom,
131 .rx_queue_count = ice_rx_queue_count,
132 .rx_descriptor_status = ice_rx_descriptor_status,
133 .tx_descriptor_status = ice_tx_descriptor_status,
134 .stats_get = ice_stats_get,
135 .stats_reset = ice_stats_reset,
136 .xstats_get = ice_xstats_get,
137 .xstats_get_names = ice_xstats_get_names,
138 .xstats_reset = ice_stats_reset,
141 /* store statistics names and their offsets in the stats structure */
142 struct ice_xstats_name_off {
143 char name[RTE_ETH_XSTATS_NAME_SIZE];
147 static const struct ice_xstats_name_off ice_stats_strings[] = {
148 {"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
149 {"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
150 {"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
151 {"rx_dropped", offsetof(struct ice_eth_stats, rx_discards)},
152 {"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
153 rx_unknown_protocol)},
154 {"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
155 {"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
156 {"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
157 {"tx_dropped", offsetof(struct ice_eth_stats, tx_discards)},
160 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
161 sizeof(ice_stats_strings[0]))
163 static const struct ice_xstats_name_off ice_hw_port_strings[] = {
164 {"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
165 tx_dropped_link_down)},
166 {"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
167 {"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
169 {"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
170 {"mac_local_errors", offsetof(struct ice_hw_port_stats,
172 {"mac_remote_errors", offsetof(struct ice_hw_port_stats,
174 {"rx_len_errors", offsetof(struct ice_hw_port_stats,
176 {"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
177 {"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
178 {"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
179 {"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
180 {"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
181 {"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
183 {"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
185 {"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
187 {"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
189 {"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
191 {"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
193 {"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
195 {"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
197 {"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
198 mac_short_pkt_dropped)},
199 {"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
201 {"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
202 {"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
203 {"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
205 {"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
207 {"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
209 {"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
211 {"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
213 {"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
217 #define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
218 sizeof(ice_hw_port_strings[0]))
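/* Illustrative sketch (not part of the upstream driver): the name/offset
 * tables above let the xstats callbacks fetch every counter generically
 * from the stats structures, roughly like
 *
 *   const struct ice_xstats_name_off *e = &ice_hw_port_strings[i];
 *   uint64_t value = *(uint64_t *)((char *)&pf->stats + e->offset);
 *
 * assuming each counter in ice_hw_port_stats is a 64-bit field.
 */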
221 ice_init_controlq_parameter(struct ice_hw *hw)
223 /* fields for adminq */
224 hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
225 hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
226 hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
227 hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;
229 /* fields for mailboxq, DPDK is used as PF host */
230 hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
231 hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
232 hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
233 hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
237 ice_check_qp_num(const char *key, const char *qp_value,
238 __rte_unused void *opaque)
243 while (isblank(*qp_value))
246 num = strtoul(qp_value, &end, 10);
248 if (!num || (*end == '-') || errno) {
249 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
259 ice_config_max_queue_pair_num(struct rte_devargs *devargs)
261 struct rte_kvargs *kvlist;
262 const char *queue_num_key = ICE_MAX_QP_NUM;
268 kvlist = rte_kvargs_parse(devargs->args, NULL);
272 if (!rte_kvargs_count(kvlist, queue_num_key)) {
273 rte_kvargs_free(kvlist);
277 if (rte_kvargs_process(kvlist, queue_num_key,
278 ice_check_qp_num, NULL) < 0) {
279 rte_kvargs_free(kvlist);
282 ret = rte_kvargs_process(kvlist, queue_num_key,
283 ice_check_qp_num, NULL);
284 rte_kvargs_free(kvlist);
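/* Example (illustrative): the queue-pair limit is passed as a device
 * argument on the EAL command line, e.g.
 *
 *   -w 0000:18:00.0,max_queue_pair_num=8
 *
 * The PCI address here is hypothetical; only the "max_queue_pair_num"
 * key (ICE_MAX_QP_NUM above) is defined by this driver.
 */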
290 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
293 struct pool_entry *entry;
298 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
301 "Failed to allocate memory for resource pool");
305 /* queue heap initialize */
306 pool->num_free = num;
309 LIST_INIT(&pool->alloc_list);
310 LIST_INIT(&pool->free_list);
312 /* Initialize element */
316 LIST_INSERT_HEAD(&pool->free_list, entry, next);
321 ice_res_pool_alloc(struct ice_res_pool_info *pool,
324 struct pool_entry *entry, *valid_entry;
327 PMD_INIT_LOG(ERR, "Invalid parameter");
331 if (pool->num_free < num) {
332 PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
333 num, pool->num_free);
338 /* Look up the free list and find the best-fit entry */
339 LIST_FOREACH(entry, &pool->free_list, next) {
340 if (entry->len >= num) {
342 if (entry->len == num) {
347 valid_entry->len > entry->len)
352 /* No entry found to satisfy the request, return */
354 PMD_INIT_LOG(ERR, "No valid entry found");
358 * The entry has the same number of queues as requested;
359 * remove it from the free_list.
361 if (valid_entry->len == num) {
362 LIST_REMOVE(valid_entry, next);
365 * The entry has more queues than requested;
366 * create a new entry for the alloc_list and shrink the
367 * remaining free_list entry by adjusting its base and length.
369 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
372 "Failed to allocate memory for "
376 entry->base = valid_entry->base;
378 valid_entry->base += num;
379 valid_entry->len -= num;
383 /* Insert it into alloc list, not sorted */
384 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
386 pool->num_free -= valid_entry->len;
387 pool->num_alloc += valid_entry->len;
389 return valid_entry->base + pool->base;
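/* Usage sketch (illustrative): the pool is initialized once with a base
 * and a size, and callers then carve contiguous ranges out of it, e.g.
 *
 *   ice_res_pool_init(&pf->msix_pool, 1, nb_msix - 1);
 *   int vec_base = ice_res_pool_alloc(&pf->msix_pool, nb_qps);
 *
 * Judging from the error paths above, a negative return value means the
 * request could not be satisfied.
 */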
393 ice_res_pool_destroy(struct ice_res_pool_info *pool)
395 struct pool_entry *entry, *next_entry;
400 for (entry = LIST_FIRST(&pool->alloc_list);
401 entry && (next_entry = LIST_NEXT(entry, next), 1);
402 entry = next_entry) {
403 LIST_REMOVE(entry, next);
407 for (entry = LIST_FIRST(&pool->free_list);
408 entry && (next_entry = LIST_NEXT(entry, next), 1);
409 entry = next_entry) {
410 LIST_REMOVE(entry, next);
417 LIST_INIT(&pool->alloc_list);
418 LIST_INIT(&pool->free_list);
422 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
424 /* Set VSI LUT selection */
425 info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
426 ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
427 /* Set Hash scheme */
428 info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
429 ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
431 info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
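/* Note: based on the AQ flag names, ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI selects
 * the VSI's own RSS lookup table and ICE_AQ_VSI_Q_OPT_RSS_TPLZ selects
 * the Toeplitz hash scheme for the default RSS configuration above.
 */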
434 static enum ice_status
435 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
436 struct ice_aqc_vsi_props *info,
437 uint8_t enabled_tcmap)
439 uint16_t bsf, qp_idx;
441 /* Default to TC0 for now. Multi-TC support needs to be added later.
442 * Configure TC and queue mapping parameters; for each enabled TC,
443 * allocate qpnum_per_tc queues to that traffic class.
445 if (enabled_tcmap != 0x01) {
446 PMD_INIT_LOG(ERR, "only TC0 is supported");
450 vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
451 bsf = rte_bsf32(vsi->nb_qps);
452 /* Adjust the queue number to the actual number of queues that can be applied */
453 vsi->nb_qps = 0x1 << bsf;
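/* Illustrative example: rte_bsf32() returns the index of the least
 * significant set bit, so nb_qps is reduced to the power of two given by
 * that bit, e.g. nb_qps = 6 (0b110) -> bsf = 1 -> nb_qps = 2.
 */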
456 /* Set tc and queue mapping with VSI */
457 info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
458 ICE_AQ_VSI_TC_Q_OFFSET_S) |
459 (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
461 /* Associate queue number with VSI */
462 info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
463 info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
464 info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
465 info->valid_sections |=
466 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
467 /* Set the info.ingress_table and info.egress_table
468 * for UP translate table. Now just set it to 1:1 map by default
469 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
471 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
472 info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
473 info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
474 info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
479 ice_init_mac_address(struct rte_eth_dev *dev)
481 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
483 if (!is_unicast_ether_addr
484 ((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
485 PMD_INIT_LOG(ERR, "Invalid MAC address");
489 ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
490 (struct ether_addr *)hw->port_info[0].mac.perm_addr);
492 dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
493 if (!dev->data->mac_addrs) {
495 "Failed to allocate memory to store mac address");
498 /* store it to dev data */
499 ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
500 &dev->data->mac_addrs[0]);
504 /* Find a specific MAC filter */
505 static struct ice_mac_filter *
506 ice_find_mac_filter(struct ice_vsi *vsi, struct ether_addr *macaddr)
508 struct ice_mac_filter *f;
510 TAILQ_FOREACH(f, &vsi->mac_list, next) {
511 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
519 ice_add_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
521 struct ice_fltr_list_entry *m_list_itr = NULL;
522 struct ice_mac_filter *f;
523 struct LIST_HEAD_TYPE list_head;
524 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
527 /* If it's added and configured, return */
528 f = ice_find_mac_filter(vsi, mac_addr);
530 PMD_DRV_LOG(INFO, "This MAC filter already exists.");
534 INIT_LIST_HEAD(&list_head);
536 m_list_itr = (struct ice_fltr_list_entry *)
537 ice_malloc(hw, sizeof(*m_list_itr));
542 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
543 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
544 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
545 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
546 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
547 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
548 m_list_itr->fltr_info.vsi_handle = vsi->idx;
550 LIST_ADD(&m_list_itr->list_entry, &list_head);
553 ret = ice_add_mac(hw, &list_head);
554 if (ret != ICE_SUCCESS) {
555 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
559 /* Add the mac addr into mac list */
560 f = rte_zmalloc(NULL, sizeof(*f), 0);
562 PMD_DRV_LOG(ERR, "failed to allocate memory");
566 rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
567 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
573 rte_free(m_list_itr);
578 ice_remove_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
580 struct ice_fltr_list_entry *m_list_itr = NULL;
581 struct ice_mac_filter *f;
582 struct LIST_HEAD_TYPE list_head;
583 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
586 /* Can't find it, return an error */
587 f = ice_find_mac_filter(vsi, mac_addr);
591 INIT_LIST_HEAD(&list_head);
593 m_list_itr = (struct ice_fltr_list_entry *)
594 ice_malloc(hw, sizeof(*m_list_itr));
599 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
600 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
601 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
602 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
603 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
604 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
605 m_list_itr->fltr_info.vsi_handle = vsi->idx;
607 LIST_ADD(&m_list_itr->list_entry, &list_head);
609 /* remove the mac filter */
610 ret = ice_remove_mac(hw, &list_head);
611 if (ret != ICE_SUCCESS) {
612 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
617 /* Remove the mac addr from mac list */
618 TAILQ_REMOVE(&vsi->mac_list, f, next);
624 rte_free(m_list_itr);
628 /* Find a specific VLAN filter */
629 static struct ice_vlan_filter *
630 ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
632 struct ice_vlan_filter *f;
634 TAILQ_FOREACH(f, &vsi->vlan_list, next) {
635 if (vlan_id == f->vlan_info.vlan_id)
643 ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
645 struct ice_fltr_list_entry *v_list_itr = NULL;
646 struct ice_vlan_filter *f;
647 struct LIST_HEAD_TYPE list_head;
648 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
651 if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)
654 /* If it's added and configured, return. */
655 f = ice_find_vlan_filter(vsi, vlan_id);
657 PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
661 if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
664 INIT_LIST_HEAD(&list_head);
666 v_list_itr = (struct ice_fltr_list_entry *)
667 ice_malloc(hw, sizeof(*v_list_itr));
672 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
673 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
674 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
675 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
676 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
677 v_list_itr->fltr_info.vsi_handle = vsi->idx;
679 LIST_ADD(&v_list_itr->list_entry, &list_head);
682 ret = ice_add_vlan(hw, &list_head);
683 if (ret != ICE_SUCCESS) {
684 PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
689 /* Add vlan into vlan list */
690 f = rte_zmalloc(NULL, sizeof(*f), 0);
692 PMD_DRV_LOG(ERR, "failed to allocate memory");
696 f->vlan_info.vlan_id = vlan_id;
697 TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
703 rte_free(v_list_itr);
708 ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
710 struct ice_fltr_list_entry *v_list_itr = NULL;
711 struct ice_vlan_filter *f;
712 struct LIST_HEAD_TYPE list_head;
713 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
717 * Vlan 0 is the generic filter for untagged packets
718 * and can't be removed.
720 if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)
723 /* Can't find it, return an error */
724 f = ice_find_vlan_filter(vsi, vlan_id);
728 INIT_LIST_HEAD(&list_head);
730 v_list_itr = (struct ice_fltr_list_entry *)
731 ice_malloc(hw, sizeof(*v_list_itr));
737 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
738 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
739 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
740 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
741 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
742 v_list_itr->fltr_info.vsi_handle = vsi->idx;
744 LIST_ADD(&v_list_itr->list_entry, &list_head);
746 /* remove the vlan filter */
747 ret = ice_remove_vlan(hw, &list_head);
748 if (ret != ICE_SUCCESS) {
749 PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
754 /* Remove the vlan id from vlan list */
755 TAILQ_REMOVE(&vsi->vlan_list, f, next);
761 rte_free(v_list_itr);
766 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
768 struct ice_mac_filter *m_f;
769 struct ice_vlan_filter *v_f;
772 if (!vsi || !vsi->mac_num)
775 TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
776 ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
777 if (ret != ICE_SUCCESS) {
783 if (vsi->vlan_num == 0)
786 TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
787 ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
788 if (ret != ICE_SUCCESS) {
799 ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
801 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
802 struct ice_vsi_ctx ctxt;
806 /* Check if it is already on or off */
807 if (vsi->info.valid_sections &
808 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
810 if ((vsi->info.outer_tag_flags &
811 ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
812 ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
813 return 0; /* already on */
815 if (!(vsi->info.outer_tag_flags &
816 ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
817 return 0; /* already off */
822 qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
825 /* clear global insertion and use per packet insertion */
826 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
827 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
828 vsi->info.outer_tag_flags |= qinq_flags;
829 /* use the default outer tag type, ICE_DFLT_OUTER_TAG_TYPE (0x9100) */
830 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
831 vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
832 ICE_AQ_VSI_OUTER_TAG_TYPE_S;
833 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
834 ctxt.info.valid_sections =
835 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
836 ctxt.vsi_num = vsi->vsi_id;
837 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
840 "Update VSI failed to %s qinq stripping",
841 on ? "enable" : "disable");
845 vsi->info.valid_sections |=
846 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
852 ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
854 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
855 struct ice_vsi_ctx ctxt;
859 /* Check if it is already on or off */
860 if (vsi->info.valid_sections &
861 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
863 if ((vsi->info.outer_tag_flags &
864 ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
865 ICE_AQ_VSI_OUTER_TAG_COPY)
866 return 0; /* already on */
868 if ((vsi->info.outer_tag_flags &
869 ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
870 ICE_AQ_VSI_OUTER_TAG_NOTHING)
871 return 0; /* already off */
876 qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
878 qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
879 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
880 vsi->info.outer_tag_flags |= qinq_flags;
881 /* use the default outer tag type, ICE_DFLT_OUTER_TAG_TYPE (0x9100) */
882 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
883 vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
884 ICE_AQ_VSI_OUTER_TAG_TYPE_S;
885 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
886 ctxt.info.valid_sections =
887 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
888 ctxt.vsi_num = vsi->vsi_id;
889 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
892 "Update VSI failed to %s qinq stripping",
893 on ? "enable" : "disable");
897 vsi->info.valid_sections |=
898 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
904 ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
908 ret = ice_vsi_config_qinq_stripping(vsi, on);
910 PMD_DRV_LOG(ERR, "Failed to set qinq stripping - %d", ret);
912 ret = ice_vsi_config_qinq_insertion(vsi, on);
914 PMD_DRV_LOG(ERR, "Failed to set qinq insertion - %d", ret);
921 ice_pf_enable_irq0(struct ice_hw *hw)
923 /* reset the registers */
924 ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
925 ICE_READ_REG(hw, PFINT_OICR);
928 ICE_WRITE_REG(hw, PFINT_OICR_ENA,
929 (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
930 (~PFINT_OICR_LINK_STAT_CHANGE_M)));
932 ICE_WRITE_REG(hw, PFINT_OICR_CTL,
933 (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
934 ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
935 PFINT_OICR_CTL_ITR_INDX_M) |
936 PFINT_OICR_CTL_CAUSE_ENA_M);
938 ICE_WRITE_REG(hw, PFINT_FW_CTL,
939 (0 & PFINT_FW_CTL_MSIX_INDX_M) |
940 ((0 << PFINT_FW_CTL_ITR_INDX_S) &
941 PFINT_FW_CTL_ITR_INDX_M) |
942 PFINT_FW_CTL_CAUSE_ENA_M);
944 ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
947 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
948 GLINT_DYN_CTL_INTENA_M |
949 GLINT_DYN_CTL_CLEARPBA_M |
950 GLINT_DYN_CTL_ITR_INDX_M);
957 ice_pf_disable_irq0(struct ice_hw *hw)
959 /* Disable all interrupt types */
960 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
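/* Note (assumption based on the flag names): writing only
 * GLINT_DYN_CTL_WB_ON_ITR_M leaves INTENA cleared, which masks the
 * vector while still allowing descriptor write-back on ITR expiry;
 * compare with the enable sequence in ice_pf_enable_irq0 above.
 */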
966 ice_handle_aq_msg(struct rte_eth_dev *dev)
968 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
969 struct ice_ctl_q_info *cq = &hw->adminq;
970 struct ice_rq_event_info event;
971 uint16_t pending, opcode;
974 event.buf_len = ICE_AQ_MAX_BUF_LEN;
975 event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
976 if (!event.msg_buf) {
977 PMD_DRV_LOG(ERR, "Failed to allocate mem");
983 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
985 if (ret != ICE_SUCCESS) {
987 "Failed to read msg from AdminQ, "
989 hw->adminq.sq_last_status);
992 opcode = rte_le_to_cpu_16(event.desc.opcode);
995 case ice_aqc_opc_get_link_status:
996 ret = ice_link_update(dev, 0);
998 _rte_eth_dev_callback_process
999 (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1002 PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
1007 rte_free(event.msg_buf);
1012 * Interrupt handler triggered by NIC for handling
1013 * a specific interrupt.
1016 * Pointer to interrupt handle.
1018 * The address of parameter (struct rte_eth_dev *) registered before.
1024 ice_interrupt_handler(void *param)
1026 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1027 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1034 uint32_t int_fw_ctl;
1037 /* Disable interrupt */
1038 ice_pf_disable_irq0(hw);
1040 /* read out interrupt causes */
1041 oicr = ICE_READ_REG(hw, PFINT_OICR);
1043 int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
1046 /* No interrupt event indicated */
1047 if (!(oicr & PFINT_OICR_INTEVENT_M)) {
1048 PMD_DRV_LOG(INFO, "No interrupt event");
1053 if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
1054 PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
1055 ice_handle_aq_msg(dev);
1058 if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
1059 PMD_DRV_LOG(INFO, "OICR: link state change event");
1060 ice_link_update(dev, 0);
1064 if (oicr & PFINT_OICR_MAL_DETECT_M) {
1065 PMD_DRV_LOG(WARNING, "OICR: MDD event");
1066 reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
1067 if (reg & GL_MDET_TX_PQM_VALID_M) {
1068 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1069 GL_MDET_TX_PQM_PF_NUM_S;
1070 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1071 GL_MDET_TX_PQM_MAL_TYPE_S;
1072 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
1073 GL_MDET_TX_PQM_QNUM_S;
1075 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1076 "%d by PQM on TX queue %d PF# %d",
1077 event, queue, pf_num);
1080 reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
1081 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1082 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1083 GL_MDET_TX_TCLAN_PF_NUM_S;
1084 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1085 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1086 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1087 GL_MDET_TX_TCLAN_QNUM_S;
1089 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1090 "%d by TCLAN on TX queue %d PF# %d",
1091 event, queue, pf_num);
1095 /* Enable interrupt */
1096 ice_pf_enable_irq0(hw);
1097 rte_intr_enable(dev->intr_handle);
1100 /* Initialize SW parameters of PF */
1102 ice_pf_sw_init(struct rte_eth_dev *dev)
1104 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1105 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1107 if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
1109 ice_config_max_queue_pair_num(dev->device->devargs);
1112 (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1113 hw->func_caps.common_cap.num_rxq);
1115 pf->lan_nb_qps = pf->lan_nb_qp_max;
1120 static struct ice_vsi *
1121 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1123 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1124 struct ice_vsi *vsi = NULL;
1125 struct ice_vsi_ctx vsi_ctx;
1127 struct ether_addr broadcast = {
1128 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1129 struct ether_addr mac_addr;
1130 uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1131 uint8_t tc_bitmap = 0x1;
1133 /* hw->num_lports = 1 in NIC mode */
1134 vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1138 vsi->idx = pf->next_vsi_idx;
1141 vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1142 vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1143 vsi->vlan_anti_spoof_on = 0;
1144 vsi->vlan_filter_on = 1;
1145 TAILQ_INIT(&vsi->mac_list);
1146 TAILQ_INIT(&vsi->vlan_list);
1148 memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1149 /* base_queue is used in the queue mapping of the VSI add/update command.
1150 * Assume vsi->base_queue is 0 for now; don't consider SRIOV or VMDQ
1151 * cases in the first stage. Only the main VSI is handled.
1153 vsi->base_queue = 0;
1156 vsi->nb_qps = pf->lan_nb_qps;
1157 ice_vsi_config_default_rss(&vsi_ctx.info);
1158 vsi_ctx.alloc_from_pool = true;
1159 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1160 /* switch_id is queried by get_switch_config aq, which is done
1163 vsi_ctx.info.sw_id = hw->port_info->sw_id;
1164 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1165 /* Allow all untagged or tagged packets */
1166 vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
1167 vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
1168 vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1169 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1170 /* Enable VLAN/UP trip */
1171 ret = ice_vsi_config_tc_queue_mapping(vsi,
1176 "tc queue mapping with vsi failed, "
1184 /* for other types of VSI */
1185 PMD_INIT_LOG(ERR, "other types of VSI not supported");
1189 /* VF has MSIX interrupt in VF range, don't allocate here */
1190 if (type == ICE_VSI_PF) {
1191 ret = ice_res_pool_alloc(&pf->msix_pool,
1192 RTE_MIN(vsi->nb_qps,
1193 RTE_MAX_RXTX_INTR_VEC_ID));
1195 PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1198 vsi->msix_intr = ret;
1199 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1204 ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1205 if (ret != ICE_SUCCESS) {
1206 PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
1209 /* store VSI information in the SW structure */
1210 vsi->vsi_id = vsi_ctx.vsi_num;
1211 vsi->info = vsi_ctx.info;
1212 pf->vsis_allocated = vsi_ctx.vsis_allocd;
1213 pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1215 /* MAC configuration */
1216 rte_memcpy(pf->dev_addr.addr_bytes,
1217 hw->port_info->mac.perm_addr,
1220 rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
1221 ret = ice_add_mac_filter(vsi, &mac_addr);
1222 if (ret != ICE_SUCCESS)
1223 PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1225 rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
1226 ret = ice_add_mac_filter(vsi, &mac_addr);
1227 if (ret != ICE_SUCCESS)
1228 PMD_INIT_LOG(ERR, "Failed to add MAC filter");
1230 /* At the beginning, only TC0. */
1231 /* What we need here is the maximum number of TX queues.
1232 * Currently vsi->nb_qps holds that value.
1233 * Correct this if it ever changes.
1235 max_txqs[0] = vsi->nb_qps;
1236 ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1237 tc_bitmap, max_txqs);
1238 if (ret != ICE_SUCCESS)
1239 PMD_INIT_LOG(ERR, "Failed to config vsi sched");
1249 ice_send_driver_ver(struct ice_hw *hw)
1251 struct ice_driver_ver dv;
1253 /* we don't have a driver version; use 0 as a dummy */
1257 dv.subbuild_ver = 0;
1258 strncpy((char *)dv.driver_string, "dpdk", sizeof(dv.driver_string));
1260 return ice_aq_send_driver_ver(hw, &dv, NULL);
1264 ice_pf_setup(struct ice_pf *pf)
1266 struct ice_vsi *vsi;
1268 /* Clear all stats counters */
1269 pf->offset_loaded = FALSE;
1270 memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1271 memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1272 memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1273 memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1275 vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1277 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1286 static int ice_load_pkg(struct rte_eth_dev *dev)
1288 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1289 const char *pkg_file = ICE_DFLT_PKG_FILE;
1296 file = fopen(pkg_file, "rb");
1298 PMD_INIT_LOG(ERR, "failed to open file: %s\n", pkg_file);
1302 err = stat(pkg_file, &fstat);
1304 PMD_INIT_LOG(ERR, "failed to get file stats\n");
1309 buf_len = fstat.st_size;
1310 buf = rte_malloc(NULL, buf_len, 0);
1313 PMD_INIT_LOG(ERR, "failed to allocate buf of size %d for package\n",
1319 err = fread(buf, buf_len, 1, file);
1321 PMD_INIT_LOG(ERR, "failed to read package data\n");
1329 err = ice_copy_and_init_pkg(hw, buf, buf_len);
1331 PMD_INIT_LOG(ERR, "ice_copy_and_init_pkg failed: %d\n", err);
1334 err = ice_init_hw_tbls(hw);
1336 PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", err);
1337 goto fail_init_tbls;
1343 rte_free(hw->pkg_copy);
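/* Note: the DDP package is read from ICE_DFLT_PKG_FILE
 * ("/lib/firmware/intel/ice/ddp/ice.pkg"). If it is missing or cannot be
 * initialized, ice_dev_init() only logs a warning and continues in safe
 * mode with a reduced offload set (see the is_safe_mode handling below).
 */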
1350 ice_dev_init(struct rte_eth_dev *dev)
1352 struct rte_pci_device *pci_dev;
1353 struct rte_intr_handle *intr_handle;
1354 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1355 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1356 struct ice_adapter *ad =
1357 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1358 struct ice_vsi *vsi;
1361 dev->dev_ops = &ice_eth_dev_ops;
1362 dev->rx_pkt_burst = ice_recv_pkts;
1363 dev->tx_pkt_burst = ice_xmit_pkts;
1364 dev->tx_pkt_prepare = ice_prep_pkts;
1366 ice_set_default_ptype_table(dev);
1367 pci_dev = RTE_DEV_TO_PCI(dev->device);
1368 intr_handle = &pci_dev->intr_handle;
1370 pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1371 pf->adapter->eth_dev = dev;
1372 pf->dev_data = dev->data;
1373 hw->back = pf->adapter;
1374 hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
1375 hw->vendor_id = pci_dev->id.vendor_id;
1376 hw->device_id = pci_dev->id.device_id;
1377 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1378 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1379 hw->bus.device = pci_dev->addr.devid;
1380 hw->bus.func = pci_dev->addr.function;
1382 ice_init_controlq_parameter(hw);
1384 ret = ice_init_hw(hw);
1386 PMD_INIT_LOG(ERR, "Failed to initialize HW");
1390 ret = ice_load_pkg(dev);
1392 PMD_INIT_LOG(WARNING, "Failed to load the DDP package, "
1393 "Entering Safe Mode");
1394 ad->is_safe_mode = 1;
1397 PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
1398 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
1399 hw->api_maj_ver, hw->api_min_ver);
1401 ice_pf_sw_init(dev);
1402 ret = ice_init_mac_address(dev);
1404 PMD_INIT_LOG(ERR, "Failed to initialize mac address");
1408 ret = ice_res_pool_init(&pf->msix_pool, 1,
1409 hw->func_caps.common_cap.num_msix_vectors - 1);
1411 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1412 goto err_msix_pool_init;
1415 ret = ice_pf_setup(pf);
1417 PMD_INIT_LOG(ERR, "Failed to setup PF");
1421 ret = ice_send_driver_ver(hw);
1423 PMD_INIT_LOG(ERR, "Failed to send driver version");
1429 /* Disable double vlan by default */
1430 ice_vsi_config_double_vlan(vsi, FALSE);
1432 ret = ice_aq_stop_lldp(hw, TRUE, NULL);
1433 if (ret != ICE_SUCCESS)
1434 PMD_INIT_LOG(DEBUG, "lldp has already stopped\n");
1436 /* register callback func to eal lib */
1437 rte_intr_callback_register(intr_handle,
1438 ice_interrupt_handler, dev);
1440 ice_pf_enable_irq0(hw);
1442 /* enable uio intr after callback register */
1443 rte_intr_enable(intr_handle);
1448 ice_res_pool_destroy(&pf->msix_pool);
1450 rte_free(dev->data->mac_addrs);
1452 ice_sched_cleanup_all(hw);
1453 rte_free(hw->port_info);
1454 ice_shutdown_all_ctrlq(hw);
1460 ice_release_vsi(struct ice_vsi *vsi)
1463 struct ice_vsi_ctx vsi_ctx;
1464 enum ice_status ret;
1469 hw = ICE_VSI_TO_HW(vsi);
1471 ice_remove_all_mac_vlan_filters(vsi);
1473 memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1475 vsi_ctx.vsi_num = vsi->vsi_id;
1476 vsi_ctx.info = vsi->info;
1477 ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
1478 if (ret != ICE_SUCCESS) {
1479 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
1489 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
1491 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1492 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1493 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1494 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1495 uint16_t msix_intr, i;
1497 /* disable interrupts and also clear all the existing config */
1498 for (i = 0; i < vsi->nb_qps; i++) {
1499 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
1500 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
1504 if (rte_intr_allow_others(intr_handle))
1506 for (i = 0; i < vsi->nb_msix; i++) {
1507 msix_intr = vsi->msix_intr + i;
1508 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
1509 GLINT_DYN_CTL_WB_ON_ITR_M);
1513 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1517 ice_dev_stop(struct rte_eth_dev *dev)
1519 struct rte_eth_dev_data *data = dev->data;
1520 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1521 struct ice_vsi *main_vsi = pf->main_vsi;
1522 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1523 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1526 /* avoid stopping again */
1527 if (pf->adapter_stopped)
1530 /* stop and clear all Rx queues */
1531 for (i = 0; i < data->nb_rx_queues; i++)
1532 ice_rx_queue_stop(dev, i);
1534 /* stop and clear all Tx queues */
1535 for (i = 0; i < data->nb_tx_queues; i++)
1536 ice_tx_queue_stop(dev, i);
1538 /* disable all queue interrupts */
1539 ice_vsi_disable_queues_intr(main_vsi);
1541 /* Clear all queues and release mbufs */
1542 ice_clear_queues(dev);
1544 /* Clean datapath event and queue/vec mapping */
1545 rte_intr_efd_disable(intr_handle);
1546 if (intr_handle->intr_vec) {
1547 rte_free(intr_handle->intr_vec);
1548 intr_handle->intr_vec = NULL;
1551 pf->adapter_stopped = true;
1555 ice_dev_close(struct rte_eth_dev *dev)
1557 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1558 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1562 /* release all queue resources */
1563 ice_free_queues(dev);
1565 ice_res_pool_destroy(&pf->msix_pool);
1566 ice_release_vsi(pf->main_vsi);
1567 ice_sched_cleanup_all(hw);
1568 rte_free(hw->port_info);
1569 ice_shutdown_all_ctrlq(hw);
1573 ice_dev_uninit(struct rte_eth_dev *dev)
1575 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1576 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1580 dev->dev_ops = NULL;
1581 dev->rx_pkt_burst = NULL;
1582 dev->tx_pkt_burst = NULL;
1584 rte_free(dev->data->mac_addrs);
1585 dev->data->mac_addrs = NULL;
1587 /* disable uio intr before callback unregister */
1588 rte_intr_disable(intr_handle);
1590 /* unregister callback func from eal lib */
1591 rte_intr_callback_unregister(intr_handle,
1592 ice_interrupt_handler, dev);
1598 ice_dev_configure(__rte_unused struct rte_eth_dev *dev)
1600 struct ice_adapter *ad =
1601 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1603 /* Initialize to TRUE. If any Rx queue doesn't meet the
1604 * bulk allocation or vector Rx preconditions, we will reset it.
1606 ad->rx_bulk_alloc_allowed = true;
1607 ad->tx_simple_allowed = true;
1612 static int ice_init_rss(struct ice_pf *pf)
1614 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1615 struct ice_vsi *vsi = pf->main_vsi;
1616 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1617 struct rte_eth_rss_conf *rss_conf;
1618 struct ice_aqc_get_set_rss_keys key;
1621 bool is_safe_mode = pf->adapter->is_safe_mode;
1623 rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1624 nb_q = dev->data->nb_rx_queues;
1625 vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
1626 vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;
1629 PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n");
1634 vsi->rss_key = rte_zmalloc(NULL,
1635 vsi->rss_key_size, 0);
1637 vsi->rss_lut = rte_zmalloc(NULL,
1638 vsi->rss_lut_size, 0);
1640 /* configure RSS key */
1641 if (!rss_conf->rss_key) {
1642 /* Calculate the default hash key */
1643 for (i = 0; i < vsi->rss_key_size; i++)
1644 vsi->rss_key[i] = (uint8_t)rte_rand();
1646 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
1647 RTE_MIN(rss_conf->rss_key_len,
1648 vsi->rss_key_size));
1650 rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
1651 ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
1655 /* init RSS LUT table */
1656 for (i = 0; i < vsi->rss_lut_size; i++)
1657 vsi->rss_lut[i] = i % nb_q;
1659 ret = ice_aq_set_rss_lut(hw, vsi->idx,
1660 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
1661 vsi->rss_lut, vsi->rss_lut_size);
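/* Illustrative example: with nb_q = 4 Rx queues and an RSS LUT of 512
 * entries (the real size comes from rss_table_size), the loop above
 * fills the LUT with 0,1,2,3,0,1,2,3,... so hash buckets are spread
 * round-robin across the configured queues.
 */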
1665 /* configure RSS for IPv4 with input set IPv4 src/dst */
1666 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
1667 ICE_FLOW_SEG_HDR_IPV4);
1669 PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d", __func__, ret);
1671 /* configure RSS for IPv6 with input set IPv6 src/dst */
1672 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
1673 ICE_FLOW_SEG_HDR_IPV6);
1675 PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d", __func__, ret);
1677 /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
1678 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
1679 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
1681 PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", __func__, ret);
1683 /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
1684 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
1685 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
1687 PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", __func__, ret);
1689 /* configure RSS for sctp6 with input set IPv6 src/dst */
1690 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
1691 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
1693 PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
1696 /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
1697 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
1698 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
1700 PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", __func__, ret);
1702 /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
1703 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
1704 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
1706 PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", __func__, ret);
1708 /* configure RSS for sctp4 with input set IP src/dst */
1709 ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
1710 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
1712 PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
1719 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
1720 int base_queue, int nb_queue)
1722 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1723 uint32_t val, val_tx;
1726 for (i = 0; i < nb_queue; i++) {
1728 val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
1729 (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
1730 val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
1731 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
1733 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
1734 base_queue + i, msix_vect);
1735 /* set ITR0 value */
1736 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
1737 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
1738 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
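/* Illustrative example: __vsi_queues_bind_intr(vsi, 1, 0, 4) programs
 * QINT_RQCTL(0..3) and QINT_TQCTL(0..3) so those queues fire MSI-X
 * vector 1 with ITR index 0.
 */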
1743 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
1745 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1746 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1747 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1748 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1749 uint16_t msix_vect = vsi->msix_intr;
1750 uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1751 uint16_t queue_idx = 0;
1755 /* clear Rx/Tx queue interrupt */
1756 for (i = 0; i < vsi->nb_used_qps; i++) {
1757 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
1758 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
1761 /* PF bind interrupt */
1762 if (rte_intr_dp_is_en(intr_handle)) {
1767 for (i = 0; i < vsi->nb_used_qps; i++) {
1769 if (!rte_intr_allow_others(intr_handle))
1770 msix_vect = ICE_MISC_VEC_ID;
1772 /* uio: map all queues to one msix_vect */
1773 __vsi_queues_bind_intr(vsi, msix_vect,
1774 vsi->base_queue + i,
1775 vsi->nb_used_qps - i);
1777 for (; !!record && i < vsi->nb_used_qps; i++)
1778 intr_handle->intr_vec[queue_idx + i] =
1783 /* vfio 1:1 queue/msix_vect mapping */
1784 __vsi_queues_bind_intr(vsi, msix_vect,
1785 vsi->base_queue + i, 1);
1788 intr_handle->intr_vec[queue_idx + i] = msix_vect;
1796 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
1798 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1799 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1800 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1801 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1802 uint16_t msix_intr, i;
1804 if (rte_intr_allow_others(intr_handle))
1805 for (i = 0; i < vsi->nb_used_qps; i++) {
1806 msix_intr = vsi->msix_intr + i;
1807 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
1808 GLINT_DYN_CTL_INTENA_M |
1809 GLINT_DYN_CTL_CLEARPBA_M |
1810 GLINT_DYN_CTL_ITR_INDX_M |
1811 GLINT_DYN_CTL_WB_ON_ITR_M);
1814 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1815 GLINT_DYN_CTL_INTENA_M |
1816 GLINT_DYN_CTL_CLEARPBA_M |
1817 GLINT_DYN_CTL_ITR_INDX_M |
1818 GLINT_DYN_CTL_WB_ON_ITR_M);
1822 ice_rxq_intr_setup(struct rte_eth_dev *dev)
1824 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1825 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1826 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1827 struct ice_vsi *vsi = pf->main_vsi;
1828 uint32_t intr_vector = 0;
1830 rte_intr_disable(intr_handle);
1832 /* check and configure queue intr-vector mapping */
1833 if ((rte_intr_cap_multiple(intr_handle) ||
1834 !RTE_ETH_DEV_SRIOV(dev).active) &&
1835 dev->data->dev_conf.intr_conf.rxq != 0) {
1836 intr_vector = dev->data->nb_rx_queues;
1837 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
1838 PMD_DRV_LOG(ERR, "At most %d intr queues supported",
1839 ICE_MAX_INTR_QUEUE_NUM);
1842 if (rte_intr_efd_enable(intr_handle, intr_vector))
1846 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1847 intr_handle->intr_vec =
1848 rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
1850 if (!intr_handle->intr_vec) {
1852 "Failed to allocate %d rx_queues intr_vec",
1853 dev->data->nb_rx_queues);
1858 /* Map queues with MSIX interrupt */
1859 vsi->nb_used_qps = dev->data->nb_rx_queues;
1860 ice_vsi_queues_bind_intr(vsi);
1862 /* Enable interrupts for all the queues */
1863 ice_vsi_enable_queues_intr(vsi);
1865 rte_intr_enable(intr_handle);
1871 ice_dev_start(struct rte_eth_dev *dev)
1873 struct rte_eth_dev_data *data = dev->data;
1874 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1875 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1876 struct ice_vsi *vsi = pf->main_vsi;
1877 uint16_t nb_rxq = 0;
1881 /* program Tx queues' context in hardware */
1882 for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
1883 ret = ice_tx_queue_start(dev, nb_txq);
1885 PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
1890 /* program Rx queues' context in hardware */
1891 for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
1892 ret = ice_rx_queue_start(dev, nb_rxq);
1894 PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
1899 ret = ice_init_rss(pf);
1901 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
1905 ice_set_rx_function(dev);
1906 ice_set_tx_function(dev);
1908 mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
1909 ETH_VLAN_EXTEND_MASK;
1910 ret = ice_vlan_offload_set(dev, mask);
1912 PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
1916 /* enable Rx interrupts and map Rx queues to interrupt vectors */
1917 if (ice_rxq_intr_setup(dev))
1920 /* Enable receiving broadcast packets and transmitting packets */
1921 ret = ice_set_vsi_promisc(hw, vsi->idx,
1922 ICE_PROMISC_BCAST_RX | ICE_PROMISC_BCAST_TX |
1923 ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX,
1925 if (ret != ICE_SUCCESS)
1926 PMD_DRV_LOG(INFO, "failed to set vsi broadcast");
1928 ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
1929 ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
1930 ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
1931 ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
1932 ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
1933 ICE_AQ_LINK_EVENT_AN_COMPLETED |
1934 ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
1936 if (ret != ICE_SUCCESS)
1937 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
1939 /* Call the get_link_info aq command to enable/disable LSE */
1940 ice_link_update(dev, 0);
1942 pf->adapter_stopped = false;
1946 /* stop the started queues if we failed to start all queues */
1948 for (i = 0; i < nb_rxq; i++)
1949 ice_rx_queue_stop(dev, i);
1951 for (i = 0; i < nb_txq; i++)
1952 ice_tx_queue_stop(dev, i);
1958 ice_dev_reset(struct rte_eth_dev *dev)
1962 if (dev->data->sriov.active)
1965 ret = ice_dev_uninit(dev);
1967 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
1971 ret = ice_dev_init(dev);
1973 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
1981 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1983 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1984 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1985 struct ice_vsi *vsi = pf->main_vsi;
1986 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1987 bool is_safe_mode = pf->adapter->is_safe_mode;
1991 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
1992 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
1993 dev_info->max_rx_queues = vsi->nb_qps;
1994 dev_info->max_tx_queues = vsi->nb_qps;
1995 dev_info->max_mac_addrs = vsi->max_macaddrs;
1996 dev_info->max_vfs = pci_dev->max_vfs;
1998 dev_info->rx_offload_capa =
1999 DEV_RX_OFFLOAD_VLAN_STRIP |
2000 DEV_RX_OFFLOAD_JUMBO_FRAME |
2001 DEV_RX_OFFLOAD_KEEP_CRC |
2002 DEV_RX_OFFLOAD_SCATTER |
2003 DEV_RX_OFFLOAD_VLAN_FILTER;
2004 dev_info->tx_offload_capa =
2005 DEV_TX_OFFLOAD_VLAN_INSERT |
2006 DEV_TX_OFFLOAD_TCP_TSO |
2007 DEV_TX_OFFLOAD_MULTI_SEGS |
2008 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
2009 dev_info->flow_type_rss_offloads = 0;
2011 if (!is_safe_mode) {
2012 dev_info->rx_offload_capa |=
2013 DEV_RX_OFFLOAD_IPV4_CKSUM |
2014 DEV_RX_OFFLOAD_UDP_CKSUM |
2015 DEV_RX_OFFLOAD_TCP_CKSUM |
2016 DEV_RX_OFFLOAD_QINQ_STRIP |
2017 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
2018 DEV_RX_OFFLOAD_VLAN_EXTEND;
2019 dev_info->tx_offload_capa |=
2020 DEV_TX_OFFLOAD_QINQ_INSERT |
2021 DEV_TX_OFFLOAD_IPV4_CKSUM |
2022 DEV_TX_OFFLOAD_UDP_CKSUM |
2023 DEV_TX_OFFLOAD_TCP_CKSUM |
2024 DEV_TX_OFFLOAD_SCTP_CKSUM |
2025 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
2026 dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL;
2029 dev_info->rx_queue_offload_capa = 0;
2030 dev_info->tx_queue_offload_capa = 0;
2032 dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
2033 dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2035 dev_info->default_rxconf = (struct rte_eth_rxconf) {
2037 .pthresh = ICE_DEFAULT_RX_PTHRESH,
2038 .hthresh = ICE_DEFAULT_RX_HTHRESH,
2039 .wthresh = ICE_DEFAULT_RX_WTHRESH,
2041 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
2046 dev_info->default_txconf = (struct rte_eth_txconf) {
2048 .pthresh = ICE_DEFAULT_TX_PTHRESH,
2049 .hthresh = ICE_DEFAULT_TX_HTHRESH,
2050 .wthresh = ICE_DEFAULT_TX_WTHRESH,
2052 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
2053 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
2057 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2058 .nb_max = ICE_MAX_RING_DESC,
2059 .nb_min = ICE_MIN_RING_DESC,
2060 .nb_align = ICE_ALIGN_RING_DESC,
2063 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2064 .nb_max = ICE_MAX_RING_DESC,
2065 .nb_min = ICE_MIN_RING_DESC,
2066 .nb_align = ICE_ALIGN_RING_DESC,
2069 dev_info->speed_capa = ETH_LINK_SPEED_10M |
2070 ETH_LINK_SPEED_100M |
2072 ETH_LINK_SPEED_2_5G |
2074 ETH_LINK_SPEED_10G |
2075 ETH_LINK_SPEED_20G |
2078 phy_type_low = hw->port_info->phy.phy_type_low;
2079 phy_type_high = hw->port_info->phy.phy_type_high;
2081 if (ICE_PHY_TYPE_SUPPORT_50G(phy_type_low))
2082 dev_info->speed_capa |= ETH_LINK_SPEED_50G;
2084 if (ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type_low) ||
2085 ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type_high))
2086 dev_info->speed_capa |= ETH_LINK_SPEED_100G;
2088 dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2089 dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2091 dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
2092 dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
2093 dev_info->default_rxportconf.nb_queues = 1;
2094 dev_info->default_txportconf.nb_queues = 1;
2095 dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
2096 dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
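/* The two helpers below read and update dev->data->dev_link atomically
 * by treating struct rte_eth_link as a single 64-bit value with
 * rte_atomic64_cmpset; this relies on the assumption that the link
 * struct fits in 64 bits, which is what the casts depend on.
 */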
2100 ice_atomic_read_link_status(struct rte_eth_dev *dev,
2101 struct rte_eth_link *link)
2103 struct rte_eth_link *dst = link;
2104 struct rte_eth_link *src = &dev->data->dev_link;
2106 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2107 *(uint64_t *)src) == 0)
2114 ice_atomic_write_link_status(struct rte_eth_dev *dev,
2115 struct rte_eth_link *link)
2117 struct rte_eth_link *dst = &dev->data->dev_link;
2118 struct rte_eth_link *src = link;
2120 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
2121 *(uint64_t *)src) == 0)
2128 ice_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
2130 #define CHECK_INTERVAL 100 /* 100ms */
2131 #define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
2132 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2133 struct ice_link_status link_status;
2134 struct rte_eth_link link, old;
2136 unsigned int rep_cnt = MAX_REPEAT_TIME;
2137 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
2139 memset(&link, 0, sizeof(link));
2140 memset(&old, 0, sizeof(old));
2141 memset(&link_status, 0, sizeof(link_status));
2142 ice_atomic_read_link_status(dev, &old);
2145 /* Get link status information from hardware */
2146 status = ice_aq_get_link_info(hw->port_info, enable_lse,
2147 &link_status, NULL);
2148 if (status != ICE_SUCCESS) {
2149 link.link_speed = ETH_SPEED_NUM_100M;
2150 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2151 PMD_DRV_LOG(ERR, "Failed to get link info");
2155 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
2156 if (!wait_to_complete || link.link_status)
2159 rte_delay_ms(CHECK_INTERVAL);
2160 } while (--rep_cnt);
2162 if (!link.link_status)
2165 /* Full-duplex operation at all supported speeds */
2166 link.link_duplex = ETH_LINK_FULL_DUPLEX;
2168 /* Parse the link status */
2169 switch (link_status.link_speed) {
2170 case ICE_AQ_LINK_SPEED_10MB:
2171 link.link_speed = ETH_SPEED_NUM_10M;
2173 case ICE_AQ_LINK_SPEED_100MB:
2174 link.link_speed = ETH_SPEED_NUM_100M;
2176 case ICE_AQ_LINK_SPEED_1000MB:
2177 link.link_speed = ETH_SPEED_NUM_1G;
2179 case ICE_AQ_LINK_SPEED_2500MB:
2180 link.link_speed = ETH_SPEED_NUM_2_5G;
2182 case ICE_AQ_LINK_SPEED_5GB:
2183 link.link_speed = ETH_SPEED_NUM_5G;
2185 case ICE_AQ_LINK_SPEED_10GB:
2186 link.link_speed = ETH_SPEED_NUM_10G;
2188 case ICE_AQ_LINK_SPEED_20GB:
2189 link.link_speed = ETH_SPEED_NUM_20G;
2191 case ICE_AQ_LINK_SPEED_25GB:
2192 link.link_speed = ETH_SPEED_NUM_25G;
2194 case ICE_AQ_LINK_SPEED_40GB:
2195 link.link_speed = ETH_SPEED_NUM_40G;
2197 case ICE_AQ_LINK_SPEED_50GB:
2198 link.link_speed = ETH_SPEED_NUM_50G;
2200 case ICE_AQ_LINK_SPEED_100GB:
2201 link.link_speed = ETH_SPEED_NUM_100G;
2203 case ICE_AQ_LINK_SPEED_UNKNOWN:
2205 PMD_DRV_LOG(ERR, "Unknown link speed");
2206 link.link_speed = ETH_SPEED_NUM_NONE;
2210 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2211 ETH_LINK_SPEED_FIXED);
2214 ice_atomic_write_link_status(dev, &link);
2215 if (link.link_status == old.link_status)
2222 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2224 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2225 struct rte_eth_dev_data *dev_data = pf->dev_data;
2226 uint32_t frame_size = mtu + ICE_ETH_OVERHEAD;
2228 /* check if mtu is within the allowed range */
2229 if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
2232 /* mtu setting is forbidden if the port is started */
2233 if (dev_data->dev_started) {
2235 "port %d must be stopped before configuration",
2240 if (frame_size > ETHER_MAX_LEN)
2241 dev_data->dev_conf.rxmode.offloads |=
2242 DEV_RX_OFFLOAD_JUMBO_FRAME;
2244 dev_data->dev_conf.rxmode.offloads &=
2245 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2247 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
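/* Illustrative example: with mtu = 9000, frame_size = 9000 +
 * ICE_ETH_OVERHEAD exceeds ETHER_MAX_LEN, so DEV_RX_OFFLOAD_JUMBO_FRAME
 * is set and max_rx_pkt_len is raised to the new frame size.
 * (ICE_ETH_OVERHEAD is assumed to cover the Ethernet header, CRC and
 * VLAN tag space.)
 */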
2252 static int ice_macaddr_set(struct rte_eth_dev *dev,
2253 struct ether_addr *mac_addr)
2255 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2256 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2257 struct ice_vsi *vsi = pf->main_vsi;
2258 struct ice_mac_filter *f;
2262 if (!is_valid_assigned_ether_addr(mac_addr)) {
2263 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
2267 TAILQ_FOREACH(f, &vsi->mac_list, next) {
2268 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
2273 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
2277 ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
2278 if (ret != ICE_SUCCESS) {
2279 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
2282 ret = ice_add_mac_filter(vsi, mac_addr);
2283 if (ret != ICE_SUCCESS) {
2284 PMD_DRV_LOG(ERR, "Failed to add mac filter");
2287 memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
2289 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
2290 ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
2291 if (ret != ICE_SUCCESS)
2292 PMD_DRV_LOG(ERR, "Failed to set manage mac");
2297 /* Add a MAC address, and update filters */
2299 ice_macaddr_add(struct rte_eth_dev *dev,
2300 struct ether_addr *mac_addr,
2301 __rte_unused uint32_t index,
2302 __rte_unused uint32_t pool)
2304 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2305 struct ice_vsi *vsi = pf->main_vsi;
2308 ret = ice_add_mac_filter(vsi, mac_addr);
2309 if (ret != ICE_SUCCESS) {
2310 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
2317 /* Remove a MAC address, and update filters */
2319 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
2321 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2322 struct ice_vsi *vsi = pf->main_vsi;
2323 struct rte_eth_dev_data *data = dev->data;
2324 struct ether_addr *macaddr;
2327 macaddr = &data->mac_addrs[index];
2328 ret = ice_remove_mac_filter(vsi, macaddr);
2330 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
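/* Add or remove a VLAN ID filter on the main VSI */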
2336 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2338 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2339 struct ice_vsi *vsi = pf->main_vsi;
2342 PMD_INIT_FUNC_TRACE();
2345 ret = ice_add_vlan_filter(vsi, vlan_id);
2347 PMD_DRV_LOG(ERR, "Failed to add vlan filter");
2351 ret = ice_remove_vlan_filter(vsi, vlan_id);
2353 PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
2361 /* Configure vlan filter on or off */
2363 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
2365 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2366 struct ice_vsi_ctx ctxt;
2367 uint8_t sec_flags, sw_flags2;
2370 sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2371 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
2372 sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2375 vsi->info.sec_flags |= sec_flags;
2376 vsi->info.sw_flags2 |= sw_flags2;
2378 vsi->info.sec_flags &= ~sec_flags;
2379 vsi->info.sw_flags2 &= ~sw_flags2;
2381 vsi->info.sw_id = hw->port_info->sw_id;
2382 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2383 ctxt.info.valid_sections =
2384 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2385 ICE_AQ_VSI_PROP_SECURITY_VALID);
2386 ctxt.vsi_num = vsi->vsi_id;
2388 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
2390 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
2391 on ? "enable" : "disable");
2394 vsi->info.valid_sections |=
2395 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2396 ICE_AQ_VSI_PROP_SECURITY_VALID);
2399 /* To be consistent with other drivers, allow untagged packets when the VLAN filter is on */
2401 ret = ice_add_vlan_filter(vsi, 0);
2403 ret = ice_remove_vlan_filter(vsi, 0);
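/* Enable or disable VLAN stripping for the VSI by updating its VLAN mode in the VSI context */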
2409 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
2411 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2412 struct ice_vsi_ctx ctxt;
2416 /* Check whether stripping has already been enabled or disabled */
2417 if (vsi->info.valid_sections &
2418 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
2420 if ((vsi->info.vlan_flags &
2421 ICE_AQ_VSI_VLAN_EMOD_M) ==
2422 ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
2423 return 0; /* already on */
2425 if ((vsi->info.vlan_flags &
2426 ICE_AQ_VSI_VLAN_EMOD_M) ==
2427 ICE_AQ_VSI_VLAN_EMOD_NOTHING)
2428 return 0; /* already off */
2433 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
2435 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
2436 vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
2437 vsi->info.vlan_flags |= vlan_flags;
2438 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2439 ctxt.info.valid_sections =
2440 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
2441 ctxt.vsi_num = vsi->vsi_id;
2442 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
2444 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
2445 on ? "enable" : "disable");
2449 vsi->info.valid_sections |=
2450 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
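/* Apply the VLAN offload configuration (filtering, stripping, extend/QinQ) selected by the mask */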
2456 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2458 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2459 struct ice_vsi *vsi = pf->main_vsi;
2460 struct rte_eth_rxmode *rxmode;
2462 rxmode = &dev->data->dev_conf.rxmode;
2463 if (mask & ETH_VLAN_FILTER_MASK) {
2464 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2465 ice_vsi_config_vlan_filter(vsi, TRUE);
2467 ice_vsi_config_vlan_filter(vsi, FALSE);
2470 if (mask & ETH_VLAN_STRIP_MASK) {
2471 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2472 ice_vsi_config_vlan_stripping(vsi, TRUE);
2474 ice_vsi_config_vlan_stripping(vsi, FALSE);
2477 if (mask & ETH_VLAN_EXTEND_MASK) {
2478 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2479 ice_vsi_config_double_vlan(vsi, TRUE);
2481 ice_vsi_config_double_vlan(vsi, FALSE);
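/* Program the TPID (EtherType) for the requested VLAN tag type into the GL_SWT_L2TAGCTRL register */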
2488 ice_vlan_tpid_set(struct rte_eth_dev *dev,
2489 enum rte_vlan_type vlan_type,
2492 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2493 uint64_t reg_r = 0, reg_w = 0;
2494 uint16_t reg_id = 0;
2496 int qinq = dev->data->dev_conf.rxmode.offloads &
2497 DEV_RX_OFFLOAD_VLAN_EXTEND;
2499 switch (vlan_type) {
2500 case ETH_VLAN_TYPE_OUTER:
2506 case ETH_VLAN_TYPE_INNER:
2511 "Unsupported vlan type in single vlan.");
2516 PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
2519 reg_r = ICE_READ_REG(hw, GL_SWT_L2TAGCTRL(reg_id));
2520 PMD_DRV_LOG(DEBUG, "Debug read from ICE GL_SWT_L2TAGCTRL[%d]: "
2521 "0x%08"PRIx64"", reg_id, reg_r);
2523 reg_w = reg_r & (~(GL_SWT_L2TAGCTRL_ETHERTYPE_M));
2524 reg_w |= ((uint64_t)tpid << GL_SWT_L2TAGCTRL_ETHERTYPE_S);
2525 if (reg_r == reg_w) {
2526 PMD_DRV_LOG(DEBUG, "No need to write");
2530 ICE_WRITE_REG(hw, GL_SWT_L2TAGCTRL(reg_id), reg_w);
2531 PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
2532 "ICE GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
2538 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2540 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2541 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2547 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2548 ret = ice_aq_get_rss_lut(hw, vsi->idx, TRUE,
2551 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2555 uint64_t *lut_dw = (uint64_t *)lut;
2556 uint16_t i, lut_size_dw = lut_size / 4;
2558 for (i = 0; i < lut_size_dw; i++)
2559 lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
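/* Write the RSS lookup table, via the admin queue when supported, otherwise to the PFQF_HLUT registers */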
2566 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2568 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2569 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2575 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2576 ret = ice_aq_set_rss_lut(hw, vsi->idx, TRUE,
2579 PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2583 uint64_t *lut_dw = (uint64_t *)lut;
2584 uint16_t i, lut_size_dw = lut_size / 4;
2586 for (i = 0; i < lut_size_dw; i++)
2587 ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
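/* Update the RSS redirection table entries selected by the reta_conf mask */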
2596 ice_rss_reta_update(struct rte_eth_dev *dev,
2597 struct rte_eth_rss_reta_entry64 *reta_conf,
2600 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2601 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2602 uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2603 uint16_t idx, shift;
2607 if (reta_size != lut_size ||
2608 reta_size > ETH_RSS_RETA_SIZE_512) {
2610 "The size of hash lookup table configured (%d)"
2611 "doesn't match the number hardware can "
2613 reta_size, lut_size);
2617 lut = rte_zmalloc(NULL, reta_size, 0);
2619 PMD_DRV_LOG(ERR, "No memory can be allocated");
2622 ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2626 for (i = 0; i < reta_size; i++) {
2627 idx = i / RTE_RETA_GROUP_SIZE;
2628 shift = i % RTE_RETA_GROUP_SIZE;
2629 if (reta_conf[idx].mask & (1ULL << shift))
2630 lut[i] = reta_conf[idx].reta[shift];
2632 ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
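/* Query the RSS redirection table entries selected by the reta_conf mask */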
2641 ice_rss_reta_query(struct rte_eth_dev *dev,
2642 struct rte_eth_rss_reta_entry64 *reta_conf,
2645 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2646 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2647 uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2648 uint16_t idx, shift;
2652 if (reta_size != lut_size ||
2653 reta_size > ETH_RSS_RETA_SIZE_512) {
2655 "The size of hash lookup table configured (%d)"
2656 "doesn't match the number hardware can "
2658 reta_size, lut_size);
2662 lut = rte_zmalloc(NULL, reta_size, 0);
2664 PMD_DRV_LOG(ERR, "No memory can be allocated");
2668 ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2672 for (i = 0; i < reta_size; i++) {
2673 idx = i / RTE_RETA_GROUP_SIZE;
2674 shift = i % RTE_RETA_GROUP_SIZE;
2675 if (reta_conf[idx].mask & (1ULL << shift))
2676 reta_conf[idx].reta[shift] = lut[i];
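/* Configure the RSS hash key for the VSI through the admin queue */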
2686 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
2688 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2691 if (!key || key_len == 0) {
2692 PMD_DRV_LOG(DEBUG, "No key to be configured");
2694 } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
2696 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
2700 struct ice_aqc_get_set_rss_keys *key_dw =
2701 (struct ice_aqc_get_set_rss_keys *)key;
2703 ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
2705 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
2713 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
2715 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2718 if (!key || !key_len)
2721 ret = ice_aq_get_rss_key
2723 (struct ice_aqc_get_set_rss_keys *)key);
2725 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
2728 *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2734 ice_rss_hash_update(struct rte_eth_dev *dev,
2735 struct rte_eth_rss_conf *rss_conf)
2737 enum ice_status status = ICE_SUCCESS;
2738 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2739 struct ice_vsi *vsi = pf->main_vsi;
2742 status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
2746 /* TODO: hash enable config, ice_add_rss_cfg */
2751 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
2752 struct rte_eth_rss_conf *rss_conf)
2754 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2755 struct ice_vsi *vsi = pf->main_vsi;
2757 ice_get_rss_key(vsi, rss_conf->rss_key,
2758 &rss_conf->rss_key_len);
2760 /* TODO: default set to 0 as hf config is not supported now */
2761 rss_conf->rss_hf = 0;
2766 ice_promisc_enable(struct rte_eth_dev *dev)
2768 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2769 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2770 struct ice_vsi *vsi = pf->main_vsi;
2771 enum ice_status status;
2774 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
2775 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2777 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
2778 if (status == ICE_ERR_ALREADY_EXISTS)
2779 PMD_DRV_LOG(DEBUG, "Promisc mode has already been enabled");
2780 else if (status != ICE_SUCCESS)
2781 PMD_DRV_LOG(ERR, "Failed to enable promisc, err=%d", status);
2785 ice_promisc_disable(struct rte_eth_dev *dev)
2787 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2788 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2789 struct ice_vsi *vsi = pf->main_vsi;
2790 enum ice_status status;
2793 pmask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
2794 ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2796 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
2797 if (status != ICE_SUCCESS)
2798 PMD_DRV_LOG(ERR, "Failed to clear promisc, err=%d", status);
2802 ice_allmulti_enable(struct rte_eth_dev *dev)
2804 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2805 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2806 struct ice_vsi *vsi = pf->main_vsi;
2807 enum ice_status status;
2810 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2812 status = ice_set_vsi_promisc(hw, vsi->idx, pmask, 0);
2813 if (status != ICE_SUCCESS)
2814 PMD_DRV_LOG(ERR, "Failed to enable allmulti, err=%d", status);
2818 ice_allmulti_disable(struct rte_eth_dev *dev)
2820 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2821 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2822 struct ice_vsi *vsi = pf->main_vsi;
2823 enum ice_status status;
2826 if (dev->data->promiscuous == 1)
2827 return; /* must remain in all_multicast mode */
2829 pmask = ICE_PROMISC_MCAST_RX | ICE_PROMISC_MCAST_TX;
2831 status = ice_clear_vsi_promisc(hw, vsi->idx, pmask, 0);
2832 if (status != ICE_SUCCESS)
2833 PMD_DRV_LOG(ERR, "Failed to clear allmulti, err=%d", status);
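/* Enable the interrupt for an Rx queue by programming its GLINT_DYN_CTL register */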
2836 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
2839 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2840 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2841 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2845 msix_intr = intr_handle->intr_vec[queue_id];
2847 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
2848 GLINT_DYN_CTL_ITR_INDX_M;
2849 val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
2851 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
2852 rte_intr_enable(&pci_dev->intr_handle);
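/* Disable the interrupt for an Rx queue, leaving only write-back on ITR enabled */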
2857 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
2860 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2861 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2862 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2865 msix_intr = intr_handle->intr_vec[queue_id];
2867 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
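/* Format the NVM map version, eetrack and OEM version into the firmware version string */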
2873 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2875 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2881 full_ver = hw->nvm.oem_ver;
2882 ver = (u8)(full_ver >> 24);
2883 build = (u16)((full_ver >> 8) & 0xffff);
2884 patch = (u8)(full_ver & 0xff);
2886 ret = snprintf(fw_version, fw_size,
2887 "%d.%d%d 0x%08x %d.%d.%d",
2888 ((hw->nvm.ver >> 12) & 0xf),
2889 ((hw->nvm.ver >> 4) & 0xff),
2890 (hw->nvm.ver & 0xf), hw->nvm.eetrack,
2893 /* add one byte for the terminating '\0' */
2894 ret += 1;
2895 if (fw_size < (u32)ret)
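/* Configure PVID insertion and the acceptance of tagged/untagged frames in the VSI context */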
2902 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
2905 struct ice_vsi_ctx ctxt;
2906 uint8_t vlan_flags = 0;
2909 if (!vsi || !info) {
2910 PMD_DRV_LOG(ERR, "invalid parameters");
2915 vsi->info.pvid = info->config.pvid;
2917 * If PVID insertion is enabled, only tagged packets are
2918 * allowed to be sent out.
2920 vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
2921 ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
2924 if (info->config.reject.tagged == 0)
2925 vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;
2927 if (info->config.reject.untagged == 0)
2928 vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
2930 vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
2931 ICE_AQ_VSI_VLAN_MODE_M);
2932 vsi->info.vlan_flags |= vlan_flags;
2933 memset(&ctxt, 0, sizeof(ctxt));
2934 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2935 ctxt.info.valid_sections =
2936 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
2937 ctxt.vsi_num = vsi->vsi_id;
2939 hw = ICE_VSI_TO_HW(vsi);
2940 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
2941 if (ret != ICE_SUCCESS) {
2943 "update VSI for VLAN insert failed, err %d",
2948 vsi->info.valid_sections |=
2949 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
2955 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
2957 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2958 struct ice_vsi *vsi = pf->main_vsi;
2959 struct rte_eth_dev_data *data = pf->dev_data;
2960 struct ice_vsi_vlan_pvid_info info;
2963 memset(&info, 0, sizeof(info));
2966 info.config.pvid = pvid;
2968 info.config.reject.tagged =
2969 data->dev_conf.txmode.hw_vlan_reject_tagged;
2970 info.config.reject.untagged =
2971 data->dev_conf.txmode.hw_vlan_reject_untagged;
2974 ret = ice_vsi_vlan_pvid_set(vsi, &info);
2976 PMD_DRV_LOG(ERR, "Failed to set pvid.");
2984 ice_get_eeprom_length(struct rte_eth_dev *dev)
2986 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2988 /* Convert word count to byte count */
2989 return hw->nvm.sr_words << 1;
2993 ice_get_eeprom(struct rte_eth_dev *dev,
2994 struct rte_dev_eeprom_info *eeprom)
2996 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2997 uint16_t *data = eeprom->data;
2998 uint16_t first_word, last_word, nwords;
2999 enum ice_status status = ICE_SUCCESS;
3001 first_word = eeprom->offset >> 1;
3002 last_word = (eeprom->offset + eeprom->length - 1) >> 1;
3003 nwords = last_word - first_word + 1;
3005 if (first_word > hw->nvm.sr_words ||
3006 last_word > hw->nvm.sr_words) {
3007 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
3011 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
3013 status = ice_read_sr_buf(hw, first_word, &nwords, data);
3015 PMD_DRV_LOG(ERR, "EEPROM read failed.");
3016 eeprom->length = sizeof(uint16_t) * nwords;
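/* Accumulate a 32-bit statistics register into *stat as the delta from the stored offset, handling counter wrap-around */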
3024 ice_stat_update_32(struct ice_hw *hw,
3032 new_data = (uint64_t)ICE_READ_REG(hw, reg);
3036 if (new_data >= *offset)
3037 *stat = (uint64_t)(new_data - *offset);
3039 *stat = (uint64_t)((new_data +
3040 ((uint64_t)1 << ICE_32_BIT_WIDTH))
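/* Accumulate a 40-bit statistics counter read from a low/high register pair, handling counter wrap-around */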
3045 ice_stat_update_40(struct ice_hw *hw,
3054 new_data = (uint64_t)ICE_READ_REG(hw, loreg);
3055 new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
3061 if (new_data >= *offset)
3062 *stat = new_data - *offset;
3064 *stat = (uint64_t)((new_data +
3065 ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
3068 *stat &= ICE_40_BIT_MASK;
3071 /* Get all the statistics of a VSI */
3073 ice_update_vsi_stats(struct ice_vsi *vsi)
3075 struct ice_eth_stats *oes = &vsi->eth_stats_offset;
3076 struct ice_eth_stats *nes = &vsi->eth_stats;
3077 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
3078 int idx = rte_le_to_cpu_16(vsi->vsi_id);
3080 ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
3081 vsi->offset_loaded, &oes->rx_bytes,
3083 ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
3084 vsi->offset_loaded, &oes->rx_unicast,
3086 ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
3087 vsi->offset_loaded, &oes->rx_multicast,
3088 &nes->rx_multicast);
3089 ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
3090 vsi->offset_loaded, &oes->rx_broadcast,
3091 &nes->rx_broadcast);
3092 /* exclude CRC bytes */
3093 nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
3094 nes->rx_broadcast) * ETHER_CRC_LEN;
3096 ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
3097 &oes->rx_discards, &nes->rx_discards);
3098 /* GLV_REPC not supported */
3099 /* GLV_RMPC not supported */
3100 ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
3101 &oes->rx_unknown_protocol,
3102 &nes->rx_unknown_protocol);
3103 ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
3104 vsi->offset_loaded, &oes->tx_bytes,
3106 ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
3107 vsi->offset_loaded, &oes->tx_unicast,
3109 ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
3110 vsi->offset_loaded, &oes->tx_multicast,
3111 &nes->tx_multicast);
3112 ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
3113 vsi->offset_loaded, &oes->tx_broadcast,
3114 &nes->tx_broadcast);
3115 /* GLV_TDPC not supported */
3116 ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
3117 &oes->tx_errors, &nes->tx_errors);
3118 vsi->offset_loaded = true;
3120 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
3122 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
3123 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
3124 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
3125 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
3126 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
3127 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3128 nes->rx_unknown_protocol);
3129 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
3130 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
3131 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
3132 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
3133 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
3134 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
3135 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
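/* Read all port-level statistics registers into the PF stats and refresh the main VSI stats */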
3140 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
3142 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
3143 struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
3145 /* Get statistics of struct ice_eth_stats */
3146 ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
3147 GLPRT_GORCL(hw->port_info->lport),
3148 pf->offset_loaded, &os->eth.rx_bytes,
3150 ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
3151 GLPRT_UPRCL(hw->port_info->lport),
3152 pf->offset_loaded, &os->eth.rx_unicast,
3153 &ns->eth.rx_unicast);
3154 ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
3155 GLPRT_MPRCL(hw->port_info->lport),
3156 pf->offset_loaded, &os->eth.rx_multicast,
3157 &ns->eth.rx_multicast);
3158 ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
3159 GLPRT_BPRCL(hw->port_info->lport),
3160 pf->offset_loaded, &os->eth.rx_broadcast,
3161 &ns->eth.rx_broadcast);
3162 ice_stat_update_32(hw, PRTRPB_RDPC,
3163 pf->offset_loaded, &os->eth.rx_discards,
3164 &ns->eth.rx_discards);
3166 /* Workaround: CRC size should not be included in byte statistics,
3167 * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
3169 ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
3170 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
3172 /* GLPRT_REPC not supported */
3173 /* GLPRT_RMPC not supported */
3174 ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
3176 &os->eth.rx_unknown_protocol,
3177 &ns->eth.rx_unknown_protocol);
3178 ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
3179 GLPRT_GOTCL(hw->port_info->lport),
3180 pf->offset_loaded, &os->eth.tx_bytes,
3182 ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
3183 GLPRT_UPTCL(hw->port_info->lport),
3184 pf->offset_loaded, &os->eth.tx_unicast,
3185 &ns->eth.tx_unicast);
3186 ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
3187 GLPRT_MPTCL(hw->port_info->lport),
3188 pf->offset_loaded, &os->eth.tx_multicast,
3189 &ns->eth.tx_multicast);
3190 ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
3191 GLPRT_BPTCL(hw->port_info->lport),
3192 pf->offset_loaded, &os->eth.tx_broadcast,
3193 &ns->eth.tx_broadcast);
3194 ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
3195 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
3197 /* GLPRT_TEPC not supported */
3199 /* additional port specific stats */
3200 ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
3201 pf->offset_loaded, &os->tx_dropped_link_down,
3202 &ns->tx_dropped_link_down);
3203 ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
3204 pf->offset_loaded, &os->crc_errors,
3206 ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
3207 pf->offset_loaded, &os->illegal_bytes,
3208 &ns->illegal_bytes);
3209 /* GLPRT_ERRBC not supported */
3210 ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
3211 pf->offset_loaded, &os->mac_local_faults,
3212 &ns->mac_local_faults);
3213 ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
3214 pf->offset_loaded, &os->mac_remote_faults,
3215 &ns->mac_remote_faults);
3217 ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
3218 pf->offset_loaded, &os->rx_len_errors,
3219 &ns->rx_len_errors);
3221 ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
3222 pf->offset_loaded, &os->link_xon_rx,
3224 ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
3225 pf->offset_loaded, &os->link_xoff_rx,
3227 ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
3228 pf->offset_loaded, &os->link_xon_tx,
3230 ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
3231 pf->offset_loaded, &os->link_xoff_tx,
3233 ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
3234 GLPRT_PRC64L(hw->port_info->lport),
3235 pf->offset_loaded, &os->rx_size_64,
3237 ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
3238 GLPRT_PRC127L(hw->port_info->lport),
3239 pf->offset_loaded, &os->rx_size_127,
3241 ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
3242 GLPRT_PRC255L(hw->port_info->lport),
3243 pf->offset_loaded, &os->rx_size_255,
3245 ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
3246 GLPRT_PRC511L(hw->port_info->lport),
3247 pf->offset_loaded, &os->rx_size_511,
3249 ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
3250 GLPRT_PRC1023L(hw->port_info->lport),
3251 pf->offset_loaded, &os->rx_size_1023,
3253 ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
3254 GLPRT_PRC1522L(hw->port_info->lport),
3255 pf->offset_loaded, &os->rx_size_1522,
3257 ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
3258 GLPRT_PRC9522L(hw->port_info->lport),
3259 pf->offset_loaded, &os->rx_size_big,
3261 ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
3262 pf->offset_loaded, &os->rx_undersize,
3264 ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
3265 pf->offset_loaded, &os->rx_fragments,
3267 ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
3268 pf->offset_loaded, &os->rx_oversize,
3270 ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
3271 pf->offset_loaded, &os->rx_jabber,
3273 ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
3274 GLPRT_PTC64L(hw->port_info->lport),
3275 pf->offset_loaded, &os->tx_size_64,
3277 ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
3278 GLPRT_PTC127L(hw->port_info->lport),
3279 pf->offset_loaded, &os->tx_size_127,
3281 ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
3282 GLPRT_PTC255L(hw->port_info->lport),
3283 pf->offset_loaded, &os->tx_size_255,
3285 ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
3286 GLPRT_PTC511L(hw->port_info->lport),
3287 pf->offset_loaded, &os->tx_size_511,
3289 ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
3290 GLPRT_PTC1023L(hw->port_info->lport),
3291 pf->offset_loaded, &os->tx_size_1023,
3293 ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
3294 GLPRT_PTC1522L(hw->port_info->lport),
3295 pf->offset_loaded, &os->tx_size_1522,
3297 ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
3298 GLPRT_PTC9522L(hw->port_info->lport),
3299 pf->offset_loaded, &os->tx_size_big,
3302 /* GLPRT_MSPDC not supported */
3303 /* GLPRT_XEC not supported */
3305 pf->offset_loaded = true;
3308 ice_update_vsi_stats(pf->main_vsi);
3311 /* Get all statistics of a port */
3313 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3315 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3316 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3317 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
3319 /* Read the hardware counters into pf->stats, then derive the rte_eth_stats fields from them */
3320 ice_read_stats_registers(pf, hw);
3322 stats->ipackets = ns->eth.rx_unicast +
3323 ns->eth.rx_multicast +
3324 ns->eth.rx_broadcast -
3325 ns->eth.rx_discards -
3326 pf->main_vsi->eth_stats.rx_discards;
3327 stats->opackets = ns->eth.tx_unicast +
3328 ns->eth.tx_multicast +
3329 ns->eth.tx_broadcast;
3330 stats->ibytes = ns->eth.rx_bytes;
3331 stats->obytes = ns->eth.tx_bytes;
3332 stats->oerrors = ns->eth.tx_errors +
3333 pf->main_vsi->eth_stats.tx_errors;
3336 stats->imissed = ns->eth.rx_discards +
3337 pf->main_vsi->eth_stats.rx_discards;
3338 stats->ierrors = ns->crc_errors +
3340 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3342 PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
3343 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
3344 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
3345 PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
3346 PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
3347 PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
3348 PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
3349 pf->main_vsi->eth_stats.rx_discards);
3350 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3351 ns->eth.rx_unknown_protocol);
3352 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
3353 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
3354 PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
3355 PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
3356 PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
3357 PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
3358 pf->main_vsi->eth_stats.tx_discards);
3359 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);
3361 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
3362 ns->tx_dropped_link_down);
3363 PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
3364 PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
3366 PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
3367 PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
3368 ns->mac_local_faults);
3369 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
3370 ns->mac_remote_faults);
3371 PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
3372 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
3373 PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
3374 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
3375 PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
3376 PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
3377 PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
3378 PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
3379 PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
3380 PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
3381 PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
3382 PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
3383 PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
3384 PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
3385 PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
3386 PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
3387 PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
3388 PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
3389 PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
3390 PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
3391 PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
3392 PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
3393 PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors);
3394 PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
3398 /* Reset the statistics */
3400 ice_stats_reset(struct rte_eth_dev *dev)
3402 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3403 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3405 /* Mark PF and VSI stats to update the offset, aka "reset" */
3406 pf->offset_loaded = false;
3408 pf->main_vsi->offset_loaded = false;
3410 /* Read the stats again so the current register values become the new offsets */
3411 ice_read_stats_registers(pf, hw);
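/* Number of extended statistics: basic Ethernet stats plus HW port stats */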
3415 ice_xstats_calc_num(void)
3419 num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
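/* Fill the xstats array with the basic Ethernet stats followed by the HW port stats */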
3425 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3428 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3429 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3432 struct ice_hw_port_stats *hw_stats = &pf->stats;
3434 count = ice_xstats_calc_num();
3438 ice_read_stats_registers(pf, hw);
3445 /* Get stats from ice_eth_stats struct */
3446 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3447 xstats[count].value =
3448 *(uint64_t *)((char *)&hw_stats->eth +
3449 ice_stats_strings[i].offset);
3450 xstats[count].id = count;
3454 /* Get individual stats from the ice_hw_port struct */
3455 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3456 xstats[count].value =
3457 *(uint64_t *)((char *)hw_stats +
3458 ice_hw_port_strings[i].offset);
3459 xstats[count].id = count;
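/* Report the names of all extended statistics, in the same order as ice_xstats_get() */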
3466 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3467 struct rte_eth_xstat_name *xstats_names,
3468 __rte_unused unsigned int limit)
3470 unsigned int count = 0;
3474 return ice_xstats_calc_num();
3476 /* Note: limit checked in rte_eth_xstats_names() */
3478 /* Get stats from ice_eth_stats struct */
3479 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3480 strlcpy(xstats_names[count].name, ice_stats_strings[i].name,
3481 sizeof(xstats_names[count].name));
3485 /* Get individual stats from the ice_hw_port struct */
3486 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3487 strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
3488 sizeof(xstats_names[count].name));
3496 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3497 struct rte_pci_device *pci_dev)
3499 return rte_eth_dev_pci_generic_probe(pci_dev,
3500 sizeof(struct ice_adapter),
3505 ice_pci_remove(struct rte_pci_device *pci_dev)
3507 return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
3510 static struct rte_pci_driver rte_ice_pmd = {
3511 .id_table = pci_id_ice_map,
3512 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3513 RTE_PCI_DRV_IOVA_AS_VA,
3514 .probe = ice_pci_probe,
3515 .remove = ice_pci_remove,
3519 * Driver initialization routine.
3520 * Invoked once at EAL init time.
3521 * Registers itself as the [Poll Mode] Driver of PCI devices.
3523 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
3524 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
3525 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
3526 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
3527 ICE_MAX_QP_NUM "=<int>");
3529 RTE_INIT(ice_init_log)
3531 ice_logtype_init = rte_log_register("pmd.net.ice.init");
3532 if (ice_logtype_init >= 0)
3533 rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
3534 ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
3535 if (ice_logtype_driver >= 0)
3536 rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);