/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_ethdev_pci.h>

#include "base/ice_sched.h"
#include "ice_ethdev.h"

#define ICE_MAX_QP_NUM "max_queue_pair_num"
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100

int ice_logtype_driver;
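
/*
 * The ICE_MAX_QP_NUM devarg above caps the number of queue pairs the PF
 * exposes. A hedged usage sketch (the PCI address and application are
 * illustrative only, not taken from this file):
 *
 *   testpmd -w 0000:18:00.0,max_queue_pair_num=8 -- -i
 */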
static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static void ice_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type,
			     uint16_t tpid);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
			       uint16_t vlan_id, int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr,
			   __rte_unused uint32_t index,
			   uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
			      size_t fw_size);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
			     uint16_t pvid, int on);
static int ice_get_eeprom_length(struct rte_eth_dev *dev);
static int ice_get_eeprom(struct rte_eth_dev *dev,
			  struct rte_dev_eeprom_info *eeprom);
static int ice_stats_get(struct rte_eth_dev *dev,
			 struct rte_eth_stats *stats);
static void ice_stats_reset(struct rte_eth_dev *dev);
static int ice_xstats_get(struct rte_eth_dev *dev,
			  struct rte_eth_xstat *xstats, unsigned int n);
static int ice_xstats_get_names(struct rte_eth_dev *dev,
				struct rte_eth_xstat_name *xstats_names,
				unsigned int limit);
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure = ice_dev_configure,
	.dev_start = ice_dev_start,
	.dev_stop = ice_dev_stop,
	.dev_close = ice_dev_close,
	.dev_reset = ice_dev_reset,
	.rx_queue_start = ice_rx_queue_start,
	.rx_queue_stop = ice_rx_queue_stop,
	.tx_queue_start = ice_tx_queue_start,
	.tx_queue_stop = ice_tx_queue_stop,
	.rx_queue_setup = ice_rx_queue_setup,
	.rx_queue_release = ice_rx_queue_release,
	.tx_queue_setup = ice_tx_queue_setup,
	.tx_queue_release = ice_tx_queue_release,
	.dev_infos_get = ice_dev_info_get,
	.dev_supported_ptypes_get = ice_dev_supported_ptypes_get,
	.link_update = ice_link_update,
	.mtu_set = ice_mtu_set,
	.mac_addr_set = ice_macaddr_set,
	.mac_addr_add = ice_macaddr_add,
	.mac_addr_remove = ice_macaddr_remove,
	.vlan_filter_set = ice_vlan_filter_set,
	.vlan_offload_set = ice_vlan_offload_set,
	.vlan_tpid_set = ice_vlan_tpid_set,
	.reta_update = ice_rss_reta_update,
	.reta_query = ice_rss_reta_query,
	.rss_hash_update = ice_rss_hash_update,
	.rss_hash_conf_get = ice_rss_hash_conf_get,
	.rx_queue_intr_enable = ice_rx_queue_intr_enable,
	.rx_queue_intr_disable = ice_rx_queue_intr_disable,
	.fw_version_get = ice_fw_version_get,
	.vlan_pvid_set = ice_vlan_pvid_set,
	.rxq_info_get = ice_rxq_info_get,
	.txq_info_get = ice_txq_info_get,
	.get_eeprom_length = ice_get_eeprom_length,
	.get_eeprom = ice_get_eeprom,
	.rx_queue_count = ice_rx_queue_count,
	.stats_get = ice_stats_get,
	.stats_reset = ice_stats_reset,
	.xstats_get = ice_xstats_get,
	.xstats_get_names = ice_xstats_get_names,
	.xstats_reset = ice_stats_reset,
};
/* store statistics names and their offsets in the stats structure */
struct ice_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};
static const struct ice_xstats_name_off ice_stats_strings[] = {
	{"rx_unicast_packets", offsetof(struct ice_eth_stats, rx_unicast)},
	{"rx_multicast_packets", offsetof(struct ice_eth_stats, rx_multicast)},
	{"rx_broadcast_packets", offsetof(struct ice_eth_stats, rx_broadcast)},
	{"rx_dropped", offsetof(struct ice_eth_stats, rx_discards)},
	{"rx_unknown_protocol_packets", offsetof(struct ice_eth_stats,
		rx_unknown_protocol)},
	{"tx_unicast_packets", offsetof(struct ice_eth_stats, tx_unicast)},
	{"tx_multicast_packets", offsetof(struct ice_eth_stats, tx_multicast)},
	{"tx_broadcast_packets", offsetof(struct ice_eth_stats, tx_broadcast)},
	{"tx_dropped", offsetof(struct ice_eth_stats, tx_discards)},
};

#define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
		sizeof(ice_stats_strings[0]))
static const struct ice_xstats_name_off ice_hw_port_strings[] = {
	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
		tx_dropped_link_down)},
	{"rx_crc_errors", offsetof(struct ice_hw_port_stats, crc_errors)},
	{"rx_illegal_byte_errors", offsetof(struct ice_hw_port_stats,
		illegal_bytes)},
	{"rx_error_bytes", offsetof(struct ice_hw_port_stats, error_bytes)},
	{"mac_local_errors", offsetof(struct ice_hw_port_stats,
		mac_local_faults)},
	{"mac_remote_errors", offsetof(struct ice_hw_port_stats,
		mac_remote_faults)},
	{"rx_len_errors", offsetof(struct ice_hw_port_stats,
		rx_len_errors)},
	{"tx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_tx)},
	{"rx_xon_packets", offsetof(struct ice_hw_port_stats, link_xon_rx)},
	{"tx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_tx)},
	{"rx_xoff_packets", offsetof(struct ice_hw_port_stats, link_xoff_rx)},
	{"rx_size_64_packets", offsetof(struct ice_hw_port_stats, rx_size_64)},
	{"rx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		rx_size_127)},
	{"rx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		rx_size_255)},
	{"rx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		rx_size_511)},
	{"rx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1023)},
	{"rx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		rx_size_1522)},
	{"rx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		rx_size_big)},
	{"rx_undersized_errors", offsetof(struct ice_hw_port_stats,
		rx_undersize)},
	{"rx_oversize_errors", offsetof(struct ice_hw_port_stats,
		rx_oversize)},
	{"rx_mac_short_pkt_dropped", offsetof(struct ice_hw_port_stats,
		mac_short_pkt_dropped)},
	{"rx_fragmented_errors", offsetof(struct ice_hw_port_stats,
		rx_fragments)},
	{"rx_jabber_errors", offsetof(struct ice_hw_port_stats, rx_jabber)},
	{"tx_size_64_packets", offsetof(struct ice_hw_port_stats, tx_size_64)},
	{"tx_size_65_to_127_packets", offsetof(struct ice_hw_port_stats,
		tx_size_127)},
	{"tx_size_128_to_255_packets", offsetof(struct ice_hw_port_stats,
		tx_size_255)},
	{"tx_size_256_to_511_packets", offsetof(struct ice_hw_port_stats,
		tx_size_511)},
	{"tx_size_512_to_1023_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1023)},
	{"tx_size_1024_to_1522_packets", offsetof(struct ice_hw_port_stats,
		tx_size_1522)},
	{"tx_size_1523_to_max_packets", offsetof(struct ice_hw_port_stats,
		tx_size_big)},
};

#define ICE_NB_HW_PORT_XSTATS (sizeof(ice_hw_port_strings) / \
		sizeof(ice_hw_port_strings[0]))
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq, DPDK used as PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}
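
/* kvargs handler for ICE_MAX_QP_NUM: parses the value with strtoul() and
 * warns on malformed input; the parsed queue-pair count is expected to be
 * handed back to the caller as a positive number on success.
 */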
218 ice_check_qp_num(const char *key, const char *qp_value,
219 __rte_unused void *opaque)
224 while (isblank(*qp_value))
227 num = strtoul(qp_value, &end, 10);
229 if (!num || (*end == '-') || errno) {
230 PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
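
/* Walk the device arguments and return the requested maximum queue-pair
 * number; callers treat a value greater than zero as a valid override
 * (see ice_pf_sw_init()).
 */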
240 ice_config_max_queue_pair_num(struct rte_devargs *devargs)
242 struct rte_kvargs *kvlist;
243 const char *queue_num_key = ICE_MAX_QP_NUM;
249 kvlist = rte_kvargs_parse(devargs->args, NULL);
253 if (!rte_kvargs_count(kvlist, queue_num_key)) {
254 rte_kvargs_free(kvlist);
258 if (rte_kvargs_process(kvlist, queue_num_key,
259 ice_check_qp_num, NULL) < 0) {
260 rte_kvargs_free(kvlist);
263 ret = rte_kvargs_process(kvlist, queue_num_key,
264 ice_check_qp_num, NULL);
265 rte_kvargs_free(kvlist);
271 ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
274 struct pool_entry *entry;
279 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
282 "Failed to allocate memory for resource pool");
286 /* queue heap initialize */
287 pool->num_free = num;
290 LIST_INIT(&pool->alloc_list);
291 LIST_INIT(&pool->free_list);
293 /* Initialize element */
297 LIST_INSERT_HEAD(&pool->free_list, entry, next);
302 ice_res_pool_alloc(struct ice_res_pool_info *pool,
305 struct pool_entry *entry, *valid_entry;
308 PMD_INIT_LOG(ERR, "Invalid parameter");
312 if (pool->num_free < num) {
313 PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
314 num, pool->num_free);
	/* Look up the free list and find the best-fit entry */
320 LIST_FOREACH(entry, &pool->free_list, next) {
321 if (entry->len >= num) {
323 if (entry->len == num) {
328 valid_entry->len > entry->len)
	/* No entry found to satisfy the request, return */
335 PMD_INIT_LOG(ERR, "No valid entry found");
	 * The entry has the same number of queues as requested,
	 * remove it from the free list.
342 if (valid_entry->len == num) {
343 LIST_REMOVE(valid_entry, next);
	 * The entry has more queues than requested; create a new
	 * entry for the alloc list and shrink the base and length
	 * of the entry left in the free list.
350 entry = rte_zmalloc(NULL, sizeof(*entry), 0);
353 "Failed to allocate memory for "
357 entry->base = valid_entry->base;
359 valid_entry->base += num;
360 valid_entry->len -= num;
364 /* Insert it into alloc list, not sorted */
365 LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
367 pool->num_free -= valid_entry->len;
368 pool->num_alloc += valid_entry->len;
370 return valid_entry->base + pool->base;
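
/* Free every entry on both the alloc and free lists and re-initialize the
 * list heads so the pool can be torn down or reused safely.
 */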
374 ice_res_pool_destroy(struct ice_res_pool_info *pool)
376 struct pool_entry *entry, *next_entry;
381 for (entry = LIST_FIRST(&pool->alloc_list);
382 entry && (next_entry = LIST_NEXT(entry, next), 1);
383 entry = next_entry) {
384 LIST_REMOVE(entry, next);
388 for (entry = LIST_FIRST(&pool->free_list);
389 entry && (next_entry = LIST_NEXT(entry, next), 1);
390 entry = next_entry) {
391 LIST_REMOVE(entry, next);
398 LIST_INIT(&pool->alloc_list);
399 LIST_INIT(&pool->free_list);
403 ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
405 /* Set VSI LUT selection */
406 info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
407 ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
408 /* Set Hash scheme */
409 info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
410 ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
412 info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
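
/* Build the TC/queue mapping section of the VSI context: only TC0 is
 * supported, the queue count is rounded down to a power of two, and the
 * contiguous queue range starting at vsi->base_queue is associated with
 * the VSI.
 */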
415 static enum ice_status
416 ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
417 struct ice_aqc_vsi_props *info,
418 uint8_t enabled_tcmap)
420 uint16_t bsf, qp_idx;
	/* Only TC0 is supported now; multi-TC support needs to be added later.
	 * Configure TC and queue mapping parameters: for each enabled TC,
	 * allocate qpnum_per_tc queues to that traffic class.
426 if (enabled_tcmap != 0x01) {
427 PMD_INIT_LOG(ERR, "only TC0 is supported");
431 vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
432 bsf = rte_bsf32(vsi->nb_qps);
433 /* Adjust the queue number to actual queues that can be applied */
434 vsi->nb_qps = 0x1 << bsf;
437 /* Set tc and queue mapping with VSI */
438 info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
439 ICE_AQ_VSI_TC_Q_OFFSET_S) |
440 (bsf << ICE_AQ_VSI_TC_Q_NUM_S));
442 /* Associate queue number with VSI */
443 info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
444 info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
445 info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
446 info->valid_sections |=
447 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
448 /* Set the info.ingress_table and info.egress_table
449 * for UP translate table. Now just set it to 1:1 map by default
450 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
452 #define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
453 info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
454 info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
455 info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
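
/* Validate the LAN MAC address reported by the hardware, mirror it into
 * the permanent address field and publish it as the port's default MAC.
 */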
460 ice_init_mac_address(struct rte_eth_dev *dev)
462 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
464 if (!is_unicast_ether_addr
465 ((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
466 PMD_INIT_LOG(ERR, "Invalid MAC address");
470 ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
471 (struct ether_addr *)hw->port_info[0].mac.perm_addr);
473 dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
474 if (!dev->data->mac_addrs) {
476 "Failed to allocate memory to store mac address");
479 /* store it to dev data */
480 ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
481 &dev->data->mac_addrs[0]);
485 /* Find out specific MAC filter */
486 static struct ice_mac_filter *
487 ice_find_mac_filter(struct ice_vsi *vsi, struct ether_addr *macaddr)
489 struct ice_mac_filter *f;
491 TAILQ_FOREACH(f, &vsi->mac_list, next) {
492 if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
500 ice_add_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
502 struct ice_fltr_list_entry *m_list_itr = NULL;
503 struct ice_mac_filter *f;
504 struct LIST_HEAD_TYPE list_head;
505 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
508 /* If it's added and configured, return */
509 f = ice_find_mac_filter(vsi, mac_addr);
511 PMD_DRV_LOG(INFO, "This MAC filter already exists.");
515 INIT_LIST_HEAD(&list_head);
517 m_list_itr = (struct ice_fltr_list_entry *)
518 ice_malloc(hw, sizeof(*m_list_itr));
523 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
524 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
525 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
526 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
527 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
528 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
529 m_list_itr->fltr_info.vsi_handle = vsi->idx;
531 LIST_ADD(&m_list_itr->list_entry, &list_head);
534 ret = ice_add_mac(hw, &list_head);
535 if (ret != ICE_SUCCESS) {
536 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
540 /* Add the mac addr into mac list */
541 f = rte_zmalloc(NULL, sizeof(*f), 0);
543 PMD_DRV_LOG(ERR, "failed to allocate memory");
547 rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
548 TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
554 rte_free(m_list_itr);
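
/* Remove a MAC filter from both the hardware switch rules and the
 * software mac_list kept in the VSI.
 */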
559 ice_remove_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
561 struct ice_fltr_list_entry *m_list_itr = NULL;
562 struct ice_mac_filter *f;
563 struct LIST_HEAD_TYPE list_head;
564 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
567 /* Can't find it, return an error */
568 f = ice_find_mac_filter(vsi, mac_addr);
572 INIT_LIST_HEAD(&list_head);
574 m_list_itr = (struct ice_fltr_list_entry *)
575 ice_malloc(hw, sizeof(*m_list_itr));
580 ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
581 mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
582 m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
583 m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
584 m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
585 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
586 m_list_itr->fltr_info.vsi_handle = vsi->idx;
588 LIST_ADD(&m_list_itr->list_entry, &list_head);
590 /* remove the mac filter */
591 ret = ice_remove_mac(hw, &list_head);
592 if (ret != ICE_SUCCESS) {
593 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
598 /* Remove the mac addr from mac list */
599 TAILQ_REMOVE(&vsi->mac_list, f, next);
605 rte_free(m_list_itr);
609 /* Find out specific VLAN filter */
610 static struct ice_vlan_filter *
611 ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
613 struct ice_vlan_filter *f;
615 TAILQ_FOREACH(f, &vsi->vlan_list, next) {
616 if (vlan_id == f->vlan_info.vlan_id)
624 ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
626 struct ice_fltr_list_entry *v_list_itr = NULL;
627 struct ice_vlan_filter *f;
628 struct LIST_HEAD_TYPE list_head;
629 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
632 if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)
635 /* If it's added and configured, return. */
636 f = ice_find_vlan_filter(vsi, vlan_id);
638 PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
642 if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
645 INIT_LIST_HEAD(&list_head);
647 v_list_itr = (struct ice_fltr_list_entry *)
648 ice_malloc(hw, sizeof(*v_list_itr));
653 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
654 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
655 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
656 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
657 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
658 v_list_itr->fltr_info.vsi_handle = vsi->idx;
660 LIST_ADD(&v_list_itr->list_entry, &list_head);
663 ret = ice_add_vlan(hw, &list_head);
664 if (ret != ICE_SUCCESS) {
665 PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
670 /* Add vlan into vlan list */
671 f = rte_zmalloc(NULL, sizeof(*f), 0);
673 PMD_DRV_LOG(ERR, "failed to allocate memory");
677 f->vlan_info.vlan_id = vlan_id;
678 TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
684 rte_free(v_list_itr);
689 ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
691 struct ice_fltr_list_entry *v_list_itr = NULL;
692 struct ice_vlan_filter *f;
693 struct LIST_HEAD_TYPE list_head;
694 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
698 * Vlan 0 is the generic filter for untagged packets
699 * and can't be removed.
701 if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)
704 /* Can't find it, return an error */
705 f = ice_find_vlan_filter(vsi, vlan_id);
709 INIT_LIST_HEAD(&list_head);
711 v_list_itr = (struct ice_fltr_list_entry *)
712 ice_malloc(hw, sizeof(*v_list_itr));
718 v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
719 v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
720 v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
721 v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
722 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
723 v_list_itr->fltr_info.vsi_handle = vsi->idx;
725 LIST_ADD(&v_list_itr->list_entry, &list_head);
727 /* remove the vlan filter */
728 ret = ice_remove_vlan(hw, &list_head);
729 if (ret != ICE_SUCCESS) {
730 PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
735 /* Remove the vlan id from vlan list */
736 TAILQ_REMOVE(&vsi->vlan_list, f, next);
742 rte_free(v_list_itr);
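
/* Flush every MAC and VLAN filter that was added on this VSI; used when
 * the VSI is released.
 */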
747 ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
749 struct ice_mac_filter *m_f;
750 struct ice_vlan_filter *v_f;
753 if (!vsi || !vsi->mac_num)
756 TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
757 ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
758 if (ret != ICE_SUCCESS) {
764 if (vsi->vlan_num == 0)
767 TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
768 ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
769 if (ret != ICE_SUCCESS) {
780 ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
782 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
783 struct ice_vsi_ctx ctxt;
787 /* Check if it has been already on or off */
788 if (vsi->info.valid_sections &
789 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
791 if ((vsi->info.outer_tag_flags &
792 ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
793 ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
794 return 0; /* already on */
796 if (!(vsi->info.outer_tag_flags &
797 ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
798 return 0; /* already off */
803 qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
806 /* clear global insertion and use per packet insertion */
807 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
808 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
809 vsi->info.outer_tag_flags |= qinq_flags;
	/* use the default outer VLAN type (0x9100) */
811 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
812 vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
813 ICE_AQ_VSI_OUTER_TAG_TYPE_S;
814 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
815 ctxt.info.valid_sections =
816 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
817 ctxt.vsi_num = vsi->vsi_id;
818 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
			    "Update VSI failed to %s qinq insertion",
822 on ? "enable" : "disable");
826 vsi->info.valid_sections |=
827 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
833 ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
835 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
836 struct ice_vsi_ctx ctxt;
840 /* Check if it has been already on or off */
841 if (vsi->info.valid_sections &
842 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
844 if ((vsi->info.outer_tag_flags &
845 ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
846 ICE_AQ_VSI_OUTER_TAG_COPY)
847 return 0; /* already on */
849 if ((vsi->info.outer_tag_flags &
850 ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
851 ICE_AQ_VSI_OUTER_TAG_NOTHING)
852 return 0; /* already off */
857 qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
859 qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
860 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
861 vsi->info.outer_tag_flags |= qinq_flags;
	/* use the default outer VLAN type (0x9100) */
863 vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
864 vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
865 ICE_AQ_VSI_OUTER_TAG_TYPE_S;
866 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
867 ctxt.info.valid_sections =
868 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
869 ctxt.vsi_num = vsi->vsi_id;
870 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
873 "Update VSI failed to %s qinq stripping",
874 on ? "enable" : "disable");
878 vsi->info.valid_sections |=
879 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
885 ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
889 ret = ice_vsi_config_qinq_stripping(vsi, on);
891 PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);
893 ret = ice_vsi_config_qinq_insertion(vsi, on);
895 PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);
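
/* Enable the PF "other cause" interrupt on MSI-X vector 0: admin queue
 * and firmware/link-event causes are routed to vector 0 via PFINT_OICR_CTL
 * and PFINT_FW_CTL, and the vector is re-armed through GLINT_DYN_CTL.
 */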
902 ice_pf_enable_irq0(struct ice_hw *hw)
904 /* reset the registers */
905 ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
906 ICE_READ_REG(hw, PFINT_OICR);
909 ICE_WRITE_REG(hw, PFINT_OICR_ENA,
910 (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
911 (~PFINT_OICR_LINK_STAT_CHANGE_M)));
913 ICE_WRITE_REG(hw, PFINT_OICR_CTL,
914 (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
915 ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
916 PFINT_OICR_CTL_ITR_INDX_M) |
917 PFINT_OICR_CTL_CAUSE_ENA_M);
919 ICE_WRITE_REG(hw, PFINT_FW_CTL,
920 (0 & PFINT_FW_CTL_MSIX_INDX_M) |
921 ((0 << PFINT_FW_CTL_ITR_INDX_S) &
922 PFINT_FW_CTL_ITR_INDX_M) |
923 PFINT_FW_CTL_CAUSE_ENA_M);
925 ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
928 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
929 GLINT_DYN_CTL_INTENA_M |
930 GLINT_DYN_CTL_CLEARPBA_M |
931 GLINT_DYN_CTL_ITR_INDX_M);
938 ice_pf_disable_irq0(struct ice_hw *hw)
940 /* Disable all interrupt types */
941 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
947 ice_handle_aq_msg(struct rte_eth_dev *dev)
949 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
950 struct ice_ctl_q_info *cq = &hw->adminq;
951 struct ice_rq_event_info event;
952 uint16_t pending, opcode;
955 event.buf_len = ICE_AQ_MAX_BUF_LEN;
956 event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
957 if (!event.msg_buf) {
958 PMD_DRV_LOG(ERR, "Failed to allocate mem");
964 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
966 if (ret != ICE_SUCCESS) {
968 "Failed to read msg from AdminQ, "
970 hw->adminq.sq_last_status);
973 opcode = rte_le_to_cpu_16(event.desc.opcode);
976 case ice_aqc_opc_get_link_status:
977 ret = ice_link_update(dev, 0);
979 _rte_eth_dev_callback_process
980 (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
983 PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
988 rte_free(event.msg_buf);
993 * Interrupt handler triggered by NIC for handling
994 * specific interrupt.
997 * Pointer to interrupt handle.
 *  The address of parameter (struct rte_eth_dev *) registered before.
1005 ice_interrupt_handler(void *param)
1007 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1008 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1015 uint32_t int_fw_ctl;
1018 /* Disable interrupt */
1019 ice_pf_disable_irq0(hw);
1021 /* read out interrupt causes */
1022 oicr = ICE_READ_REG(hw, PFINT_OICR);
1024 int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
1027 /* No interrupt event indicated */
1028 if (!(oicr & PFINT_OICR_INTEVENT_M)) {
1029 PMD_DRV_LOG(INFO, "No interrupt event");
1034 if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
1035 PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
1036 ice_handle_aq_msg(dev);
1039 if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
1040 PMD_DRV_LOG(INFO, "OICR: link state change event");
1041 ice_link_update(dev, 0);
1045 if (oicr & PFINT_OICR_MAL_DETECT_M) {
1046 PMD_DRV_LOG(WARNING, "OICR: MDD event");
1047 reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
1048 if (reg & GL_MDET_TX_PQM_VALID_M) {
1049 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1050 GL_MDET_TX_PQM_PF_NUM_S;
1051 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1052 GL_MDET_TX_PQM_MAL_TYPE_S;
1053 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
1054 GL_MDET_TX_PQM_QNUM_S;
1056 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1057 "%d by PQM on TX queue %d PF# %d",
1058 event, queue, pf_num);
1061 reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
1062 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1063 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1064 GL_MDET_TX_TCLAN_PF_NUM_S;
1065 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1066 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1067 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1068 GL_MDET_TX_TCLAN_QNUM_S;
1070 PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
1071 "%d by TCLAN on TX queue %d PF# %d",
1072 event, queue, pf_num);
1076 /* Enable interrupt */
1077 ice_pf_enable_irq0(hw);
1078 rte_intr_enable(dev->intr_handle);
1081 /* Initialize SW parameters of PF */
1083 ice_pf_sw_init(struct rte_eth_dev *dev)
1085 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1086 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1088 if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
1090 ice_config_max_queue_pair_num(dev->device->devargs);
1093 (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
1094 hw->func_caps.common_cap.num_rxq);
1096 pf->lan_nb_qps = pf->lan_nb_qp_max;
1101 static struct ice_vsi *
1102 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
1104 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1105 struct ice_vsi *vsi = NULL;
1106 struct ice_vsi_ctx vsi_ctx;
1108 struct ether_addr broadcast = {
1109 .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
1110 struct ether_addr mac_addr;
1111 uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
1112 uint8_t tc_bitmap = 0x1;
1114 /* hw->num_lports = 1 in NIC mode */
1115 vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
1119 vsi->idx = pf->next_vsi_idx;
1122 vsi->adapter = ICE_PF_TO_ADAPTER(pf);
1123 vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
1124 vsi->vlan_anti_spoof_on = 0;
1125 vsi->vlan_filter_on = 1;
1126 TAILQ_INIT(&vsi->mac_list);
1127 TAILQ_INIT(&vsi->vlan_list);
1129 memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	/* base_queue is used in the queue mapping of the VSI add/update
	 * command. Suppose vsi->base_queue is 0 now; don't consider SRIOV
	 * or VMDQ cases in the first stage. Only the main VSI.
1134 vsi->base_queue = 0;
1137 vsi->nb_qps = pf->lan_nb_qps;
1138 ice_vsi_config_default_rss(&vsi_ctx.info);
1139 vsi_ctx.alloc_from_pool = true;
1140 vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
1141 /* switch_id is queried by get_switch_config aq, which is done
1144 vsi_ctx.info.sw_id = hw->port_info->sw_id;
1145 vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1146 /* Allow all untagged or tagged packets */
1147 vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
1148 vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
1149 vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
1150 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
1151 /* Enable VLAN/UP trip */
1152 ret = ice_vsi_config_tc_queue_mapping(vsi,
1157 "tc queue mapping with vsi failed, "
1165 /* for other types of VSI */
1166 PMD_INIT_LOG(ERR, "other types of VSI not supported");
1170 /* VF has MSIX interrupt in VF range, don't allocate here */
1171 if (type == ICE_VSI_PF) {
1172 ret = ice_res_pool_alloc(&pf->msix_pool,
1173 RTE_MIN(vsi->nb_qps,
1174 RTE_MAX_RXTX_INTR_VEC_ID));
1176 PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
1179 vsi->msix_intr = ret;
1180 vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
1185 ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
1186 if (ret != ICE_SUCCESS) {
1187 PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
	/* store VSI information in the SW structure */
1191 vsi->vsi_id = vsi_ctx.vsi_num;
1192 vsi->info = vsi_ctx.info;
1193 pf->vsis_allocated = vsi_ctx.vsis_allocd;
1194 pf->vsis_unallocated = vsi_ctx.vsis_unallocated;
1196 /* MAC configuration */
1197 rte_memcpy(pf->dev_addr.addr_bytes,
1198 hw->port_info->mac.perm_addr,
1201 rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
1202 ret = ice_add_mac_filter(vsi, &mac_addr);
1203 if (ret != ICE_SUCCESS)
1204 PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");
1206 rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
1207 ret = ice_add_mac_filter(vsi, &mac_addr);
1208 if (ret != ICE_SUCCESS)
1209 PMD_INIT_LOG(ERR, "Failed to add MAC filter");
	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the Tx queues.
	 * Currently vsi->nb_qps holds it.
	 * Correct this if that ever changes.
1216 max_txqs[0] = vsi->nb_qps;
1217 ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
1218 tc_bitmap, max_txqs);
1219 if (ret != ICE_SUCCESS)
1220 PMD_INIT_LOG(ERR, "Failed to config vsi sched");
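
/* Reset the PF statistics snapshots and create the main PF VSI. */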
1230 ice_pf_setup(struct ice_pf *pf)
1232 struct ice_vsi *vsi;
1234 /* Clear all stats counters */
1235 pf->offset_loaded = FALSE;
1236 memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
1237 memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
1238 memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
1239 memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));
1241 vsi = ice_setup_vsi(pf, ICE_VSI_PF);
1243 PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
1253 ice_dev_init(struct rte_eth_dev *dev)
1255 struct rte_pci_device *pci_dev;
1256 struct rte_intr_handle *intr_handle;
1257 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1258 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1259 struct ice_vsi *vsi;
1262 dev->dev_ops = &ice_eth_dev_ops;
1263 dev->rx_pkt_burst = ice_recv_pkts;
1264 dev->tx_pkt_burst = ice_xmit_pkts;
1265 dev->tx_pkt_prepare = ice_prep_pkts;
1267 ice_set_default_ptype_table(dev);
1268 pci_dev = RTE_DEV_TO_PCI(dev->device);
1269 intr_handle = &pci_dev->intr_handle;
1271 pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1272 pf->adapter->eth_dev = dev;
1273 pf->dev_data = dev->data;
1274 hw->back = pf->adapter;
1275 hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
1276 hw->vendor_id = pci_dev->id.vendor_id;
1277 hw->device_id = pci_dev->id.device_id;
1278 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1279 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1280 hw->bus.device = pci_dev->addr.devid;
1281 hw->bus.func = pci_dev->addr.function;
1283 ice_init_controlq_parameter(hw);
1285 ret = ice_init_hw(hw);
1287 PMD_INIT_LOG(ERR, "Failed to initialize HW");
1291 PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
1292 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
1293 hw->api_maj_ver, hw->api_min_ver);
1295 ice_pf_sw_init(dev);
1296 ret = ice_init_mac_address(dev);
1298 PMD_INIT_LOG(ERR, "Failed to initialize mac address");
1302 ret = ice_res_pool_init(&pf->msix_pool, 1,
1303 hw->func_caps.common_cap.num_msix_vectors - 1);
1305 PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
1306 goto err_msix_pool_init;
1309 ret = ice_pf_setup(pf);
1311 PMD_INIT_LOG(ERR, "Failed to setup PF");
1317 /* Disable double vlan by default */
1318 ice_vsi_config_double_vlan(vsi, FALSE);
1320 /* register callback func to eal lib */
1321 rte_intr_callback_register(intr_handle,
1322 ice_interrupt_handler, dev);
1324 ice_pf_enable_irq0(hw);
1326 /* enable uio intr after callback register */
1327 rte_intr_enable(intr_handle);
1332 ice_res_pool_destroy(&pf->msix_pool);
1334 rte_free(dev->data->mac_addrs);
1336 ice_sched_cleanup_all(hw);
1337 rte_free(hw->port_info);
1338 ice_shutdown_all_ctrlq(hw);
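
/* Remove every MAC/VLAN filter on the VSI and free the VSI itself through
 * the admin queue (ice_free_vsi()).
 */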
1344 ice_release_vsi(struct ice_vsi *vsi)
1347 struct ice_vsi_ctx vsi_ctx;
1348 enum ice_status ret;
1353 hw = ICE_VSI_TO_HW(vsi);
1355 ice_remove_all_mac_vlan_filters(vsi);
1357 memset(&vsi_ctx, 0, sizeof(vsi_ctx));
1359 vsi_ctx.vsi_num = vsi->vsi_id;
1360 vsi_ctx.info = vsi->info;
1361 ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
1362 if (ret != ICE_SUCCESS) {
1363 PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
1373 ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
1375 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1376 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1377 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1378 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1379 uint16_t msix_intr, i;
	/* disable interrupts and also clear all the existing config */
1382 for (i = 0; i < vsi->nb_qps; i++) {
1383 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
1384 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
1388 if (rte_intr_allow_others(intr_handle))
1390 for (i = 0; i < vsi->nb_msix; i++) {
1391 msix_intr = vsi->msix_intr + i;
1392 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
1393 GLINT_DYN_CTL_WB_ON_ITR_M);
1397 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
1401 ice_dev_stop(struct rte_eth_dev *dev)
1403 struct rte_eth_dev_data *data = dev->data;
1404 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1405 struct ice_vsi *main_vsi = pf->main_vsi;
1406 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1407 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1410 /* avoid stopping again */
1411 if (pf->adapter_stopped)
1414 /* stop and clear all Rx queues */
1415 for (i = 0; i < data->nb_rx_queues; i++)
1416 ice_rx_queue_stop(dev, i);
1418 /* stop and clear all Tx queues */
1419 for (i = 0; i < data->nb_tx_queues; i++)
1420 ice_tx_queue_stop(dev, i);
1422 /* disable all queue interrupts */
1423 ice_vsi_disable_queues_intr(main_vsi);
1425 /* Clear all queues and release mbufs */
1426 ice_clear_queues(dev);
1428 /* Clean datapath event and queue/vec mapping */
1429 rte_intr_efd_disable(intr_handle);
1430 if (intr_handle->intr_vec) {
1431 rte_free(intr_handle->intr_vec);
1432 intr_handle->intr_vec = NULL;
1435 pf->adapter_stopped = true;
1439 ice_dev_close(struct rte_eth_dev *dev)
1441 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1442 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* release all queue resources */
1447 ice_free_queues(dev);
1449 ice_res_pool_destroy(&pf->msix_pool);
1450 ice_release_vsi(pf->main_vsi);
1452 ice_shutdown_all_ctrlq(hw);
1456 ice_dev_uninit(struct rte_eth_dev *dev)
1458 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1459 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1460 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1461 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1465 dev->dev_ops = NULL;
1466 dev->rx_pkt_burst = NULL;
1467 dev->tx_pkt_burst = NULL;
1469 rte_free(dev->data->mac_addrs);
1470 dev->data->mac_addrs = NULL;
1472 /* disable uio intr before callback unregister */
1473 rte_intr_disable(intr_handle);
	/* unregister callback func from eal lib */
1476 rte_intr_callback_unregister(intr_handle,
1477 ice_interrupt_handler, dev);
1479 ice_release_vsi(pf->main_vsi);
1480 ice_sched_cleanup_all(hw);
1481 rte_free(hw->port_info);
1482 ice_shutdown_all_ctrlq(hw);
1488 ice_dev_configure(__rte_unused struct rte_eth_dev *dev)
1490 struct ice_adapter *ad =
1491 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	/* Initialize to TRUE. If any of the Rx queues doesn't meet the
	 * bulk allocation or vector Rx preconditions, we will reset it.
1496 ad->rx_bulk_alloc_allowed = true;
1497 ad->tx_simple_allowed = true;
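
/* Program the RSS hash key (random by default, or the one supplied in
 * rte_eth_rss_conf) and fill the lookup table round-robin over the
 * configured Rx queues.
 */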
1502 static int ice_init_rss(struct ice_pf *pf)
1504 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1505 struct ice_vsi *vsi = pf->main_vsi;
1506 struct rte_eth_dev *dev = pf->adapter->eth_dev;
1507 struct rte_eth_rss_conf *rss_conf;
1508 struct ice_aqc_get_set_rss_keys key;
1512 rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1513 nb_q = dev->data->nb_rx_queues;
1514 vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
1515 vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;
1518 vsi->rss_key = rte_zmalloc(NULL,
1519 vsi->rss_key_size, 0);
1521 vsi->rss_lut = rte_zmalloc(NULL,
1522 vsi->rss_lut_size, 0);
1524 /* configure RSS key */
1525 if (!rss_conf->rss_key) {
1526 /* Calculate the default hash key */
		for (i = 0; i < vsi->rss_key_size; i++)
1528 vsi->rss_key[i] = (uint8_t)rte_rand();
1530 rte_memcpy(vsi->rss_key, rss_conf->rss_key,
1531 RTE_MIN(rss_conf->rss_key_len,
1532 vsi->rss_key_size));
1534 rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
1535 ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
1539 /* init RSS LUT table */
1540 for (i = 0; i < vsi->rss_lut_size; i++)
1541 vsi->rss_lut[i] = i % nb_q;
1543 ret = ice_aq_set_rss_lut(hw, vsi->idx,
1544 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
1545 vsi->rss_lut, vsi->rss_lut_size);
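
/* Bind a contiguous range of queues to a single MSI-X vector: QINT_RQCTL
 * and QINT_TQCTL are pointed at the vector with ITR index 0 and the
 * per-queue interrupt cause is enabled.
 */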
1553 __vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
1554 int base_queue, int nb_queue)
1556 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1557 uint32_t val, val_tx;
1560 for (i = 0; i < nb_queue; i++) {
		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
		      (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
		val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
			 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;
1567 PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
1568 base_queue + i, msix_vect);
1569 /* set ITR0 value */
1570 ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
1571 ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
1572 ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
1577 ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
1579 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1580 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1581 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1582 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1583 uint16_t msix_vect = vsi->msix_intr;
1584 uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
1585 uint16_t queue_idx = 0;
1589 /* clear Rx/Tx queue interrupt */
1590 for (i = 0; i < vsi->nb_used_qps; i++) {
1591 ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
1592 ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
1595 /* PF bind interrupt */
1596 if (rte_intr_dp_is_en(intr_handle)) {
1601 for (i = 0; i < vsi->nb_used_qps; i++) {
1603 if (!rte_intr_allow_others(intr_handle))
1604 msix_vect = ICE_MISC_VEC_ID;
1606 /* uio mapping all queue to one msix_vect */
1607 __vsi_queues_bind_intr(vsi, msix_vect,
1608 vsi->base_queue + i,
1609 vsi->nb_used_qps - i);
1611 for (; !!record && i < vsi->nb_used_qps; i++)
1612 intr_handle->intr_vec[queue_idx + i] =
1617 /* vfio 1:1 queue/msix_vect mapping */
1618 __vsi_queues_bind_intr(vsi, msix_vect,
1619 vsi->base_queue + i, 1);
1622 intr_handle->intr_vec[queue_idx + i] = msix_vect;
1630 ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
1632 struct rte_eth_dev *dev = vsi->adapter->eth_dev;
1633 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1634 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1635 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1636 uint16_t msix_intr, i;
1638 if (rte_intr_allow_others(intr_handle))
1639 for (i = 0; i < vsi->nb_used_qps; i++) {
1640 msix_intr = vsi->msix_intr + i;
1641 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
1642 GLINT_DYN_CTL_INTENA_M |
1643 GLINT_DYN_CTL_CLEARPBA_M |
1644 GLINT_DYN_CTL_ITR_INDX_M |
1645 GLINT_DYN_CTL_WB_ON_ITR_M);
1648 ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
1649 GLINT_DYN_CTL_INTENA_M |
1650 GLINT_DYN_CTL_CLEARPBA_M |
1651 GLINT_DYN_CTL_ITR_INDX_M |
1652 GLINT_DYN_CTL_WB_ON_ITR_M);
1656 ice_rxq_intr_setup(struct rte_eth_dev *dev)
1658 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1659 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
1660 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1661 struct ice_vsi *vsi = pf->main_vsi;
1662 uint32_t intr_vector = 0;
1664 rte_intr_disable(intr_handle);
1666 /* check and configure queue intr-vector mapping */
1667 if ((rte_intr_cap_multiple(intr_handle) ||
1668 !RTE_ETH_DEV_SRIOV(dev).active) &&
1669 dev->data->dev_conf.intr_conf.rxq != 0) {
1670 intr_vector = dev->data->nb_rx_queues;
1671 if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
1672 PMD_DRV_LOG(ERR, "At most %d intr queues supported",
1673 ICE_MAX_INTR_QUEUE_NUM);
1676 if (rte_intr_efd_enable(intr_handle, intr_vector))
1680 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
1681 intr_handle->intr_vec =
1682 rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
1684 if (!intr_handle->intr_vec) {
1686 "Failed to allocate %d rx_queues intr_vec",
1687 dev->data->nb_rx_queues);
1692 /* Map queues with MSIX interrupt */
1693 vsi->nb_used_qps = dev->data->nb_rx_queues;
1694 ice_vsi_queues_bind_intr(vsi);
1696 /* Enable interrupts for all the queues */
1697 ice_vsi_enable_queues_intr(vsi);
1699 rte_intr_enable(intr_handle);
1705 ice_dev_start(struct rte_eth_dev *dev)
1707 struct rte_eth_dev_data *data = dev->data;
1708 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1709 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1710 uint16_t nb_rxq = 0;
1714 /* program Tx queues' context in hardware */
1715 for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
1716 ret = ice_tx_queue_start(dev, nb_txq);
1718 PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
	/* program Rx queues' context in hardware */
1724 for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
1725 ret = ice_rx_queue_start(dev, nb_rxq);
1727 PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
1732 ret = ice_init_rss(pf);
1734 PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
1738 ice_set_rx_function(dev);
	/* enable Rx interrupts and map Rx queues to interrupt vectors */
1741 if (ice_rxq_intr_setup(dev))
1744 ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
1745 ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
1746 ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
1747 ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
1748 ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
1749 ICE_AQ_LINK_EVENT_AN_COMPLETED |
1750 ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
1752 if (ret != ICE_SUCCESS)
1753 PMD_DRV_LOG(WARNING, "Fail to set phy mask");
	/* Call the get_link_info aq command to enable/disable LSE */
1756 ice_link_update(dev, 0);
1758 pf->adapter_stopped = false;
	/* stop the queues that were started if we failed to start them all */
1764 for (i = 0; i < nb_rxq; i++)
1765 ice_rx_queue_stop(dev, i);
1767 for (i = 0; i < nb_txq; i++)
1768 ice_tx_queue_stop(dev, i);
1774 ice_dev_reset(struct rte_eth_dev *dev)
1778 if (dev->data->sriov.active)
1781 ret = ice_dev_uninit(dev);
1783 PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
1787 ret = ice_dev_init(dev);
1789 PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
1797 ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1799 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1800 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1801 struct ice_vsi *vsi = pf->main_vsi;
1802 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1804 dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
1805 dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
1806 dev_info->max_rx_queues = vsi->nb_qps;
1807 dev_info->max_tx_queues = vsi->nb_qps;
1808 dev_info->max_mac_addrs = vsi->max_macaddrs;
1809 dev_info->max_vfs = pci_dev->max_vfs;
1811 dev_info->rx_offload_capa =
1812 DEV_RX_OFFLOAD_VLAN_STRIP |
1813 DEV_RX_OFFLOAD_IPV4_CKSUM |
1814 DEV_RX_OFFLOAD_UDP_CKSUM |
1815 DEV_RX_OFFLOAD_TCP_CKSUM |
1816 DEV_RX_OFFLOAD_QINQ_STRIP |
1817 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
1818 DEV_RX_OFFLOAD_VLAN_EXTEND |
1819 DEV_RX_OFFLOAD_JUMBO_FRAME |
1820 DEV_RX_OFFLOAD_KEEP_CRC |
1821 DEV_RX_OFFLOAD_SCATTER |
1822 DEV_RX_OFFLOAD_VLAN_FILTER;
1823 dev_info->tx_offload_capa =
1824 DEV_TX_OFFLOAD_VLAN_INSERT |
1825 DEV_TX_OFFLOAD_QINQ_INSERT |
1826 DEV_TX_OFFLOAD_IPV4_CKSUM |
1827 DEV_TX_OFFLOAD_UDP_CKSUM |
1828 DEV_TX_OFFLOAD_TCP_CKSUM |
1829 DEV_TX_OFFLOAD_SCTP_CKSUM |
1830 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
1831 DEV_TX_OFFLOAD_TCP_TSO |
1832 DEV_TX_OFFLOAD_MULTI_SEGS |
1833 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1834 dev_info->rx_queue_offload_capa = 0;
1835 dev_info->tx_queue_offload_capa = 0;
1837 dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
1838 dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
1839 dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
1841 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1843 .pthresh = ICE_DEFAULT_RX_PTHRESH,
1844 .hthresh = ICE_DEFAULT_RX_HTHRESH,
1845 .wthresh = ICE_DEFAULT_RX_WTHRESH,
1847 .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
1852 dev_info->default_txconf = (struct rte_eth_txconf) {
1854 .pthresh = ICE_DEFAULT_TX_PTHRESH,
1855 .hthresh = ICE_DEFAULT_TX_HTHRESH,
1856 .wthresh = ICE_DEFAULT_TX_WTHRESH,
1858 .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
1859 .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
1863 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1864 .nb_max = ICE_MAX_RING_DESC,
1865 .nb_min = ICE_MIN_RING_DESC,
1866 .nb_align = ICE_ALIGN_RING_DESC,
1869 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1870 .nb_max = ICE_MAX_RING_DESC,
1871 .nb_min = ICE_MIN_RING_DESC,
1872 .nb_align = ICE_ALIGN_RING_DESC,
1875 dev_info->speed_capa = ETH_LINK_SPEED_10M |
1876 ETH_LINK_SPEED_100M |
1878 ETH_LINK_SPEED_2_5G |
1880 ETH_LINK_SPEED_10G |
1881 ETH_LINK_SPEED_20G |
1882 ETH_LINK_SPEED_25G |
1885 dev_info->nb_rx_queues = dev->data->nb_rx_queues;
1886 dev_info->nb_tx_queues = dev->data->nb_tx_queues;
1888 dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
1889 dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
1890 dev_info->default_rxportconf.nb_queues = 1;
1891 dev_info->default_txportconf.nb_queues = 1;
1892 dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
1893 dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
1897 ice_atomic_read_link_status(struct rte_eth_dev *dev,
1898 struct rte_eth_link *link)
1900 struct rte_eth_link *dst = link;
1901 struct rte_eth_link *src = &dev->data->dev_link;
1903 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1904 *(uint64_t *)src) == 0)
1911 ice_atomic_write_link_status(struct rte_eth_dev *dev,
1912 struct rte_eth_link *link)
1914 struct rte_eth_link *dst = &dev->data->dev_link;
1915 struct rte_eth_link *src = link;
1917 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1918 *(uint64_t *)src) == 0)
1925 ice_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
1927 #define CHECK_INTERVAL 100 /* 100ms */
1928 #define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
1929 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1930 struct ice_link_status link_status;
1931 struct rte_eth_link link, old;
1933 unsigned int rep_cnt = MAX_REPEAT_TIME;
1934 bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
1936 memset(&link, 0, sizeof(link));
1937 memset(&old, 0, sizeof(old));
1938 memset(&link_status, 0, sizeof(link_status));
1939 ice_atomic_read_link_status(dev, &old);
1942 /* Get link status information from hardware */
1943 status = ice_aq_get_link_info(hw->port_info, enable_lse,
1944 &link_status, NULL);
1945 if (status != ICE_SUCCESS) {
1946 link.link_speed = ETH_SPEED_NUM_100M;
1947 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1948 PMD_DRV_LOG(ERR, "Failed to get link info");
1952 link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
1953 if (!wait_to_complete || link.link_status)
1956 rte_delay_ms(CHECK_INTERVAL);
1957 } while (--rep_cnt);
1959 if (!link.link_status)
1962 /* Full-duplex operation at all supported speeds */
1963 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1965 /* Parse the link status */
1966 switch (link_status.link_speed) {
1967 case ICE_AQ_LINK_SPEED_10MB:
1968 link.link_speed = ETH_SPEED_NUM_10M;
1970 case ICE_AQ_LINK_SPEED_100MB:
1971 link.link_speed = ETH_SPEED_NUM_100M;
1973 case ICE_AQ_LINK_SPEED_1000MB:
1974 link.link_speed = ETH_SPEED_NUM_1G;
1976 case ICE_AQ_LINK_SPEED_2500MB:
1977 link.link_speed = ETH_SPEED_NUM_2_5G;
1979 case ICE_AQ_LINK_SPEED_5GB:
1980 link.link_speed = ETH_SPEED_NUM_5G;
1982 case ICE_AQ_LINK_SPEED_10GB:
1983 link.link_speed = ETH_SPEED_NUM_10G;
1985 case ICE_AQ_LINK_SPEED_20GB:
1986 link.link_speed = ETH_SPEED_NUM_20G;
1988 case ICE_AQ_LINK_SPEED_25GB:
1989 link.link_speed = ETH_SPEED_NUM_25G;
1991 case ICE_AQ_LINK_SPEED_40GB:
1992 link.link_speed = ETH_SPEED_NUM_40G;
1994 case ICE_AQ_LINK_SPEED_UNKNOWN:
1996 PMD_DRV_LOG(ERR, "Unknown link speed");
1997 link.link_speed = ETH_SPEED_NUM_NONE;
2001 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
2002 ETH_LINK_SPEED_FIXED);
2005 ice_atomic_write_link_status(dev, &link);
2006 if (link.link_status == old.link_status)
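
/* The MTU check below accounts for the full frame:
 * frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE,
 * e.g. a 1500-byte MTU needs 1500 + 14 + 4 bytes plus the VLAN tag room,
 * and jumbo-frame offload is toggled when frame_size exceeds ETHER_MAX_LEN.
 */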
2013 ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2015 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2016 struct rte_eth_dev_data *dev_data = pf->dev_data;
2017 uint32_t frame_size = mtu + ETHER_HDR_LEN
2018 + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE;
2020 /* check if mtu is within the allowed range */
2021 if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
	/* MTU cannot be changed while the port is started */
2025 if (dev_data->dev_started) {
2027 "port %d must be stopped before configuration",
2032 if (frame_size > ETHER_MAX_LEN)
2033 dev_data->dev_conf.rxmode.offloads |=
2034 DEV_RX_OFFLOAD_JUMBO_FRAME;
2036 dev_data->dev_conf.rxmode.offloads &=
2037 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
2039 dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2044 static int ice_macaddr_set(struct rte_eth_dev *dev,
2045 struct ether_addr *mac_addr)
2047 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2048 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2049 struct ice_vsi *vsi = pf->main_vsi;
2050 struct ice_mac_filter *f;
2054 if (!is_valid_assigned_ether_addr(mac_addr)) {
2055 PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
2059 TAILQ_FOREACH(f, &vsi->mac_list, next) {
2060 if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
2065 PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
2069 ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
2070 if (ret != ICE_SUCCESS) {
2071 PMD_DRV_LOG(ERR, "Failed to delete mac filter");
2074 ret = ice_add_mac_filter(vsi, mac_addr);
2075 if (ret != ICE_SUCCESS) {
2076 PMD_DRV_LOG(ERR, "Failed to add mac filter");
2079 memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
2081 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
2082 ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
2083 if (ret != ICE_SUCCESS)
2084 PMD_DRV_LOG(ERR, "Failed to set manage mac");
2089 /* Add a MAC address, and update filters */
2091 ice_macaddr_add(struct rte_eth_dev *dev,
2092 struct ether_addr *mac_addr,
2093 __rte_unused uint32_t index,
2094 __rte_unused uint32_t pool)
2096 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2097 struct ice_vsi *vsi = pf->main_vsi;
2100 ret = ice_add_mac_filter(vsi, mac_addr);
2101 if (ret != ICE_SUCCESS) {
2102 PMD_DRV_LOG(ERR, "Failed to add MAC filter");
2109 /* Remove a MAC address, and update filters */
2111 ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
2113 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2114 struct ice_vsi *vsi = pf->main_vsi;
2115 struct rte_eth_dev_data *data = dev->data;
2116 struct ether_addr *macaddr;
2119 macaddr = &data->mac_addrs[index];
2120 ret = ice_remove_mac_filter(vsi, macaddr);
2122 PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
2128 ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
2130 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2131 struct ice_vsi *vsi = pf->main_vsi;
2134 PMD_INIT_FUNC_TRACE();
2137 ret = ice_add_vlan_filter(vsi, vlan_id);
2139 PMD_DRV_LOG(ERR, "Failed to add vlan filter");
2143 ret = ice_remove_vlan_filter(vsi, vlan_id);
2145 PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
2153 /* Configure vlan filter on or off */
2155 ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
2157 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2158 struct ice_vsi_ctx ctxt;
2159 uint8_t sec_flags, sw_flags2;
2162 sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2163 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
2164 sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2167 vsi->info.sec_flags |= sec_flags;
2168 vsi->info.sw_flags2 |= sw_flags2;
2170 vsi->info.sec_flags &= ~sec_flags;
2171 vsi->info.sw_flags2 &= ~sw_flags2;
2173 vsi->info.sw_id = hw->port_info->sw_id;
2174 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2175 ctxt.info.valid_sections =
2176 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2177 ICE_AQ_VSI_PROP_SECURITY_VALID);
2178 ctxt.vsi_num = vsi->vsi_id;
2180 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
2182 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
2183 on ? "enable" : "disable");
2186 vsi->info.valid_sections |=
2187 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
2188 ICE_AQ_VSI_PROP_SECURITY_VALID);
2195 ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
2197 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2198 struct ice_vsi_ctx ctxt;
2202 /* Check if it has been already on or off */
2203 if (vsi->info.valid_sections &
2204 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
2206 if ((vsi->info.vlan_flags &
2207 ICE_AQ_VSI_VLAN_EMOD_M) ==
2208 ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
2209 return 0; /* already on */
2211 if ((vsi->info.vlan_flags &
2212 ICE_AQ_VSI_VLAN_EMOD_M) ==
2213 ICE_AQ_VSI_VLAN_EMOD_NOTHING)
2214 return 0; /* already off */
2219 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
2221 vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
2222 vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
2223 vsi->info.vlan_flags |= vlan_flags;
2224 (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2225 ctxt.info.valid_sections =
2226 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
2227 ctxt.vsi_num = vsi->vsi_id;
2228 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
2230 PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
2231 on ? "enable" : "disable");
2235 vsi->info.valid_sections |=
2236 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
2242 ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2244 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2245 struct ice_vsi *vsi = pf->main_vsi;
2246 struct rte_eth_rxmode *rxmode;
2248 rxmode = &dev->data->dev_conf.rxmode;
2249 if (mask & ETH_VLAN_FILTER_MASK) {
2250 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
2251 ice_vsi_config_vlan_filter(vsi, TRUE);
2253 ice_vsi_config_vlan_filter(vsi, FALSE);
2256 if (mask & ETH_VLAN_STRIP_MASK) {
2257 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2258 ice_vsi_config_vlan_stripping(vsi, TRUE);
2260 ice_vsi_config_vlan_stripping(vsi, FALSE);
2263 if (mask & ETH_VLAN_EXTEND_MASK) {
2264 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
2265 ice_vsi_config_double_vlan(vsi, TRUE);
2267 ice_vsi_config_double_vlan(vsi, FALSE);
2274 ice_vlan_tpid_set(struct rte_eth_dev *dev,
2275 enum rte_vlan_type vlan_type,
2278 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2279 uint64_t reg_r = 0, reg_w = 0;
2280 uint16_t reg_id = 0;
2282 int qinq = dev->data->dev_conf.rxmode.offloads &
2283 DEV_RX_OFFLOAD_VLAN_EXTEND;
2285 switch (vlan_type) {
2286 case ETH_VLAN_TYPE_OUTER:
2292 case ETH_VLAN_TYPE_INNER:
2297 "Unsupported vlan type in single vlan.");
2302 PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
2305 reg_r = ICE_READ_REG(hw, GL_SWT_L2TAGCTRL(reg_id));
2306 PMD_DRV_LOG(DEBUG, "Debug read from ICE GL_SWT_L2TAGCTRL[%d]: "
2307 "0x%08"PRIx64"", reg_id, reg_r);
2309 reg_w = reg_r & (~(GL_SWT_L2TAGCTRL_ETHERTYPE_M));
2310 reg_w |= ((uint64_t)tpid << GL_SWT_L2TAGCTRL_ETHERTYPE_S);
2311 if (reg_r == reg_w) {
2312 PMD_DRV_LOG(DEBUG, "No need to write");
2316 ICE_WRITE_REG(hw, GL_SWT_L2TAGCTRL(reg_id), reg_w);
2317 PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
2318 "ICE GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
2324 ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2326 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2327 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2333 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2334 ret = ice_aq_get_rss_lut(hw, vsi->idx, TRUE,
2337 PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2341 uint32_t *lut_dw = (uint32_t *)lut;
2342 uint16_t i, lut_size_dw = lut_size / 4;
2344 for (i = 0; i < lut_size_dw; i++)
2345 lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
2352 ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2354 struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
2355 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2361 if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
2362 ret = ice_aq_set_rss_lut(hw, vsi->idx, TRUE,
2365 PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2369 uint32_t *lut_dw = (uint32_t *)lut;
2370 uint16_t i, lut_size_dw = lut_size / 4;
2372 for (i = 0; i < lut_size_dw; i++)
2373 ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);
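/* eth_dev op: update the RSS redirection table.  The current LUT is read
 * back, only the entries selected by reta_conf[].mask are overwritten, and
 * the whole table is written out again, so untouched entries are preserved.
 */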
2382 ice_rss_reta_update(struct rte_eth_dev *dev,
2383 struct rte_eth_rss_reta_entry64 *reta_conf,
2386 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2387 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2388 uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2389 uint16_t idx, shift;
2393 if (reta_size != lut_size ||
2394 reta_size > ETH_RSS_RETA_SIZE_512) {
2396 "The size of hash lookup table configured (%d)"
2397 "doesn't match the number hardware can "
2399 reta_size, lut_size);
2403 lut = rte_zmalloc(NULL, reta_size, 0);
2405 PMD_DRV_LOG(ERR, "No memory can be allocated");
2408 ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2412 for (i = 0; i < reta_size; i++) {
2413 idx = i / RTE_RETA_GROUP_SIZE;
2414 shift = i % RTE_RETA_GROUP_SIZE;
2415 if (reta_conf[idx].mask & (1ULL << shift))
2416 lut[i] = reta_conf[idx].reta[shift];
2418 ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);
2427 ice_rss_reta_query(struct rte_eth_dev *dev,
2428 struct rte_eth_rss_reta_entry64 *reta_conf,
2431 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2432 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2433 uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
2434 uint16_t idx, shift;
2438 if (reta_size != lut_size ||
2439 reta_size > ETH_RSS_RETA_SIZE_512) {
2441 "The size of hash lookup table configured (%d)"
2442 "doesn't match the number hardware can "
2444 reta_size, lut_size);
2448 lut = rte_zmalloc(NULL, reta_size, 0);
2450 PMD_DRV_LOG(ERR, "No memory can be allocated");
2454 ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
2458 for (i = 0; i < reta_size; i++) {
2459 idx = i / RTE_RETA_GROUP_SIZE;
2460 shift = i % RTE_RETA_GROUP_SIZE;
2461 if (reta_conf[idx].mask & (1ULL << shift))
2462 reta_conf[idx].reta[shift] = lut[i];
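/* Program the RSS hash key of a VSI through the admin queue.  A NULL or
 * zero-length key is skipped with a debug message; any other length must
 * match the hardware key size of (VSIQF_HKEY_MAX_INDEX + 1) * 4 bytes.
 */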
2472 ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
2474 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2477 if (!key || key_len == 0) {
2478 PMD_DRV_LOG(DEBUG, "No key to be configured");
2480 } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
2482 PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
2486 struct ice_aqc_get_set_rss_keys *key_dw =
2487 (struct ice_aqc_get_set_rss_keys *)key;
2489 ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
2491 PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
2499 ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
2501 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2504 if (!key || !key_len)
2507 ret = ice_aq_get_rss_key
2509 (struct ice_aqc_get_set_rss_keys *)key);
2511 PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
2514 *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2520 ice_rss_hash_update(struct rte_eth_dev *dev,
2521 struct rte_eth_rss_conf *rss_conf)
2523 enum ice_status status = ICE_SUCCESS;
2524 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2525 struct ice_vsi *vsi = pf->main_vsi;
2528 status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);
2532 /* TODO: hash enable config, ice_add_rss_cfg */
2537 ice_rss_hash_conf_get(struct rte_eth_dev *dev,
2538 struct rte_eth_rss_conf *rss_conf)
2540 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2541 struct ice_vsi *vsi = pf->main_vsi;
2543 ice_get_rss_key(vsi, rss_conf->rss_key,
2544 &rss_conf->rss_key_len);
2546 /* TODO: rss_hf is reported as 0 because hash flag configuration is not supported yet */
2547 rss_conf->rss_hf = 0;
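/* Rx queue interrupt control.  Enabling writes INTENA | CLEARPBA (with the
 * ITR index field set to all ones and WB_ON_ITR cleared) to the queue's
 * GLINT_DYN_CTL register and unmasks the interrupt at the PCI level;
 * disabling writes WB_ON_ITR only, leaving INTENA de-asserted so the
 * vector stops firing.
 */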
2551 static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
2554 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2555 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2556 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2560 msix_intr = intr_handle->intr_vec[queue_id];
2562 val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
2563 GLINT_DYN_CTL_ITR_INDX_M;
2564 val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;
2566 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
2567 rte_intr_enable(&pci_dev->intr_handle);
2572 static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
2575 struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
2576 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2577 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2580 msix_intr = intr_handle->intr_vec[queue_id];
2582 ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);
2588 ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
2590 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2593 ret = snprintf(fw_version, fw_size, "%d.%d.%05d %d.%d",
2594 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
2595 hw->api_maj_ver, hw->api_min_ver);
2597 /* add the size of the terminating '\0' */
2598 ret += 1;
2599 if (fw_size < (u32)ret)
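/* Set or clear the port VLAN (PVID) of a VSI.  When insertion is enabled
 * the PVID and the INSERT_PVID flag are written into the VLAN section of
 * the VSI context; when it is disabled, the accept-tagged/untagged mode
 * bits are derived from the txmode reject settings.  The context is then
 * committed with ice_update_vsi().
 */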
2606 ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
2609 struct ice_vsi_ctx ctxt;
2610 uint8_t vlan_flags = 0;
2613 if (!vsi || !info) {
2614 PMD_DRV_LOG(ERR, "invalid parameters");
2619 vsi->info.pvid = info->config.pvid;
2621 * If PVID insertion is enabled, only tagged packets are
2622 * allowed to be sent out.
2624 vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
2625 ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
2628 if (info->config.reject.tagged == 0)
2629 vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;
2631 if (info->config.reject.untagged == 0)
2632 vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
2634 vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
2635 ICE_AQ_VSI_VLAN_MODE_M);
2636 vsi->info.vlan_flags |= vlan_flags;
2637 memset(&ctxt, 0, sizeof(ctxt));
2638 rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2639 ctxt.info.valid_sections =
2640 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
2641 ctxt.vsi_num = vsi->vsi_id;
2643 hw = ICE_VSI_TO_HW(vsi);
2644 ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
2645 if (ret != ICE_SUCCESS) {
2647 "update VSI for VLAN insert failed, err %d",
2652 vsi->info.valid_sections |=
2653 rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
2659 ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
2661 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2662 struct ice_vsi *vsi = pf->main_vsi;
2663 struct rte_eth_dev_data *data = pf->dev_data;
2664 struct ice_vsi_vlan_pvid_info info;
2667 memset(&info, 0, sizeof(info));
2670 info.config.pvid = pvid;
2672 info.config.reject.tagged =
2673 data->dev_conf.txmode.hw_vlan_reject_tagged;
2674 info.config.reject.untagged =
2675 data->dev_conf.txmode.hw_vlan_reject_untagged;
2678 ret = ice_vsi_vlan_pvid_set(vsi, &info);
2680 PMD_DRV_LOG(ERR, "Failed to set pvid.");
2688 ice_get_eeprom_length(struct rte_eth_dev *dev)
2690 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2692 /* Convert word count to byte count */
2693 return hw->nvm.sr_words << 1;
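/* eth_dev op: read the NVM (shadow RAM) contents.  Offsets and lengths are
 * supplied in bytes and converted to 16-bit words; each word is fetched
 * with ice_read_sr_word() after a bounds check against hw->nvm.sr_words.
 */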
2697 ice_get_eeprom(struct rte_eth_dev *dev,
2698 struct rte_dev_eeprom_info *eeprom)
2700 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2701 uint16_t *data = eeprom->data;
2702 uint16_t offset, length, i;
2703 enum ice_status ret_code = ICE_SUCCESS;
2705 offset = eeprom->offset >> 1;
2706 length = eeprom->length >> 1;
2708 if (offset > hw->nvm.sr_words ||
2709 offset + length > hw->nvm.sr_words) {
2710 PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");
2714 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
2716 for (i = 0; i < length; i++) {
2717 ret_code = ice_read_sr_word(hw, offset + i, &data[i]);
2718 if (ret_code != ICE_SUCCESS) {
2719 PMD_DRV_LOG(ERR, "EEPROM read failed.");
2728 ice_stat_update_32(struct ice_hw *hw,
2736 new_data = (uint64_t)ICE_READ_REG(hw, reg);
2740 if (new_data >= *offset)
2741 *stat = (uint64_t)(new_data - *offset);
2743 *stat = (uint64_t)((new_data +
2744 ((uint64_t)1 << ICE_32_BIT_WIDTH))
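/* 40-bit variant: the counter is split across a low 32-bit register and
 * the low 8 bits of a high register.  Both halves are combined into
 * new_data before the same offset/wrap-around handling is applied, and the
 * result is masked to 40 bits (ICE_40_BIT_MASK).
 */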
2749 ice_stat_update_40(struct ice_hw *hw,
2758 new_data = (uint64_t)ICE_READ_REG(hw, loreg);
2759 new_data |= (uint64_t)(ICE_READ_REG(hw, hireg) & ICE_8_BIT_MASK) <<
2765 if (new_data >= *offset)
2766 *stat = new_data - *offset;
2768 *stat = (uint64_t)((new_data +
2769 ((uint64_t)1 << ICE_40_BIT_WIDTH)) -
2772 *stat &= ICE_40_BIT_MASK;
2775 /* Get all the statistics of a VSI */
2777 ice_update_vsi_stats(struct ice_vsi *vsi)
2779 struct ice_eth_stats *oes = &vsi->eth_stats_offset;
2780 struct ice_eth_stats *nes = &vsi->eth_stats;
2781 struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2782 int idx = rte_le_to_cpu_16(vsi->vsi_id);
2784 ice_stat_update_40(hw, GLV_GORCH(idx), GLV_GORCL(idx),
2785 vsi->offset_loaded, &oes->rx_bytes,
2787 ice_stat_update_40(hw, GLV_UPRCH(idx), GLV_UPRCL(idx),
2788 vsi->offset_loaded, &oes->rx_unicast,
2790 ice_stat_update_40(hw, GLV_MPRCH(idx), GLV_MPRCL(idx),
2791 vsi->offset_loaded, &oes->rx_multicast,
2792 &nes->rx_multicast);
2793 ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
2794 vsi->offset_loaded, &oes->rx_broadcast,
2795 &nes->rx_broadcast);
2796 /* exclude CRC bytes */
2797 nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
2798 nes->rx_broadcast) * ETHER_CRC_LEN;
2800 ice_stat_update_32(hw, GLV_RDPC(idx), vsi->offset_loaded,
2801 &oes->rx_discards, &nes->rx_discards);
2802 /* GLV_REPC not supported */
2803 /* GLV_RMPC not supported */
2804 ice_stat_update_32(hw, GLSWID_RUPP(idx), vsi->offset_loaded,
2805 &oes->rx_unknown_protocol,
2806 &nes->rx_unknown_protocol);
2807 ice_stat_update_40(hw, GLV_GOTCH(idx), GLV_GOTCL(idx),
2808 vsi->offset_loaded, &oes->tx_bytes,
2810 ice_stat_update_40(hw, GLV_UPTCH(idx), GLV_UPTCL(idx),
2811 vsi->offset_loaded, &oes->tx_unicast,
2813 ice_stat_update_40(hw, GLV_MPTCH(idx), GLV_MPTCL(idx),
2814 vsi->offset_loaded, &oes->tx_multicast,
2815 &nes->tx_multicast);
2816 ice_stat_update_40(hw, GLV_BPTCH(idx), GLV_BPTCL(idx),
2817 vsi->offset_loaded, &oes->tx_broadcast,
2818 &nes->tx_broadcast);
2819 /* GLV_TDPC not supported */
2820 ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
2821 &oes->tx_errors, &nes->tx_errors);
2822 vsi->offset_loaded = true;
2824 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
2826 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", nes->rx_bytes);
2827 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", nes->rx_unicast);
2828 PMD_DRV_LOG(DEBUG, "rx_multicast: %"PRIu64"", nes->rx_multicast);
2829 PMD_DRV_LOG(DEBUG, "rx_broadcast: %"PRIu64"", nes->rx_broadcast);
2830 PMD_DRV_LOG(DEBUG, "rx_discards: %"PRIu64"", nes->rx_discards);
2831 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
2832 nes->rx_unknown_protocol);
2833 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", nes->tx_bytes);
2834 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", nes->tx_unicast);
2835 PMD_DRV_LOG(DEBUG, "tx_multicast: %"PRIu64"", nes->tx_multicast);
2836 PMD_DRV_LOG(DEBUG, "tx_broadcast: %"PRIu64"", nes->tx_broadcast);
2837 PMD_DRV_LOG(DEBUG, "tx_discards: %"PRIu64"", nes->tx_discards);
2838 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", nes->tx_errors);
2839 PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats end ****************",
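/* Refresh the per-port statistics.  All GLPRT_* counters of the local port
 * are accumulated into pf->stats using pf->stats_offset as the baseline;
 * on the first read after a reset (offset_loaded == false) the current
 * register values simply become the new offsets.  The main VSI statistics
 * are refreshed at the end as well.
 */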
2844 ice_read_stats_registers(struct ice_pf *pf, struct ice_hw *hw)
2846 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
2847 struct ice_hw_port_stats *os = &pf->stats_offset; /* old stats */
2849 /* Update the counters held in struct ice_eth_stats */
2850 ice_stat_update_40(hw, GLPRT_GORCH(hw->port_info->lport),
2851 GLPRT_GORCL(hw->port_info->lport),
2852 pf->offset_loaded, &os->eth.rx_bytes,
2854 ice_stat_update_40(hw, GLPRT_UPRCH(hw->port_info->lport),
2855 GLPRT_UPRCL(hw->port_info->lport),
2856 pf->offset_loaded, &os->eth.rx_unicast,
2857 &ns->eth.rx_unicast);
2858 ice_stat_update_40(hw, GLPRT_MPRCH(hw->port_info->lport),
2859 GLPRT_MPRCL(hw->port_info->lport),
2860 pf->offset_loaded, &os->eth.rx_multicast,
2861 &ns->eth.rx_multicast);
2862 ice_stat_update_40(hw, GLPRT_BPRCH(hw->port_info->lport),
2863 GLPRT_BPRCL(hw->port_info->lport),
2864 pf->offset_loaded, &os->eth.rx_broadcast,
2865 &ns->eth.rx_broadcast);
2866 ice_stat_update_32(hw, PRTRPB_RDPC,
2867 pf->offset_loaded, &os->eth.rx_discards,
2868 &ns->eth.rx_discards);
2870 /* Workaround: CRC size should not be included in byte statistics,
2871 * so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
2872 */
2873 ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
2874 ns->eth.rx_broadcast) * ETHER_CRC_LEN;
2876 /* GLPRT_REPC not supported */
2877 /* GLPRT_RMPC not supported */
2878 ice_stat_update_32(hw, GLSWID_RUPP(hw->port_info->lport),
2880 &os->eth.rx_unknown_protocol,
2881 &ns->eth.rx_unknown_protocol);
2882 ice_stat_update_40(hw, GLPRT_GOTCH(hw->port_info->lport),
2883 GLPRT_GOTCL(hw->port_info->lport),
2884 pf->offset_loaded, &os->eth.tx_bytes,
2886 ice_stat_update_40(hw, GLPRT_UPTCH(hw->port_info->lport),
2887 GLPRT_UPTCL(hw->port_info->lport),
2888 pf->offset_loaded, &os->eth.tx_unicast,
2889 &ns->eth.tx_unicast);
2890 ice_stat_update_40(hw, GLPRT_MPTCH(hw->port_info->lport),
2891 GLPRT_MPTCL(hw->port_info->lport),
2892 pf->offset_loaded, &os->eth.tx_multicast,
2893 &ns->eth.tx_multicast);
2894 ice_stat_update_40(hw, GLPRT_BPTCH(hw->port_info->lport),
2895 GLPRT_BPTCL(hw->port_info->lport),
2896 pf->offset_loaded, &os->eth.tx_broadcast,
2897 &ns->eth.tx_broadcast);
2898 ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
2899 ns->eth.tx_broadcast) * ETHER_CRC_LEN;
2901 /* GLPRT_TEPC not supported */
2903 /* additional port specific stats */
2904 ice_stat_update_32(hw, GLPRT_TDOLD(hw->port_info->lport),
2905 pf->offset_loaded, &os->tx_dropped_link_down,
2906 &ns->tx_dropped_link_down);
2907 ice_stat_update_32(hw, GLPRT_CRCERRS(hw->port_info->lport),
2908 pf->offset_loaded, &os->crc_errors,
2910 ice_stat_update_32(hw, GLPRT_ILLERRC(hw->port_info->lport),
2911 pf->offset_loaded, &os->illegal_bytes,
2912 &ns->illegal_bytes);
2913 /* GLPRT_ERRBC not supported */
2914 ice_stat_update_32(hw, GLPRT_MLFC(hw->port_info->lport),
2915 pf->offset_loaded, &os->mac_local_faults,
2916 &ns->mac_local_faults);
2917 ice_stat_update_32(hw, GLPRT_MRFC(hw->port_info->lport),
2918 pf->offset_loaded, &os->mac_remote_faults,
2919 &ns->mac_remote_faults);
2921 ice_stat_update_32(hw, GLPRT_RLEC(hw->port_info->lport),
2922 pf->offset_loaded, &os->rx_len_errors,
2923 &ns->rx_len_errors);
2925 ice_stat_update_32(hw, GLPRT_LXONRXC(hw->port_info->lport),
2926 pf->offset_loaded, &os->link_xon_rx,
2928 ice_stat_update_32(hw, GLPRT_LXOFFRXC(hw->port_info->lport),
2929 pf->offset_loaded, &os->link_xoff_rx,
2931 ice_stat_update_32(hw, GLPRT_LXONTXC(hw->port_info->lport),
2932 pf->offset_loaded, &os->link_xon_tx,
2934 ice_stat_update_32(hw, GLPRT_LXOFFTXC(hw->port_info->lport),
2935 pf->offset_loaded, &os->link_xoff_tx,
2937 ice_stat_update_40(hw, GLPRT_PRC64H(hw->port_info->lport),
2938 GLPRT_PRC64L(hw->port_info->lport),
2939 pf->offset_loaded, &os->rx_size_64,
2941 ice_stat_update_40(hw, GLPRT_PRC127H(hw->port_info->lport),
2942 GLPRT_PRC127L(hw->port_info->lport),
2943 pf->offset_loaded, &os->rx_size_127,
2945 ice_stat_update_40(hw, GLPRT_PRC255H(hw->port_info->lport),
2946 GLPRT_PRC255L(hw->port_info->lport),
2947 pf->offset_loaded, &os->rx_size_255,
2949 ice_stat_update_40(hw, GLPRT_PRC511H(hw->port_info->lport),
2950 GLPRT_PRC511L(hw->port_info->lport),
2951 pf->offset_loaded, &os->rx_size_511,
2953 ice_stat_update_40(hw, GLPRT_PRC1023H(hw->port_info->lport),
2954 GLPRT_PRC1023L(hw->port_info->lport),
2955 pf->offset_loaded, &os->rx_size_1023,
2957 ice_stat_update_40(hw, GLPRT_PRC1522H(hw->port_info->lport),
2958 GLPRT_PRC1522L(hw->port_info->lport),
2959 pf->offset_loaded, &os->rx_size_1522,
2961 ice_stat_update_40(hw, GLPRT_PRC9522H(hw->port_info->lport),
2962 GLPRT_PRC9522L(hw->port_info->lport),
2963 pf->offset_loaded, &os->rx_size_big,
2965 ice_stat_update_32(hw, GLPRT_RUC(hw->port_info->lport),
2966 pf->offset_loaded, &os->rx_undersize,
2968 ice_stat_update_32(hw, GLPRT_RFC(hw->port_info->lport),
2969 pf->offset_loaded, &os->rx_fragments,
2971 ice_stat_update_32(hw, GLPRT_ROC(hw->port_info->lport),
2972 pf->offset_loaded, &os->rx_oversize,
2974 ice_stat_update_32(hw, GLPRT_RJC(hw->port_info->lport),
2975 pf->offset_loaded, &os->rx_jabber,
2977 ice_stat_update_40(hw, GLPRT_PTC64H(hw->port_info->lport),
2978 GLPRT_PTC64L(hw->port_info->lport),
2979 pf->offset_loaded, &os->tx_size_64,
2981 ice_stat_update_40(hw, GLPRT_PTC127H(hw->port_info->lport),
2982 GLPRT_PTC127L(hw->port_info->lport),
2983 pf->offset_loaded, &os->tx_size_127,
2985 ice_stat_update_40(hw, GLPRT_PTC255H(hw->port_info->lport),
2986 GLPRT_PTC255L(hw->port_info->lport),
2987 pf->offset_loaded, &os->tx_size_255,
2989 ice_stat_update_40(hw, GLPRT_PTC511H(hw->port_info->lport),
2990 GLPRT_PTC511L(hw->port_info->lport),
2991 pf->offset_loaded, &os->tx_size_511,
2993 ice_stat_update_40(hw, GLPRT_PTC1023H(hw->port_info->lport),
2994 GLPRT_PTC1023L(hw->port_info->lport),
2995 pf->offset_loaded, &os->tx_size_1023,
2997 ice_stat_update_40(hw, GLPRT_PTC1522H(hw->port_info->lport),
2998 GLPRT_PTC1522L(hw->port_info->lport),
2999 pf->offset_loaded, &os->tx_size_1522,
3001 ice_stat_update_40(hw, GLPRT_PTC9522H(hw->port_info->lport),
3002 GLPRT_PTC9522L(hw->port_info->lport),
3003 pf->offset_loaded, &os->tx_size_big,
3006 /* GLPRT_MSPDC not supported */
3007 /* GLPRT_XEC not supported */
3009 pf->offset_loaded = true;
3012 ice_update_vsi_stats(pf->main_vsi);
3015 /* Get all statistics of a port */
3017 ice_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
3019 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3020 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3021 struct ice_hw_port_stats *ns = &pf->stats; /* new stats */
3023 /* read the hardware registers to refresh the counters, then fill in the stats struct */
3024 ice_read_stats_registers(pf, hw);
3026 stats->ipackets = ns->eth.rx_unicast +
3027 ns->eth.rx_multicast +
3028 ns->eth.rx_broadcast -
3029 ns->eth.rx_discards -
3030 pf->main_vsi->eth_stats.rx_discards;
3031 stats->opackets = ns->eth.tx_unicast +
3032 ns->eth.tx_multicast +
3033 ns->eth.tx_broadcast;
3034 stats->ibytes = ns->eth.rx_bytes;
3035 stats->obytes = ns->eth.tx_bytes;
3036 stats->oerrors = ns->eth.tx_errors +
3037 pf->main_vsi->eth_stats.tx_errors;
3040 stats->imissed = ns->eth.rx_discards +
3041 pf->main_vsi->eth_stats.rx_discards;
3042 stats->ierrors = ns->crc_errors +
3044 ns->rx_oversize + ns->rx_fragments + ns->rx_jabber;
3046 PMD_DRV_LOG(DEBUG, "*************** PF stats start *****************");
3047 PMD_DRV_LOG(DEBUG, "rx_bytes: %"PRIu64"", ns->eth.rx_bytes);
3048 PMD_DRV_LOG(DEBUG, "rx_unicast: %"PRIu64"", ns->eth.rx_unicast);
3049 PMD_DRV_LOG(DEBUG, "rx_multicast:%"PRIu64"", ns->eth.rx_multicast);
3050 PMD_DRV_LOG(DEBUG, "rx_broadcast:%"PRIu64"", ns->eth.rx_broadcast);
3051 PMD_DRV_LOG(DEBUG, "rx_discards:%"PRIu64"", ns->eth.rx_discards);
3052 PMD_DRV_LOG(DEBUG, "vsi rx_discards:%"PRIu64"",
3053 pf->main_vsi->eth_stats.rx_discards);
3054 PMD_DRV_LOG(DEBUG, "rx_unknown_protocol: %"PRIu64"",
3055 ns->eth.rx_unknown_protocol);
3056 PMD_DRV_LOG(DEBUG, "tx_bytes: %"PRIu64"", ns->eth.tx_bytes);
3057 PMD_DRV_LOG(DEBUG, "tx_unicast: %"PRIu64"", ns->eth.tx_unicast);
3058 PMD_DRV_LOG(DEBUG, "tx_multicast:%"PRIu64"", ns->eth.tx_multicast);
3059 PMD_DRV_LOG(DEBUG, "tx_broadcast:%"PRIu64"", ns->eth.tx_broadcast);
3060 PMD_DRV_LOG(DEBUG, "tx_discards:%"PRIu64"", ns->eth.tx_discards);
3061 PMD_DRV_LOG(DEBUG, "vsi tx_discards:%"PRIu64"",
3062 pf->main_vsi->eth_stats.tx_discards);
3063 PMD_DRV_LOG(DEBUG, "tx_errors: %"PRIu64"", ns->eth.tx_errors);
3065 PMD_DRV_LOG(DEBUG, "tx_dropped_link_down: %"PRIu64"",
3066 ns->tx_dropped_link_down);
3067 PMD_DRV_LOG(DEBUG, "crc_errors: %"PRIu64"", ns->crc_errors);
3068 PMD_DRV_LOG(DEBUG, "illegal_bytes: %"PRIu64"",
3070 PMD_DRV_LOG(DEBUG, "error_bytes: %"PRIu64"", ns->error_bytes);
3071 PMD_DRV_LOG(DEBUG, "mac_local_faults: %"PRIu64"",
3072 ns->mac_local_faults);
3073 PMD_DRV_LOG(DEBUG, "mac_remote_faults: %"PRIu64"",
3074 ns->mac_remote_faults);
3075 PMD_DRV_LOG(DEBUG, "link_xon_rx: %"PRIu64"", ns->link_xon_rx);
3076 PMD_DRV_LOG(DEBUG, "link_xoff_rx: %"PRIu64"", ns->link_xoff_rx);
3077 PMD_DRV_LOG(DEBUG, "link_xon_tx: %"PRIu64"", ns->link_xon_tx);
3078 PMD_DRV_LOG(DEBUG, "link_xoff_tx: %"PRIu64"", ns->link_xoff_tx);
3079 PMD_DRV_LOG(DEBUG, "rx_size_64: %"PRIu64"", ns->rx_size_64);
3080 PMD_DRV_LOG(DEBUG, "rx_size_127: %"PRIu64"", ns->rx_size_127);
3081 PMD_DRV_LOG(DEBUG, "rx_size_255: %"PRIu64"", ns->rx_size_255);
3082 PMD_DRV_LOG(DEBUG, "rx_size_511: %"PRIu64"", ns->rx_size_511);
3083 PMD_DRV_LOG(DEBUG, "rx_size_1023: %"PRIu64"", ns->rx_size_1023);
3084 PMD_DRV_LOG(DEBUG, "rx_size_1522: %"PRIu64"", ns->rx_size_1522);
3085 PMD_DRV_LOG(DEBUG, "rx_size_big: %"PRIu64"", ns->rx_size_big);
3086 PMD_DRV_LOG(DEBUG, "rx_undersize: %"PRIu64"", ns->rx_undersize);
3087 PMD_DRV_LOG(DEBUG, "rx_fragments: %"PRIu64"", ns->rx_fragments);
3088 PMD_DRV_LOG(DEBUG, "rx_oversize: %"PRIu64"", ns->rx_oversize);
3089 PMD_DRV_LOG(DEBUG, "rx_jabber: %"PRIu64"", ns->rx_jabber);
3090 PMD_DRV_LOG(DEBUG, "tx_size_64: %"PRIu64"", ns->tx_size_64);
3091 PMD_DRV_LOG(DEBUG, "tx_size_127: %"PRIu64"", ns->tx_size_127);
3092 PMD_DRV_LOG(DEBUG, "tx_size_255: %"PRIu64"", ns->tx_size_255);
3093 PMD_DRV_LOG(DEBUG, "tx_size_511: %"PRIu64"", ns->tx_size_511);
3094 PMD_DRV_LOG(DEBUG, "tx_size_1023: %"PRIu64"", ns->tx_size_1023);
3095 PMD_DRV_LOG(DEBUG, "tx_size_1522: %"PRIu64"", ns->tx_size_1522);
3096 PMD_DRV_LOG(DEBUG, "tx_size_big: %"PRIu64"", ns->tx_size_big);
3097 PMD_DRV_LOG(DEBUG, "rx_len_errors: %"PRIu64"", ns->rx_len_errors);
3098 PMD_DRV_LOG(DEBUG, "************* PF stats end ****************");
3102 /* Reset the statistics */
3104 ice_stats_reset(struct rte_eth_dev *dev)
3106 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3107 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3109 /* Clear the offset_loaded flags so the next read reloads the offsets, i.e. "resets" the stats */
3110 pf->offset_loaded = false;
3112 pf->main_vsi->offset_loaded = false;
3114 /* read the stats; the current register values become the new offsets */
3115 ice_read_stats_registers(pf, hw);
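/* Extended statistics: the xstats array is simply the ICE_NB_ETH_XSTATS
 * basic ethernet counters followed by the ICE_NB_HW_PORT_XSTATS per-port
 * counters, each entry resolved through a (name, offset) lookup table.
 */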
3119 ice_xstats_calc_num(void)
3123 num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
3129 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
3132 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3133 struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
3136 struct ice_hw_port_stats *hw_stats = &pf->stats;
3138 count = ice_xstats_calc_num();
3142 ice_read_stats_registers(pf, hw);
3149 /* Get stats from ice_eth_stats struct */
3150 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3151 xstats[count].value =
3152 *(uint64_t *)((char *)&hw_stats->eth +
3153 ice_stats_strings[i].offset);
3154 xstats[count].id = count;
3158 /* Get individual stats from the ice_hw_port_stats struct */
3159 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3160 xstats[count].value =
3161 *(uint64_t *)((char *)hw_stats +
3162 ice_hw_port_strings[i].offset);
3163 xstats[count].id = count;
3170 static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
3171 struct rte_eth_xstat_name *xstats_names,
3172 __rte_unused unsigned int limit)
3174 unsigned int count = 0;
3178 return ice_xstats_calc_num();
3180 /* Note: limit checked in rte_eth_xstats_get_names() */
3182 /* Get stats from ice_eth_stats struct */
3183 for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
3184 snprintf(xstats_names[count].name,
3185 sizeof(xstats_names[count].name),
3186 "%s", ice_stats_strings[i].name);
3191 /* Get individual stats from the ice_hw_port_stats struct */
3191 for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
3192 snprintf(xstats_names[count].name,
3193 sizeof(xstats_names[count].name),
3194 "%s", ice_hw_port_strings[i].name);
3202 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3203 struct rte_pci_device *pci_dev)
3205 return rte_eth_dev_pci_generic_probe(pci_dev,
3206 sizeof(struct ice_adapter),
3211 ice_pci_remove(struct rte_pci_device *pci_dev)
3213 return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
3216 static struct rte_pci_driver rte_ice_pmd = {
3217 .id_table = pci_id_ice_map,
3218 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3219 RTE_PCI_DRV_IOVA_AS_VA,
3220 .probe = ice_pci_probe,
3221 .remove = ice_pci_remove,
3225 * Driver initialization routine.
3226 * Invoked once at EAL init time.
3227 * Registers itself as the Poll Mode Driver for ice PCI devices.
3229 RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
3230 RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
3231 RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
3232 RTE_PMD_REGISTER_PARAM_STRING(net_ice,
3233 ICE_MAX_QP_NUM "=<int>");
3235 RTE_INIT(ice_init_log)
3237 ice_logtype_init = rte_log_register("pmd.net.ice.init");
3238 if (ice_logtype_init >= 0)
3239 rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
3240 ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
3241 if (ice_logtype_driver >= 0)
3242 rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);