/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_ethdev_pci.h>

#include "base/ice_sched.h"
#include "ice_ethdev.h"

#define ICE_MAX_QP_NUM "max_queue_pair_num"
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100

int ice_logtype_driver;

static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static void ice_dev_info_get(struct rte_eth_dev *dev,
    struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
    int wait_to_complete);
static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
    enum rte_vlan_type vlan_type,
static int ice_rss_reta_update(struct rte_eth_dev *dev,
    struct rte_eth_rss_reta_entry64 *reta_conf,
static int ice_rss_reta_query(struct rte_eth_dev *dev,
    struct rte_eth_rss_reta_entry64 *reta_conf,
static int ice_rss_hash_update(struct rte_eth_dev *dev,
    struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
    struct rte_eth_rss_conf *rss_conf);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
static int ice_macaddr_set(struct rte_eth_dev *dev,
    struct ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
    struct ether_addr *mac_addr,
    __rte_unused uint32_t index,
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
static int ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
    uint16_t pvid, int on);
static int ice_get_eeprom_length(struct rte_eth_dev *dev);
static int ice_get_eeprom(struct rte_eth_dev *dev,
    struct rte_dev_eeprom_info *eeprom);

static const struct rte_pci_id pci_id_ice_map[] = {
    { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
    { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
    { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
    { .vendor_id = 0, /* sentinel */ },

static const struct eth_dev_ops ice_eth_dev_ops = {
    .dev_configure = ice_dev_configure,
    .dev_start = ice_dev_start,
    .dev_stop = ice_dev_stop,
    .dev_close = ice_dev_close,
    .dev_reset = ice_dev_reset,
    .rx_queue_start = ice_rx_queue_start,
    .rx_queue_stop = ice_rx_queue_stop,
    .tx_queue_start = ice_tx_queue_start,
    .tx_queue_stop = ice_tx_queue_stop,
    .rx_queue_setup = ice_rx_queue_setup,
    .rx_queue_release = ice_rx_queue_release,
    .tx_queue_setup = ice_tx_queue_setup,
    .tx_queue_release = ice_tx_queue_release,
    .dev_infos_get = ice_dev_info_get,
    .dev_supported_ptypes_get = ice_dev_supported_ptypes_get,
    .link_update = ice_link_update,
    .mtu_set = ice_mtu_set,
    .mac_addr_set = ice_macaddr_set,
    .mac_addr_add = ice_macaddr_add,
    .mac_addr_remove = ice_macaddr_remove,
    .vlan_filter_set = ice_vlan_filter_set,
    .vlan_offload_set = ice_vlan_offload_set,
    .vlan_tpid_set = ice_vlan_tpid_set,
    .reta_update = ice_rss_reta_update,
    .reta_query = ice_rss_reta_query,
    .rss_hash_update = ice_rss_hash_update,
    .rss_hash_conf_get = ice_rss_hash_conf_get,
    .rx_queue_intr_enable = ice_rx_queue_intr_enable,
    .rx_queue_intr_disable = ice_rx_queue_intr_disable,
    .fw_version_get = ice_fw_version_get,
    .vlan_pvid_set = ice_vlan_pvid_set,
    .rxq_info_get = ice_rxq_info_get,
    .txq_info_get = ice_txq_info_get,
    .get_eeprom_length = ice_get_eeprom_length,
    .get_eeprom = ice_get_eeprom,
    .rx_queue_count = ice_rx_queue_count,
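
/* Set the default sizes of the admin and mailbox control queues
 * before ice_init_hw() allocates them.
 */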
ice_init_controlq_parameter(struct ice_hw *hw)
    /* fields for adminq */
    hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
    hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
    hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
    hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

    /* fields for mailboxq, DPDK used as PF host */
    hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
    hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
    hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
    hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;

ice_check_qp_num(const char *key, const char *qp_value,
    __rte_unused void *opaque)
    while (isblank(*qp_value))
    num = strtoul(qp_value, &end, 10);
    if (!num || (*end == '-') || errno) {
        PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "

ice_config_max_queue_pair_num(struct rte_devargs *devargs)
    struct rte_kvargs *kvlist;
    const char *queue_num_key = ICE_MAX_QP_NUM;

    kvlist = rte_kvargs_parse(devargs->args, NULL);
    if (!rte_kvargs_count(kvlist, queue_num_key)) {
        rte_kvargs_free(kvlist);
    if (rte_kvargs_process(kvlist, queue_num_key,
            ice_check_qp_num, NULL) < 0) {
        rte_kvargs_free(kvlist);
    ret = rte_kvargs_process(kvlist, queue_num_key,
        ice_check_qp_num, NULL);
    rte_kvargs_free(kvlist);

ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
    struct pool_entry *entry;

    entry = rte_zmalloc(NULL, sizeof(*entry), 0);
        "Failed to allocate memory for resource pool");

    /* Initialize the queue heap */
    pool->num_free = num;
    LIST_INIT(&pool->alloc_list);
    LIST_INIT(&pool->free_list);

    /* Initialize element */
    LIST_INSERT_HEAD(&pool->free_list, entry, next);
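
/* Allocate 'num' contiguous entries from the pool; on success the
 * absolute base index (pool->base plus the entry base) is returned.
 */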
ice_res_pool_alloc(struct ice_res_pool_info *pool,
    struct pool_entry *entry, *valid_entry;

        PMD_INIT_LOG(ERR, "Invalid parameter");

    if (pool->num_free < num) {
        PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
            num, pool->num_free);

    /* Look up the free list and find the best-fit entry */
    LIST_FOREACH(entry, &pool->free_list, next) {
        if (entry->len >= num) {
            if (entry->len == num) {
                valid_entry->len > entry->len)

    /* No entry found that satisfies the request, return */
        PMD_INIT_LOG(ERR, "No valid entry found");

    /* The entry has exactly as many queues as requested,
     * remove it from the free list.
     */
    if (valid_entry->len == num) {
        LIST_REMOVE(valid_entry, next);
        /* The entry has more queues than requested; create a new
         * entry for the alloc list and reduce the queue base and
         * number of the free-list entry.
         */
        entry = rte_zmalloc(NULL, sizeof(*entry), 0);
            "Failed to allocate memory for "
        entry->base = valid_entry->base;
        valid_entry->base += num;
        valid_entry->len -= num;

    /* Insert it into alloc list, not sorted */
    LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);
    pool->num_free -= valid_entry->len;
    pool->num_alloc += valid_entry->len;

    return valid_entry->base + pool->base;

ice_res_pool_destroy(struct ice_res_pool_info *pool)
    struct pool_entry *entry, *next_entry;

    for (entry = LIST_FIRST(&pool->alloc_list);
         entry && (next_entry = LIST_NEXT(entry, next), 1);
         entry = next_entry) {
        LIST_REMOVE(entry, next);
    for (entry = LIST_FIRST(&pool->free_list);
         entry && (next_entry = LIST_NEXT(entry, next), 1);
         entry = next_entry) {
        LIST_REMOVE(entry, next);

    LIST_INIT(&pool->alloc_list);
    LIST_INIT(&pool->free_list);

ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
    /* Set VSI LUT selection */
    info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
        ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
    /* Set Hash scheme */
    info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
        ICE_AQ_VSI_Q_OPT_RSS_HASH_M;

    info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
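
/* Fill the TC/queue mapping section of the VSI context. Only TC0
 * is supported, and the queue count is adjusted to a power of two
 * before it is programmed.
 */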
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
    struct ice_aqc_vsi_props *info,
    uint8_t enabled_tcmap)
    uint16_t bsf, qp_idx;

    /* Default is TC0 for now; multi-TC support needs to be added later.
     * Configure TC and queue mapping parameters: for an enabled TC,
     * allocate qpnum_per_tc queues to its traffic.
     */
    if (enabled_tcmap != 0x01) {
        PMD_INIT_LOG(ERR, "only TC0 is supported");

    vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
    bsf = rte_bsf32(vsi->nb_qps);
    /* Adjust the queue number to actual queues that can be applied */
    vsi->nb_qps = 0x1 << bsf;

    /* Set tc and queue mapping with VSI */
    info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
        ICE_AQ_VSI_TC_Q_OFFSET_S) |
        (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

    /* Associate queue number with VSI */
    info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
    info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
    info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
    info->valid_sections |=
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
    /* Set the info.ingress_table and info.egress_table
     * for UP translate table. Now just set it to 1:1 map by default
     * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
     */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
    info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
    info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
    info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);

ice_init_mac_address(struct rte_eth_dev *dev)
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    if (!is_unicast_ether_addr
        ((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
        PMD_INIT_LOG(ERR, "Invalid MAC address");

    ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
        (struct ether_addr *)hw->port_info[0].mac.perm_addr);

    dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
    if (!dev->data->mac_addrs) {
        "Failed to allocate memory to store mac address");
    /* store it to dev data */
    ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
        &dev->data->mac_addrs[0]);

/* Find a specific MAC filter */
static struct ice_mac_filter *
ice_find_mac_filter(struct ice_vsi *vsi, struct ether_addr *macaddr)
    struct ice_mac_filter *f;

    TAILQ_FOREACH(f, &vsi->mac_list, next) {
        if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
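
/* Add a MAC filter to the VSI through the switch add-MAC AQ
 * command and track it in the VSI's software MAC list.
 */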
ice_add_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
    struct ice_fltr_list_entry *m_list_itr = NULL;
    struct ice_mac_filter *f;
    struct LIST_HEAD_TYPE list_head;
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);

    /* If it's added and configured, return */
    f = ice_find_mac_filter(vsi, mac_addr);
        PMD_DRV_LOG(INFO, "This MAC filter already exists.");

    INIT_LIST_HEAD(&list_head);

    m_list_itr = (struct ice_fltr_list_entry *)
        ice_malloc(hw, sizeof(*m_list_itr));
    ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
        mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
    m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
    m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
    m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
    m_list_itr->fltr_info.flag = ICE_FLTR_TX;
    m_list_itr->fltr_info.vsi_handle = vsi->idx;

    LIST_ADD(&m_list_itr->list_entry, &list_head);

    ret = ice_add_mac(hw, &list_head);
    if (ret != ICE_SUCCESS) {
        PMD_DRV_LOG(ERR, "Failed to add MAC filter");

    /* Add the mac addr into mac list */
    f = rte_zmalloc(NULL, sizeof(*f), 0);
        PMD_DRV_LOG(ERR, "failed to allocate memory");
    rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
    TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);

    rte_free(m_list_itr);

ice_remove_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
    struct ice_fltr_list_entry *m_list_itr = NULL;
    struct ice_mac_filter *f;
    struct LIST_HEAD_TYPE list_head;
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);

    /* Can't find it, return an error */
    f = ice_find_mac_filter(vsi, mac_addr);

    INIT_LIST_HEAD(&list_head);

    m_list_itr = (struct ice_fltr_list_entry *)
        ice_malloc(hw, sizeof(*m_list_itr));
    ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
        mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
    m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
    m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
    m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
    m_list_itr->fltr_info.flag = ICE_FLTR_TX;
    m_list_itr->fltr_info.vsi_handle = vsi->idx;

    LIST_ADD(&m_list_itr->list_entry, &list_head);

    /* remove the mac filter */
    ret = ice_remove_mac(hw, &list_head);
    if (ret != ICE_SUCCESS) {
        PMD_DRV_LOG(ERR, "Failed to remove MAC filter");

    /* Remove the mac addr from mac list */
    TAILQ_REMOVE(&vsi->mac_list, f, next);

    rte_free(m_list_itr);

/* Find a specific VLAN filter */
static struct ice_vlan_filter *
ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
    struct ice_vlan_filter *f;

    TAILQ_FOREACH(f, &vsi->vlan_list, next) {
        if (vlan_id == f->vlan_info.vlan_id)
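
/* Add a VLAN filter to the VSI through the switch add-VLAN AQ
 * command and track it in the VSI's software VLAN list.
 */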
ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
    struct ice_fltr_list_entry *v_list_itr = NULL;
    struct ice_vlan_filter *f;
    struct LIST_HEAD_TYPE list_head;
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);

    if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)

    /* If it's added and configured, return. */
    f = ice_find_vlan_filter(vsi, vlan_id);
        PMD_DRV_LOG(INFO, "This VLAN filter already exists.");

    if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)

    INIT_LIST_HEAD(&list_head);

    v_list_itr = (struct ice_fltr_list_entry *)
        ice_malloc(hw, sizeof(*v_list_itr));
    v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
    v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
    v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
    v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
    v_list_itr->fltr_info.flag = ICE_FLTR_TX;
    v_list_itr->fltr_info.vsi_handle = vsi->idx;

    LIST_ADD(&v_list_itr->list_entry, &list_head);

    ret = ice_add_vlan(hw, &list_head);
    if (ret != ICE_SUCCESS) {
        PMD_DRV_LOG(ERR, "Failed to add VLAN filter");

    /* Add vlan into vlan list */
    f = rte_zmalloc(NULL, sizeof(*f), 0);
        PMD_DRV_LOG(ERR, "failed to allocate memory");
    f->vlan_info.vlan_id = vlan_id;
    TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);

    rte_free(v_list_itr);

ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
    struct ice_fltr_list_entry *v_list_itr = NULL;
    struct ice_vlan_filter *f;
    struct LIST_HEAD_TYPE list_head;
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);

    /* VLAN 0 is the generic filter for untagged packets
     * and can't be removed.
     */
    if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)

    /* Can't find it, return an error */
    f = ice_find_vlan_filter(vsi, vlan_id);

    INIT_LIST_HEAD(&list_head);

    v_list_itr = (struct ice_fltr_list_entry *)
        ice_malloc(hw, sizeof(*v_list_itr));
    v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
    v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
    v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
    v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
    v_list_itr->fltr_info.flag = ICE_FLTR_TX;
    v_list_itr->fltr_info.vsi_handle = vsi->idx;

    LIST_ADD(&v_list_itr->list_entry, &list_head);

    /* remove the vlan filter */
    ret = ice_remove_vlan(hw, &list_head);
    if (ret != ICE_SUCCESS) {
        PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");

    /* Remove the vlan id from vlan list */
    TAILQ_REMOVE(&vsi->vlan_list, f, next);

    rte_free(v_list_itr);

ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
    struct ice_mac_filter *m_f;
    struct ice_vlan_filter *v_f;

    if (!vsi || !vsi->mac_num)

    TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
        ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
        if (ret != ICE_SUCCESS) {

    if (vsi->vlan_num == 0)

    TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
        ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
        if (ret != ICE_SUCCESS) {

ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
    struct ice_vsi_ctx ctxt;

    /* Check if it has been already on or off */
    if (vsi->info.valid_sections &
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
        if ((vsi->info.outer_tag_flags &
             ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
            ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
            return 0; /* already on */
        if (!(vsi->info.outer_tag_flags &
              ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
            return 0; /* already off */

        qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;

    /* clear global insertion and use per packet insertion */
    vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
    vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
    vsi->info.outer_tag_flags |= qinq_flags;
    /* use the default outer vlan type 0x9100 (ICE_DFLT_OUTER_TAG_TYPE) */
    vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
    vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
        ICE_AQ_VSI_OUTER_TAG_TYPE_S;
    (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
    ctxt.info.valid_sections =
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
    ctxt.vsi_num = vsi->vsi_id;
    ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
        "Update VSI failed to %s qinq insertion",
        on ? "enable" : "disable");

    vsi->info.valid_sections |=
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
    struct ice_vsi_ctx ctxt;

    /* Check if it has been already on or off */
    if (vsi->info.valid_sections &
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
        if ((vsi->info.outer_tag_flags &
             ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
            ICE_AQ_VSI_OUTER_TAG_COPY)
            return 0; /* already on */
        if ((vsi->info.outer_tag_flags &
             ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
            ICE_AQ_VSI_OUTER_TAG_NOTHING)
            return 0; /* already off */

        qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
        qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
    vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
    vsi->info.outer_tag_flags |= qinq_flags;
    /* use the default outer vlan type 0x9100 (ICE_DFLT_OUTER_TAG_TYPE) */
    vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
    vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
        ICE_AQ_VSI_OUTER_TAG_TYPE_S;
    (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
    ctxt.info.valid_sections =
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
    ctxt.vsi_num = vsi->vsi_id;
    ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
        "Update VSI failed to %s qinq stripping",
        on ? "enable" : "disable");

    vsi->info.valid_sections |=
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
    ret = ice_vsi_config_qinq_stripping(vsi, on);
        PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);

    ret = ice_vsi_config_qinq_insertion(vsi, on);
        PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);
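
/* Enable the miscellaneous (IRQ0) interrupt causes, such as admin
 * queue and link state change events, on MSI-X vector 0.
 */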
ice_pf_enable_irq0(struct ice_hw *hw)
    /* reset the registers */
    ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
    ICE_READ_REG(hw, PFINT_OICR);

    ICE_WRITE_REG(hw, PFINT_OICR_ENA,
        (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
        (~PFINT_OICR_LINK_STAT_CHANGE_M)));

    ICE_WRITE_REG(hw, PFINT_OICR_CTL,
        (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
        ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
        PFINT_OICR_CTL_ITR_INDX_M) |
        PFINT_OICR_CTL_CAUSE_ENA_M);

    ICE_WRITE_REG(hw, PFINT_FW_CTL,
        (0 & PFINT_FW_CTL_MSIX_INDX_M) |
        ((0 << PFINT_FW_CTL_ITR_INDX_S) &
        PFINT_FW_CTL_ITR_INDX_M) |
        PFINT_FW_CTL_CAUSE_ENA_M);

    ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);

    ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
        GLINT_DYN_CTL_INTENA_M |
        GLINT_DYN_CTL_CLEARPBA_M |
        GLINT_DYN_CTL_ITR_INDX_M);

ice_pf_disable_irq0(struct ice_hw *hw)
    /* Disable all interrupt types */
    ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
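
/* Drain pending events from the admin receive queue and dispatch
 * them; only link status change events are handled so far.
 */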
ice_handle_aq_msg(struct rte_eth_dev *dev)
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ice_ctl_q_info *cq = &hw->adminq;
    struct ice_rq_event_info event;
    uint16_t pending, opcode;

    event.buf_len = ICE_AQ_MAX_BUF_LEN;
    event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
    if (!event.msg_buf) {
        PMD_DRV_LOG(ERR, "Failed to allocate mem");

        ret = ice_clean_rq_elem(hw, cq, &event, &pending);
        if (ret != ICE_SUCCESS) {
                "Failed to read msg from AdminQ, "
                hw->adminq.sq_last_status);

        opcode = rte_le_to_cpu_16(event.desc.opcode);
        case ice_aqc_opc_get_link_status:
            ret = ice_link_update(dev, 0);
                _rte_eth_dev_callback_process
                    (dev, RTE_ETH_EVENT_INTR_LSC, NULL);
            PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",

    rte_free(event.msg_buf);

/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * Pointer to interrupt handle.
 * The address of parameter (struct rte_eth_dev *) registered before.
 */
ice_interrupt_handler(void *param)
    struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    /* Disable interrupt */
    ice_pf_disable_irq0(hw);

    /* read out interrupt causes */
    oicr = ICE_READ_REG(hw, PFINT_OICR);
    int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);

    /* No interrupt event indicated */
    if (!(oicr & PFINT_OICR_INTEVENT_M)) {
        PMD_DRV_LOG(INFO, "No interrupt event");

    if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
        PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
        ice_handle_aq_msg(dev);

    if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
        PMD_DRV_LOG(INFO, "OICR: link state change event");
        ice_link_update(dev, 0);

    if (oicr & PFINT_OICR_MAL_DETECT_M) {
        PMD_DRV_LOG(WARNING, "OICR: MDD event");
        reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
        if (reg & GL_MDET_TX_PQM_VALID_M) {
            pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
                GL_MDET_TX_PQM_PF_NUM_S;
            event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
                GL_MDET_TX_PQM_MAL_TYPE_S;
            queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
                GL_MDET_TX_PQM_QNUM_S;

            PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
                "%d by PQM on TX queue %d PF# %d",
                event, queue, pf_num);

        reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
        if (reg & GL_MDET_TX_TCLAN_VALID_M) {
            pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
                GL_MDET_TX_TCLAN_PF_NUM_S;
            event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
                GL_MDET_TX_TCLAN_MAL_TYPE_S;
            queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
                GL_MDET_TX_TCLAN_QNUM_S;

            PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
                "%d by TCLAN on TX queue %d PF# %d",
                event, queue, pf_num);

    /* Enable interrupt */
    ice_pf_enable_irq0(hw);
    rte_intr_enable(dev->intr_handle);

/* Initialize SW parameters of PF */
ice_pf_sw_init(struct rte_eth_dev *dev)
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_hw *hw = ICE_PF_TO_HW(pf);

    if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
        ice_config_max_queue_pair_num(dev->device->devargs);
        (uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
            hw->func_caps.common_cap.num_rxq);

    pf->lan_nb_qps = pf->lan_nb_qp_max;
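
/* Create and set up a VSI of the given type; only ICE_VSI_PF is
 * supported so far. The context is filled with default RSS and a
 * TC0-only queue mapping, the VSI is added via AQ, default and
 * broadcast MAC filters are installed and the Tx scheduler is
 * configured.
 */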
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
    struct ice_hw *hw = ICE_PF_TO_HW(pf);
    struct ice_vsi *vsi = NULL;
    struct ice_vsi_ctx vsi_ctx;
    struct ether_addr broadcast = {
        .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
    struct ether_addr mac_addr;
    uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
    uint8_t tc_bitmap = 0x1;

    /* hw->num_lports = 1 in NIC mode */
    vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);

    vsi->idx = pf->next_vsi_idx;
    vsi->adapter = ICE_PF_TO_ADAPTER(pf);
    vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
    vsi->vlan_anti_spoof_on = 0;
    vsi->vlan_filter_on = 1;
    TAILQ_INIT(&vsi->mac_list);
    TAILQ_INIT(&vsi->vlan_list);

    memset(&vsi_ctx, 0, sizeof(vsi_ctx));
    /* base_queue is used in the queue mapping of the VSI add/update
     * command. Assume vsi->base_queue is 0 now; don't consider SRIOV
     * or VMDQ cases in the first stage. Only the main VSI.
     */
    vsi->base_queue = 0;

    vsi->nb_qps = pf->lan_nb_qps;
    ice_vsi_config_default_rss(&vsi_ctx.info);
    vsi_ctx.alloc_from_pool = true;
    vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
    /* switch_id is queried by the get_switch_config AQ command */
    vsi_ctx.info.sw_id = hw->port_info->sw_id;
    vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
    /* Allow all untagged or tagged packets */
    vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
    vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
    vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
        ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
    /* Enable VLAN/UP trip */
    ret = ice_vsi_config_tc_queue_mapping(vsi,
        "tc queue mapping with vsi failed, "

    /* for other types of VSI */
    PMD_INIT_LOG(ERR, "other types of VSI not supported");

    /* VF has MSIX interrupt in VF range, don't allocate here */
    if (type == ICE_VSI_PF) {
        ret = ice_res_pool_alloc(&pf->msix_pool,
            RTE_MIN(vsi->nb_qps,
                RTE_MAX_RXTX_INTR_VEC_ID));
            PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
        vsi->msix_intr = ret;
        vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);

    ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
    if (ret != ICE_SUCCESS) {
        PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);

    /* store VSI information in the SW structure */
    vsi->vsi_id = vsi_ctx.vsi_num;
    vsi->info = vsi_ctx.info;
    pf->vsis_allocated = vsi_ctx.vsis_allocd;
    pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

    /* MAC configuration */
    rte_memcpy(pf->dev_addr.addr_bytes,
        hw->port_info->mac.perm_addr,

    rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
    ret = ice_add_mac_filter(vsi, &mac_addr);
    if (ret != ICE_SUCCESS)
        PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

    rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
    ret = ice_add_mac_filter(vsi, &mac_addr);
    if (ret != ICE_SUCCESS)
        PMD_INIT_LOG(ERR, "Failed to add MAC filter");

    /* At the beginning, only TC0. */
    /* What we need here is the maximum number of Tx queues;
     * currently vsi->nb_qps holds it. Correct this if that changes.
     */
    max_txqs[0] = vsi->nb_qps;
    ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
        tc_bitmap, max_txqs);
    if (ret != ICE_SUCCESS)
        PMD_INIT_LOG(ERR, "Failed to config vsi sched");

ice_pf_setup(struct ice_pf *pf)
    struct ice_vsi *vsi;

    /* Clear all stats counters */
    pf->offset_loaded = FALSE;
    memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
    memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
    memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
    memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

    vsi = ice_setup_vsi(pf, ICE_VSI_PF);
        PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
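
/* Main PF init path: set up the shared HW structure and control
 * queues, software parameters, the MSI-X pool and the main VSI,
 * then register the interrupt handler and enable IRQ0.
 */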
ice_dev_init(struct rte_eth_dev *dev)
    struct rte_pci_device *pci_dev;
    struct rte_intr_handle *intr_handle;
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_vsi *vsi;

    dev->dev_ops = &ice_eth_dev_ops;
    dev->rx_pkt_burst = ice_recv_pkts;
    dev->tx_pkt_burst = ice_xmit_pkts;
    dev->tx_pkt_prepare = ice_prep_pkts;

    ice_set_default_ptype_table(dev);
    pci_dev = RTE_DEV_TO_PCI(dev->device);
    intr_handle = &pci_dev->intr_handle;

    pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    pf->adapter->eth_dev = dev;
    pf->dev_data = dev->data;
    hw->back = pf->adapter;
    hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
    hw->vendor_id = pci_dev->id.vendor_id;
    hw->device_id = pci_dev->id.device_id;
    hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
    hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
    hw->bus.device = pci_dev->addr.devid;
    hw->bus.func = pci_dev->addr.function;

    ice_init_controlq_parameter(hw);

    ret = ice_init_hw(hw);
        PMD_INIT_LOG(ERR, "Failed to initialize HW");

    PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
        hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
        hw->api_maj_ver, hw->api_min_ver);

    ice_pf_sw_init(dev);
    ret = ice_init_mac_address(dev);
        PMD_INIT_LOG(ERR, "Failed to initialize mac address");

    ret = ice_res_pool_init(&pf->msix_pool, 1,
        hw->func_caps.common_cap.num_msix_vectors - 1);
        PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
        goto err_msix_pool_init;

    ret = ice_pf_setup(pf);
        PMD_INIT_LOG(ERR, "Failed to setup PF");

    /* Disable double vlan by default */
    ice_vsi_config_double_vlan(vsi, FALSE);

    /* register callback func to eal lib */
    rte_intr_callback_register(intr_handle,
        ice_interrupt_handler, dev);

    ice_pf_enable_irq0(hw);

    /* enable uio intr after callback register */
    rte_intr_enable(intr_handle);

    ice_res_pool_destroy(&pf->msix_pool);
    rte_free(dev->data->mac_addrs);
    ice_sched_cleanup_all(hw);
    rte_free(hw->port_info);
    ice_shutdown_all_ctrlq(hw);

ice_release_vsi(struct ice_vsi *vsi)
    struct ice_vsi_ctx vsi_ctx;
    enum ice_status ret;

    hw = ICE_VSI_TO_HW(vsi);

    ice_remove_all_mac_vlan_filters(vsi);

    memset(&vsi_ctx, 0, sizeof(vsi_ctx));

    vsi_ctx.vsi_num = vsi->vsi_id;
    vsi_ctx.info = vsi->info;
    ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
    if (ret != ICE_SUCCESS) {
        PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);

ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
    struct rte_eth_dev *dev = vsi->adapter->eth_dev;
    struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
    uint16_t msix_intr, i;

    /* disable interrupts and also clear all the existing config */
    for (i = 0; i < vsi->nb_qps; i++) {
        ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
        ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);

    if (rte_intr_allow_others(intr_handle))
        for (i = 0; i < vsi->nb_msix; i++) {
            msix_intr = vsi->msix_intr + i;
            ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
                GLINT_DYN_CTL_WB_ON_ITR_M);

        ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);

ice_dev_stop(struct rte_eth_dev *dev)
    struct rte_eth_dev_data *data = dev->data;
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_vsi *main_vsi = pf->main_vsi;
    struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

    /* avoid stopping again */
    if (pf->adapter_stopped)

    /* stop and clear all Rx queues */
    for (i = 0; i < data->nb_rx_queues; i++)
        ice_rx_queue_stop(dev, i);

    /* stop and clear all Tx queues */
    for (i = 0; i < data->nb_tx_queues; i++)
        ice_tx_queue_stop(dev, i);

    /* disable all queue interrupts */
    ice_vsi_disable_queues_intr(main_vsi);

    /* Clear all queues and release mbufs */
    ice_clear_queues(dev);

    /* Clean datapath event and queue/vec mapping */
    rte_intr_efd_disable(intr_handle);
    if (intr_handle->intr_vec) {
        rte_free(intr_handle->intr_vec);
        intr_handle->intr_vec = NULL;

    pf->adapter_stopped = true;

ice_dev_close(struct rte_eth_dev *dev)
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    /* release all queue resources */
    ice_free_queues(dev);

    ice_res_pool_destroy(&pf->msix_pool);
    ice_release_vsi(pf->main_vsi);

    ice_shutdown_all_ctrlq(hw);

ice_dev_uninit(struct rte_eth_dev *dev)
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

    dev->dev_ops = NULL;
    dev->rx_pkt_burst = NULL;
    dev->tx_pkt_burst = NULL;

    rte_free(dev->data->mac_addrs);
    dev->data->mac_addrs = NULL;

    /* disable uio intr before callback unregister */
    rte_intr_disable(intr_handle);

    /* unregister callback func from eal lib */
    rte_intr_callback_unregister(intr_handle,
        ice_interrupt_handler, dev);

    ice_release_vsi(pf->main_vsi);
    ice_sched_cleanup_all(hw);
    rte_free(hw->port_info);
    ice_shutdown_all_ctrlq(hw);

ice_dev_configure(__rte_unused struct rte_eth_dev *dev)
    struct ice_adapter *ad =
        ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

    /* Initialize to TRUE. If any of the Rx queues doesn't meet the
     * bulk allocation or vector Rx preconditions, we will reset it.
     */
    ad->rx_bulk_alloc_allowed = true;
    ad->tx_simple_allowed = true;
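
/* Initialize RSS for the main VSI: program the hash key (random
 * if the application does not provide one) and a LUT that spreads
 * the entries over the configured Rx queues.
 */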
static int ice_init_rss(struct ice_pf *pf)
    struct ice_hw *hw = ICE_PF_TO_HW(pf);
    struct ice_vsi *vsi = pf->main_vsi;
    struct rte_eth_dev *dev = pf->adapter->eth_dev;
    struct rte_eth_rss_conf *rss_conf;
    struct ice_aqc_get_set_rss_keys key;

    rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
    nb_q = dev->data->nb_rx_queues;
    vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
    vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;

    vsi->rss_key = rte_zmalloc(NULL,
        vsi->rss_key_size, 0);
    vsi->rss_lut = rte_zmalloc(NULL,
        vsi->rss_lut_size, 0);

    /* configure RSS key */
    if (!rss_conf->rss_key) {
        /* Calculate the default hash key */
        for (i = 0; i < vsi->rss_key_size; i++)
            vsi->rss_key[i] = (uint8_t)rte_rand();
        rte_memcpy(vsi->rss_key, rss_conf->rss_key,
            RTE_MIN(rss_conf->rss_key_len,
                vsi->rss_key_size));
    rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
    ret = ice_aq_set_rss_key(hw, vsi->idx, &key);

    /* init RSS LUT table */
    for (i = 0; i < vsi->rss_lut_size; i++)
        vsi->rss_lut[i] = i % nb_q;

    ret = ice_aq_set_rss_lut(hw, vsi->idx,
        ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
        vsi->rss_lut, vsi->rss_lut_size);
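
/* Bind a range of Rx/Tx queues to one MSI-X vector by programming
 * QINT_RQCTL/QINT_TQCTL, and set the initial ITR0 interval.
 */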
__vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
    int base_queue, int nb_queue)
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
    uint32_t val, val_tx;

    for (i = 0; i < nb_queue; i++) {
        val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
            (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
        val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
            (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;

        PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
            base_queue + i, msix_vect);
        /* set ITR0 value */
        ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
        ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
        ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);

ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
    struct rte_eth_dev *dev = vsi->adapter->eth_dev;
    struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
    uint16_t msix_vect = vsi->msix_intr;
    uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
    uint16_t queue_idx = 0;

    /* clear Rx/Tx queue interrupt */
    for (i = 0; i < vsi->nb_used_qps; i++) {
        ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
        ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);

    /* PF bind interrupt */
    if (rte_intr_dp_is_en(intr_handle)) {
        for (i = 0; i < vsi->nb_used_qps; i++) {
            if (!rte_intr_allow_others(intr_handle))
                msix_vect = ICE_MISC_VEC_ID;

            /* uio: map all queues to one msix_vect */
            __vsi_queues_bind_intr(vsi, msix_vect,
                vsi->base_queue + i,
                vsi->nb_used_qps - i);
            for (; !!record && i < vsi->nb_used_qps; i++)
                intr_handle->intr_vec[queue_idx + i] =

            /* vfio: 1:1 queue/msix_vect mapping */
            __vsi_queues_bind_intr(vsi, msix_vect,
                vsi->base_queue + i, 1);
            intr_handle->intr_vec[queue_idx + i] = msix_vect;

ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
    struct rte_eth_dev *dev = vsi->adapter->eth_dev;
    struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
    uint16_t msix_intr, i;

    if (rte_intr_allow_others(intr_handle))
        for (i = 0; i < vsi->nb_used_qps; i++) {
            msix_intr = vsi->msix_intr + i;
            ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
                GLINT_DYN_CTL_INTENA_M |
                GLINT_DYN_CTL_CLEARPBA_M |
                GLINT_DYN_CTL_ITR_INDX_M |
                GLINT_DYN_CTL_WB_ON_ITR_M);

        ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
            GLINT_DYN_CTL_INTENA_M |
            GLINT_DYN_CTL_CLEARPBA_M |
            GLINT_DYN_CTL_ITR_INDX_M |
            GLINT_DYN_CTL_WB_ON_ITR_M);
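
/* Set up Rx queue interrupts: size the event fds, allocate the
 * queue-to-vector map, then bind the queues to MSI-X vectors and
 * enable them.
 */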
ice_rxq_intr_setup(struct rte_eth_dev *dev)
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
    struct ice_vsi *vsi = pf->main_vsi;
    uint32_t intr_vector = 0;

    rte_intr_disable(intr_handle);

    /* check and configure queue intr-vector mapping */
    if ((rte_intr_cap_multiple(intr_handle) ||
         !RTE_ETH_DEV_SRIOV(dev).active) &&
        dev->data->dev_conf.intr_conf.rxq != 0) {
        intr_vector = dev->data->nb_rx_queues;
        if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
            PMD_DRV_LOG(ERR, "At most %d intr queues supported",
                ICE_MAX_INTR_QUEUE_NUM);
        if (rte_intr_efd_enable(intr_handle, intr_vector))

    if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
        intr_handle->intr_vec =
            rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
        if (!intr_handle->intr_vec) {
                "Failed to allocate %d rx_queues intr_vec",
                dev->data->nb_rx_queues);

    /* Map queues with MSIX interrupt */
    vsi->nb_used_qps = dev->data->nb_rx_queues;
    ice_vsi_queues_bind_intr(vsi);

    /* Enable interrupts for all the queues */
    ice_vsi_enable_queues_intr(vsi);

    rte_intr_enable(intr_handle);

ice_dev_start(struct rte_eth_dev *dev)
    struct rte_eth_dev_data *data = dev->data;
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    uint16_t nb_rxq = 0;

    /* program Tx queues' context in hardware */
    for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
        ret = ice_tx_queue_start(dev, nb_txq);
            PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);

    /* program Rx queues' context in hardware */
    for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
        ret = ice_rx_queue_start(dev, nb_rxq);
            PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);

    ret = ice_init_rss(pf);
        PMD_DRV_LOG(ERR, "Failed to enable rss for PF");

    ice_set_rx_function(dev);

    /* enable Rx interrupts and map Rx queues to interrupt vectors */
    if (ice_rxq_intr_setup(dev))

    ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
        ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
            ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
            ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
            ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
            ICE_AQ_LINK_EVENT_AN_COMPLETED |
            ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
    if (ret != ICE_SUCCESS)
        PMD_DRV_LOG(WARNING, "Fail to set phy mask");

    /* Call the get_link_info aq command to enable/disable LSE */
    ice_link_update(dev, 0);

    pf->adapter_stopped = false;

    /* stop the started queues if we failed to start all queues */
    for (i = 0; i < nb_rxq; i++)
        ice_rx_queue_stop(dev, i);
    for (i = 0; i < nb_txq; i++)
        ice_tx_queue_stop(dev, i);

ice_dev_reset(struct rte_eth_dev *dev)
    if (dev->data->sriov.active)

    ret = ice_dev_uninit(dev);
        PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);

    ret = ice_dev_init(dev);
        PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);

ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ice_vsi *vsi = pf->main_vsi;
    struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

    dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
    dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
    dev_info->max_rx_queues = vsi->nb_qps;
    dev_info->max_tx_queues = vsi->nb_qps;
    dev_info->max_mac_addrs = vsi->max_macaddrs;
    dev_info->max_vfs = pci_dev->max_vfs;

    dev_info->rx_offload_capa =
        DEV_RX_OFFLOAD_VLAN_STRIP |
        DEV_RX_OFFLOAD_IPV4_CKSUM |
        DEV_RX_OFFLOAD_UDP_CKSUM |
        DEV_RX_OFFLOAD_TCP_CKSUM |
        DEV_RX_OFFLOAD_QINQ_STRIP |
        DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
        DEV_RX_OFFLOAD_VLAN_EXTEND |
        DEV_RX_OFFLOAD_JUMBO_FRAME |
        DEV_RX_OFFLOAD_KEEP_CRC |
        DEV_RX_OFFLOAD_VLAN_FILTER;
    dev_info->tx_offload_capa =
        DEV_TX_OFFLOAD_VLAN_INSERT |
        DEV_TX_OFFLOAD_QINQ_INSERT |
        DEV_TX_OFFLOAD_IPV4_CKSUM |
        DEV_TX_OFFLOAD_UDP_CKSUM |
        DEV_TX_OFFLOAD_TCP_CKSUM |
        DEV_TX_OFFLOAD_SCTP_CKSUM |
        DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
        DEV_TX_OFFLOAD_TCP_TSO |
        DEV_TX_OFFLOAD_MULTI_SEGS;
    dev_info->rx_queue_offload_capa = 0;
    dev_info->tx_queue_offload_capa = 0;

    dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
    dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
    dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

    dev_info->default_rxconf = (struct rte_eth_rxconf) {
        .pthresh = ICE_DEFAULT_RX_PTHRESH,
        .hthresh = ICE_DEFAULT_RX_HTHRESH,
        .wthresh = ICE_DEFAULT_RX_WTHRESH,
        .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,

    dev_info->default_txconf = (struct rte_eth_txconf) {
        .pthresh = ICE_DEFAULT_TX_PTHRESH,
        .hthresh = ICE_DEFAULT_TX_HTHRESH,
        .wthresh = ICE_DEFAULT_TX_WTHRESH,
        .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
        .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,

    dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
        .nb_max = ICE_MAX_RING_DESC,
        .nb_min = ICE_MIN_RING_DESC,
        .nb_align = ICE_ALIGN_RING_DESC,

    dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
        .nb_max = ICE_MAX_RING_DESC,
        .nb_min = ICE_MIN_RING_DESC,
        .nb_align = ICE_ALIGN_RING_DESC,

    dev_info->speed_capa = ETH_LINK_SPEED_10M |
        ETH_LINK_SPEED_100M |
        ETH_LINK_SPEED_2_5G |
        ETH_LINK_SPEED_10G |
        ETH_LINK_SPEED_20G |
        ETH_LINK_SPEED_25G |

    dev_info->nb_rx_queues = dev->data->nb_rx_queues;
    dev_info->nb_tx_queues = dev->data->nb_tx_queues;

    dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
    dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
    dev_info->default_rxportconf.nb_queues = 1;
    dev_info->default_txportconf.nb_queues = 1;
    dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
    dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;

ice_atomic_read_link_status(struct rte_eth_dev *dev,
    struct rte_eth_link *link)
    struct rte_eth_link *dst = link;
    struct rte_eth_link *src = &dev->data->dev_link;

    if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
        *(uint64_t *)src) == 0)

ice_atomic_write_link_status(struct rte_eth_dev *dev,
    struct rte_eth_link *link)
    struct rte_eth_link *dst = &dev->data->dev_link;
    struct rte_eth_link *src = link;

    if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
        *(uint64_t *)src) == 0)
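
/* Query the link status from firmware, polling for up to 1s when
 * wait_to_complete is set, and publish any change to dev->data.
 */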
ice_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ice_link_status link_status;
    struct rte_eth_link link, old;
    unsigned int rep_cnt = MAX_REPEAT_TIME;
    bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

    memset(&link, 0, sizeof(link));
    memset(&old, 0, sizeof(old));
    memset(&link_status, 0, sizeof(link_status));
    ice_atomic_read_link_status(dev, &old);

        /* Get link status information from hardware */
        status = ice_aq_get_link_info(hw->port_info, enable_lse,
            &link_status, NULL);
        if (status != ICE_SUCCESS) {
            link.link_speed = ETH_SPEED_NUM_100M;
            link.link_duplex = ETH_LINK_FULL_DUPLEX;
            PMD_DRV_LOG(ERR, "Failed to get link info");

        link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
        if (!wait_to_complete || link.link_status)

        rte_delay_ms(CHECK_INTERVAL);
    } while (--rep_cnt);

    if (!link.link_status)

    /* Full-duplex operation at all supported speeds */
    link.link_duplex = ETH_LINK_FULL_DUPLEX;

    /* Parse the link status */
    switch (link_status.link_speed) {
    case ICE_AQ_LINK_SPEED_10MB:
        link.link_speed = ETH_SPEED_NUM_10M;
    case ICE_AQ_LINK_SPEED_100MB:
        link.link_speed = ETH_SPEED_NUM_100M;
    case ICE_AQ_LINK_SPEED_1000MB:
        link.link_speed = ETH_SPEED_NUM_1G;
    case ICE_AQ_LINK_SPEED_2500MB:
        link.link_speed = ETH_SPEED_NUM_2_5G;
    case ICE_AQ_LINK_SPEED_5GB:
        link.link_speed = ETH_SPEED_NUM_5G;
    case ICE_AQ_LINK_SPEED_10GB:
        link.link_speed = ETH_SPEED_NUM_10G;
    case ICE_AQ_LINK_SPEED_20GB:
        link.link_speed = ETH_SPEED_NUM_20G;
    case ICE_AQ_LINK_SPEED_25GB:
        link.link_speed = ETH_SPEED_NUM_25G;
    case ICE_AQ_LINK_SPEED_40GB:
        link.link_speed = ETH_SPEED_NUM_40G;
    case ICE_AQ_LINK_SPEED_UNKNOWN:
        PMD_DRV_LOG(ERR, "Unknown link speed");
        link.link_speed = ETH_SPEED_NUM_NONE;

    link.link_autoneg = !(dev->data->dev_conf.link_speeds &
        ETH_LINK_SPEED_FIXED);

    ice_atomic_write_link_status(dev, &link);
    if (link.link_status == old.link_status)

ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct rte_eth_dev_data *dev_data = pf->dev_data;
    uint32_t frame_size = mtu + ETHER_HDR_LEN
        + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE;

    /* check if mtu is within the allowed range */
    if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)

    /* MTU setting is forbidden if the port is started */
    if (dev_data->dev_started) {
            "port %d must be stopped before configuration",

    if (frame_size > ETHER_MAX_LEN)
        dev_data->dev_conf.rxmode.offloads |=
            DEV_RX_OFFLOAD_JUMBO_FRAME;
        dev_data->dev_conf.rxmode.offloads &=
            ~DEV_RX_OFFLOAD_JUMBO_FRAME;

    dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

static int ice_macaddr_set(struct rte_eth_dev *dev,
    struct ether_addr *mac_addr)
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_vsi *vsi = pf->main_vsi;
    struct ice_mac_filter *f;

    if (!is_valid_assigned_ether_addr(mac_addr)) {
        PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");

    TAILQ_FOREACH(f, &vsi->mac_list, next) {
        if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))

        PMD_DRV_LOG(ERR, "Failed to find filter for default mac");

    ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
    if (ret != ICE_SUCCESS) {
        PMD_DRV_LOG(ERR, "Failed to delete mac filter");
    ret = ice_add_mac_filter(vsi, mac_addr);
    if (ret != ICE_SUCCESS) {
        PMD_DRV_LOG(ERR, "Failed to add mac filter");
    memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);

    flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
    ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
    if (ret != ICE_SUCCESS)
        PMD_DRV_LOG(ERR, "Failed to set manage mac");

/* Add a MAC address, and update filters */
ice_macaddr_add(struct rte_eth_dev *dev,
    struct ether_addr *mac_addr,
    __rte_unused uint32_t index,
    __rte_unused uint32_t pool)
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_vsi *vsi = pf->main_vsi;

    ret = ice_add_mac_filter(vsi, mac_addr);
    if (ret != ICE_SUCCESS) {
        PMD_DRV_LOG(ERR, "Failed to add MAC filter");

/* Remove a MAC address, and update filters */
ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_vsi *vsi = pf->main_vsi;
    struct rte_eth_dev_data *data = dev->data;
    struct ether_addr *macaddr;

    macaddr = &data->mac_addrs[index];
    ret = ice_remove_mac_filter(vsi, macaddr);
        PMD_DRV_LOG(ERR, "Failed to remove MAC filter");

ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_vsi *vsi = pf->main_vsi;

    PMD_INIT_FUNC_TRACE();

        ret = ice_add_vlan_filter(vsi, vlan_id);
            PMD_DRV_LOG(ERR, "Failed to add vlan filter");
        ret = ice_remove_vlan_filter(vsi, vlan_id);
            PMD_DRV_LOG(ERR, "Failed to remove vlan filter");

/* Configure vlan filter on or off */
ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
    struct ice_vsi_ctx ctxt;
    uint8_t sec_flags, sw_flags2;

    sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
        ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
    sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;

        vsi->info.sec_flags |= sec_flags;
        vsi->info.sw_flags2 |= sw_flags2;
        vsi->info.sec_flags &= ~sec_flags;
        vsi->info.sw_flags2 &= ~sw_flags2;
    vsi->info.sw_id = hw->port_info->sw_id;
    (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
    ctxt.info.valid_sections =
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
            ICE_AQ_VSI_PROP_SECURITY_VALID);
    ctxt.vsi_num = vsi->vsi_id;

    ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
        PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
            on ? "enable" : "disable");

    vsi->info.valid_sections |=
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
            ICE_AQ_VSI_PROP_SECURITY_VALID);

ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
    struct ice_vsi_ctx ctxt;

    /* Check if it has been already on or off */
    if (vsi->info.valid_sections &
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
        if ((vsi->info.vlan_flags &
             ICE_AQ_VSI_VLAN_EMOD_M) ==
            ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
            return 0; /* already on */
        if ((vsi->info.vlan_flags &
             ICE_AQ_VSI_VLAN_EMOD_M) ==
            ICE_AQ_VSI_VLAN_EMOD_NOTHING)
            return 0; /* already off */

        vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
        vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
    vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
    vsi->info.vlan_flags |= vlan_flags;
    (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
    ctxt.info.valid_sections =
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
    ctxt.vsi_num = vsi->vsi_id;
    ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
        PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
            on ? "enable" : "disable");

    vsi->info.valid_sections |=
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_vsi *vsi = pf->main_vsi;
    struct rte_eth_rxmode *rxmode;

    rxmode = &dev->data->dev_conf.rxmode;
    if (mask & ETH_VLAN_FILTER_MASK) {
        if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
            ice_vsi_config_vlan_filter(vsi, TRUE);
            ice_vsi_config_vlan_filter(vsi, FALSE);

    if (mask & ETH_VLAN_STRIP_MASK) {
        if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
            ice_vsi_config_vlan_stripping(vsi, TRUE);
            ice_vsi_config_vlan_stripping(vsi, FALSE);

    if (mask & ETH_VLAN_EXTEND_MASK) {
        if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
            ice_vsi_config_double_vlan(vsi, TRUE);
            ice_vsi_config_double_vlan(vsi, FALSE);

ice_vlan_tpid_set(struct rte_eth_dev *dev,
    enum rte_vlan_type vlan_type,
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint64_t reg_r = 0, reg_w = 0;
    uint16_t reg_id = 0;
    int qinq = dev->data->dev_conf.rxmode.offloads &
        DEV_RX_OFFLOAD_VLAN_EXTEND;

    switch (vlan_type) {
    case ETH_VLAN_TYPE_OUTER:
    case ETH_VLAN_TYPE_INNER:
            "Unsupported vlan type in single vlan.");
        PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);

    reg_r = ICE_READ_REG(hw, GL_SWT_L2TAGCTRL(reg_id));
    PMD_DRV_LOG(DEBUG, "Debug read from ICE GL_SWT_L2TAGCTRL[%d]: "
        "0x%08"PRIx64"", reg_id, reg_r);

    reg_w = reg_r & (~(GL_SWT_L2TAGCTRL_ETHERTYPE_M));
    reg_w |= ((uint64_t)tpid << GL_SWT_L2TAGCTRL_ETHERTYPE_S);
    if (reg_r == reg_w) {
        PMD_DRV_LOG(DEBUG, "No need to write");

    ICE_WRITE_REG(hw, GL_SWT_L2TAGCTRL(reg_id), reg_w);
    PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
        "ICE GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
    struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);

    if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
        ret = ice_aq_get_rss_lut(hw, vsi->idx, TRUE,
            PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
        uint64_t *lut_dw = (uint64_t *)lut;
        uint16_t i, lut_size_dw = lut_size / 4;

        for (i = 0; i < lut_size_dw; i++)
            lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));

ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
    struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);

    if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
        ret = ice_aq_set_rss_lut(hw, vsi->idx, TRUE,
            PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
        uint64_t *lut_dw = (uint64_t *)lut;
        uint16_t i, lut_size_dw = lut_size / 4;

        for (i = 0; i < lut_size_dw; i++)
            ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);

ice_rss_reta_update(struct rte_eth_dev *dev,
    struct rte_eth_rss_reta_entry64 *reta_conf,
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
    uint16_t idx, shift;

    if (reta_size != lut_size ||
        reta_size > ETH_RSS_RETA_SIZE_512) {
            "The size of hash lookup table configured (%d)"
            "doesn't match the number hardware can "
            reta_size, lut_size);

    lut = rte_zmalloc(NULL, reta_size, 0);
        PMD_DRV_LOG(ERR, "No memory can be allocated");
    ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);

    for (i = 0; i < reta_size; i++) {
        idx = i / RTE_RETA_GROUP_SIZE;
        shift = i % RTE_RETA_GROUP_SIZE;
        if (reta_conf[idx].mask & (1ULL << shift))
            lut[i] = reta_conf[idx].reta[shift];
    ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);

ice_rss_reta_query(struct rte_eth_dev *dev,
    struct rte_eth_rss_reta_entry64 *reta_conf,
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
    uint16_t idx, shift;

    if (reta_size != lut_size ||
        reta_size > ETH_RSS_RETA_SIZE_512) {
            "The size of hash lookup table configured (%d)"
            "doesn't match the number hardware can "
            reta_size, lut_size);

    lut = rte_zmalloc(NULL, reta_size, 0);
        PMD_DRV_LOG(ERR, "No memory can be allocated");

    ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);

    for (i = 0; i < reta_size; i++) {
        idx = i / RTE_RETA_GROUP_SIZE;
        shift = i % RTE_RETA_GROUP_SIZE;
        if (reta_conf[idx].mask & (1ULL << shift))
            reta_conf[idx].reta[shift] = lut[i];

ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);

    if (!key || key_len == 0) {
        PMD_DRV_LOG(DEBUG, "No key to be configured");
    } else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
        PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);

        struct ice_aqc_get_set_rss_keys *key_dw =
            (struct ice_aqc_get_set_rss_keys *)key;

        ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
            PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");

ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
    struct ice_hw *hw = ICE_VSI_TO_HW(vsi);

    if (!key || !key_len)

    ret = ice_aq_get_rss_key
        (struct ice_aqc_get_set_rss_keys *)key);
        PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
    *key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

ice_rss_hash_update(struct rte_eth_dev *dev,
    struct rte_eth_rss_conf *rss_conf)
    enum ice_status status = ICE_SUCCESS;
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_vsi *vsi = pf->main_vsi;

    status = ice_set_rss_key(vsi, rss_conf->rss_key, rss_conf->rss_key_len);

    /* TODO: hash enable config, ice_add_rss_cfg */

ice_rss_hash_conf_get(struct rte_eth_dev *dev,
    struct rte_eth_rss_conf *rss_conf)
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_vsi *vsi = pf->main_vsi;

    ice_get_rss_key(vsi, rss_conf->rss_key,
        &rss_conf->rss_key_len);

    /* TODO: default set to 0 as hf config is not supported now */
    rss_conf->rss_hf = 0;

static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
    struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    msix_intr = intr_handle->intr_vec[queue_id];

    val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
        GLINT_DYN_CTL_ITR_INDX_M;
    val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;

    ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
    rte_intr_enable(&pci_dev->intr_handle);

static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
    struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    msix_intr = intr_handle->intr_vec[queue_id];

    ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);

ice_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    ret = snprintf(fw_version, fw_size, "%d.%d.%05d %d.%d",
        hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
        hw->api_maj_ver, hw->api_min_ver);

    /* add the size of '\0' */
    if (fw_size < (u32)ret)
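
/* Program port based VLAN insertion (PVID) and the tagged/untagged
 * acceptance mode in the VSI context via the update-VSI AQ command.
 */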
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
    struct ice_vsi_ctx ctxt;
    uint8_t vlan_flags = 0;

    if (!vsi || !info) {
        PMD_DRV_LOG(ERR, "invalid parameters");

    vsi->info.pvid = info->config.pvid;
    /* If insert pvid is enabled, only tagged pkts are
     * allowed to be sent out.
     */
    vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
        ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
    if (info->config.reject.tagged == 0)
        vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;
    if (info->config.reject.untagged == 0)
        vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
    vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
        ICE_AQ_VSI_VLAN_MODE_M);
    vsi->info.vlan_flags |= vlan_flags;
    memset(&ctxt, 0, sizeof(ctxt));
    rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
    ctxt.info.valid_sections =
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
    ctxt.vsi_num = vsi->vsi_id;

    hw = ICE_VSI_TO_HW(vsi);
    ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
    if (ret != ICE_SUCCESS) {
        "update VSI for VLAN insert failed, err %d",

    vsi->info.valid_sections |=
        rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
    struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
    struct ice_vsi *vsi = pf->main_vsi;
    struct rte_eth_dev_data *data = pf->dev_data;
    struct ice_vsi_vlan_pvid_info info;

    memset(&info, 0, sizeof(info));
        info.config.pvid = pvid;
        info.config.reject.tagged =
            data->dev_conf.txmode.hw_vlan_reject_tagged;
        info.config.reject.untagged =
            data->dev_conf.txmode.hw_vlan_reject_untagged;

    ret = ice_vsi_vlan_pvid_set(vsi, &info);
        PMD_DRV_LOG(ERR, "Failed to set pvid.");

ice_get_eeprom_length(struct rte_eth_dev *dev)
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    /* Convert word count to byte count */
    return hw->nvm.sr_words << 1;
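
/* Read NVM (shadow RAM) words into the caller's buffer; offset and
 * length in rte_dev_eeprom_info are in bytes and converted to words.
 */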
ice_get_eeprom(struct rte_eth_dev *dev,
    struct rte_dev_eeprom_info *eeprom)
    struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint16_t *data = eeprom->data;
    uint16_t offset, length, i;
    enum ice_status ret_code = ICE_SUCCESS;

    offset = eeprom->offset >> 1;
    length = eeprom->length >> 1;

    if (offset > hw->nvm.sr_words ||
        offset + length > hw->nvm.sr_words) {
        PMD_DRV_LOG(ERR, "Requested EEPROM bytes out of range.");

    eeprom->magic = hw->vendor_id | (hw->device_id << 16);

    for (i = 0; i < length; i++) {
        ret_code = ice_read_sr_word(hw, offset + i, &data[i]);
        if (ret_code != ICE_SUCCESS) {
            PMD_DRV_LOG(ERR, "EEPROM read failed.");

ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
    struct rte_pci_device *pci_dev)
    return rte_eth_dev_pci_generic_probe(pci_dev,
        sizeof(struct ice_adapter),

ice_pci_remove(struct rte_pci_device *pci_dev)
    return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);

static struct rte_pci_driver rte_ice_pmd = {
    .id_table = pci_id_ice_map,
    .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
        RTE_PCI_DRV_IOVA_AS_VA,
    .probe = ice_pci_probe,
    .remove = ice_pci_remove,

/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
    ICE_MAX_QP_NUM "=<int>");

RTE_INIT(ice_init_log)
    ice_logtype_init = rte_log_register("pmd.net.ice.init");
    if (ice_logtype_init >= 0)
        rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
    ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
    if (ice_logtype_driver >= 0)
        rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);