/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_ethdev_pci.h>

#include "base/ice_sched.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"

#define ICE_MAX_QP_NUM "max_queue_pair_num"
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100

int ice_logtype_init;
int ice_logtype_driver;
static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static void ice_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int ice_vlan_tpid_set(struct rte_eth_dev *dev,
			     enum rte_vlan_type vlan_type,
			     uint16_t tpid);
static int ice_rss_reta_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size);
static int ice_rss_reta_query(struct rte_eth_dev *dev,
			      struct rte_eth_rss_reta_entry64 *reta_conf,
			      uint16_t reta_size);
static int ice_rss_hash_update(struct rte_eth_dev *dev,
			       struct rte_eth_rss_conf *rss_conf);
static int ice_rss_hash_conf_get(struct rte_eth_dev *dev,
				 struct rte_eth_rss_conf *rss_conf);
static int ice_vlan_filter_set(struct rte_eth_dev *dev,
			       uint16_t vlan_id,
			       int on);
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr);
static int ice_macaddr_add(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr,
			   __rte_unused uint32_t index,
			   __rte_unused uint32_t pool);
static void ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id);
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id);
static int ice_vlan_pvid_set(struct rte_eth_dev *dev,
			     uint16_t pvid, int on);
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure                = ice_dev_configure,
	.dev_start                    = ice_dev_start,
	.dev_stop                     = ice_dev_stop,
	.dev_close                    = ice_dev_close,
	.dev_reset                    = ice_dev_reset,
	.rx_queue_start               = ice_rx_queue_start,
	.rx_queue_stop                = ice_rx_queue_stop,
	.tx_queue_start               = ice_tx_queue_start,
	.tx_queue_stop                = ice_tx_queue_stop,
	.rx_queue_setup               = ice_rx_queue_setup,
	.rx_queue_release             = ice_rx_queue_release,
	.tx_queue_setup               = ice_tx_queue_setup,
	.tx_queue_release             = ice_tx_queue_release,
	.dev_infos_get                = ice_dev_info_get,
	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
	.link_update                  = ice_link_update,
	.mtu_set                      = ice_mtu_set,
	.mac_addr_set                 = ice_macaddr_set,
	.mac_addr_add                 = ice_macaddr_add,
	.mac_addr_remove              = ice_macaddr_remove,
	.vlan_filter_set              = ice_vlan_filter_set,
	.vlan_offload_set             = ice_vlan_offload_set,
	.vlan_tpid_set                = ice_vlan_tpid_set,
	.reta_update                  = ice_rss_reta_update,
	.reta_query                   = ice_rss_reta_query,
	.rss_hash_update              = ice_rss_hash_update,
	.rss_hash_conf_get            = ice_rss_hash_conf_get,
	.rx_queue_intr_enable         = ice_rx_queue_intr_enable,
	.rx_queue_intr_disable        = ice_rx_queue_intr_disable,
	.vlan_pvid_set                = ice_vlan_pvid_set,
	.rxq_info_get                 = ice_rxq_info_get,
	.txq_info_get                 = ice_txq_info_get,
	.rx_queue_count               = ice_rx_queue_count,
};
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq, DPDK used as PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}
static int
ice_check_qp_num(const char *key, const char *qp_value,
		 __rte_unused void *opaque)
{
	int num = 0;
	char *end = NULL;

	while (isblank(*qp_value))
		qp_value++;

	num = strtoul(qp_value, &end, 10);

	if (!num || (*end == '-') || errno) {
		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			    "value must be > 0",
			    qp_value, key);
		return -1;
	}

	return num;
}
static int
ice_config_max_queue_pair_num(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *queue_num_key = ICE_MAX_QP_NUM;
	int ret;

	if (!devargs)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (!kvlist)
		return 0;

	if (!rte_kvargs_count(kvlist, queue_num_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	if (rte_kvargs_process(kvlist, queue_num_key,
			       ice_check_qp_num, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	ret = rte_kvargs_process(kvlist, queue_num_key,
				 ice_check_qp_num, NULL);
	rte_kvargs_free(kvlist);

	return ret;
}
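/* Illustration only (hypothetical command line, not part of this file):
 * the devargs key parsed above is supplied at EAL startup, e.g.
 *   testpmd -w 18:00.0,max_queue_pair_num=4 -- -i
 * which caps the PF at 4 queue pairs; see ice_pf_sw_init() below.
 */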
static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
		  uint32_t num)
{
	struct pool_entry *entry;

	if (!pool || !num)
		return -EINVAL;

	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
	if (!entry) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Look up the free list and find the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* Find the best one */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* Cannot find one to satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}
	/**
	 * The entry has the same queue number as requested,
	 * remove it from the free list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/**
		 * The entry has more queues than requested,
		 * create a new entry for the alloc_list and adjust the
		 * queue base and length of the entry left in free_list.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (!entry) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
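/* Worked example (values are illustrative): with a pool created by
 * ice_res_pool_init(pool, 1, 8), an allocation of num = 3 carves
 * entries [0, 2] out of the single free entry, returns
 * base 0 + pool->base 1 = 1, and leaves [3, 7] (len 5) on free_list.
 */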
static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
	struct pool_entry *entry, *next_entry;

	if (!pool)
		return;

	for (entry = LIST_FIRST(&pool->alloc_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	for (entry = LIST_FIRST(&pool->free_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set Hash scheme */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;

	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* default tc 0 now. Multi-TC support needs to be done later.
	 * Configure TC and queue mapping parameters; for each enabled TC,
	 * allocate qpnum_per_tc queues to this traffic class.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set the info.ingress_table and info.egress_table
	 * for UP translate table. Now just set it to 1:1 map by default
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);

	return 0;
}
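/* Decoding ICE_TC_QUEUE_TABLE_DFLT: each user priority n occupies the
 * n-th 3-bit field, e.g. UP3 = (0xFAC688 >> (3 * 3)) & 0x7 = 3, so the
 * constant above really is a 1:1 UP translation table.
 */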
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_unicast_ether_addr
		((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
			(struct ether_addr *)hw->port_info[0].mac.perm_addr);

	dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it to dev data */
	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
			&dev->data->mac_addrs[0]);
	return 0;
}
/* Find out specific MAC filter */
static struct ice_mac_filter *
ice_find_mac_filter(struct ice_vsi *vsi, struct ether_addr *macaddr)
{
	struct ice_mac_filter *f;

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (is_same_ether_addr(macaddr, &f->mac_info.mac_addr))
			return f;
	}

	return NULL;
}
static int
ice_add_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* If it's added and configured, return */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (f) {
		PMD_DRV_LOG(INFO, "This MAC filter already exists.");
		return 0;
	}

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* Add the mac */
	ret = ice_add_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		ret = -EINVAL;
		goto DONE;
	}
	/* Add the mac addr into mac list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	rte_memcpy(&f->mac_info.mac_addr, mac_addr, ETH_ADDR_LEN);
	TAILQ_INSERT_TAIL(&vsi->mac_list, f, next);
	vsi->mac_num++;

	ret = 0;

DONE:
	rte_free(m_list_itr);
	return ret;
}
static int
ice_remove_mac_filter(struct ice_vsi *vsi, struct ether_addr *mac_addr)
{
	struct ice_fltr_list_entry *m_list_itr = NULL;
	struct ice_mac_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/* Can't find it, return an error */
	f = ice_find_mac_filter(vsi, mac_addr);
	if (!f)
		return -ENOENT;

	INIT_LIST_HEAD(&list_head);

	m_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*m_list_itr));
	if (!m_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	ice_memcpy(m_list_itr->fltr_info.l_data.mac.mac_addr,
		   mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
	m_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	m_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	m_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	m_list_itr->fltr_info.flag = ICE_FLTR_TX;
	m_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&m_list_itr->list_entry, &list_head);

	/* remove the mac filter */
	ret = ice_remove_mac(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the mac addr from mac list */
	TAILQ_REMOVE(&vsi->mac_list, f, next);
	rte_free(f);
	vsi->mac_num--;

	ret = 0;
DONE:
	rte_free(m_list_itr);
	return ret;
}
/* Find out specific VLAN filter */
static struct ice_vlan_filter *
ice_find_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_vlan_filter *f;

	TAILQ_FOREACH(f, &vsi->vlan_list, next) {
		if (vlan_id == f->vlan_info.vlan_id)
			return f;
	}

	return NULL;
}
static int
ice_add_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	if (!vsi || vlan_id > ETHER_MAX_VLAN_ID)
		return -EINVAL;

	/* If it's added and configured, return. */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (f) {
		PMD_DRV_LOG(INFO, "This VLAN filter already exists.");
		return 0;
	}

	if (!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on)
		return 0;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}
	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* Add the vlan */
	ret = ice_add_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Add vlan into vlan list */
	f = rte_zmalloc(NULL, sizeof(*f), 0);
	if (!f) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		ret = -ENOMEM;
		goto DONE;
	}
	f->vlan_info.vlan_id = vlan_id;
	TAILQ_INSERT_TAIL(&vsi->vlan_list, f, next);
	vsi->vlan_num++;

	ret = 0;

DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_vlan_filter(struct ice_vsi *vsi, uint16_t vlan_id)
{
	struct ice_fltr_list_entry *v_list_itr = NULL;
	struct ice_vlan_filter *f;
	struct LIST_HEAD_TYPE list_head;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	/**
	 * VLAN 0 is the generic filter for untagged packets
	 * and can't be removed.
	 */
	if (!vsi || vlan_id == 0 || vlan_id > ETHER_MAX_VLAN_ID)
		return -EINVAL;

	/* Can't find it, return an error */
	f = ice_find_vlan_filter(vsi, vlan_id);
	if (!f)
		return -EINVAL;

	INIT_LIST_HEAD(&list_head);

	v_list_itr = (struct ice_fltr_list_entry *)
		ice_malloc(hw, sizeof(*v_list_itr));
	if (!v_list_itr) {
		ret = -ENOMEM;
		goto DONE;
	}

	v_list_itr->fltr_info.l_data.vlan.vlan_id = vlan_id;
	v_list_itr->fltr_info.src_id = ICE_SRC_ID_VSI;
	v_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	v_list_itr->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	v_list_itr->fltr_info.flag = ICE_FLTR_TX;
	v_list_itr->fltr_info.vsi_handle = vsi->idx;

	LIST_ADD(&v_list_itr->list_entry, &list_head);

	/* remove the vlan filter */
	ret = ice_remove_vlan(hw, &list_head);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to remove VLAN filter");
		ret = -EINVAL;
		goto DONE;
	}

	/* Remove the vlan id from vlan list */
	TAILQ_REMOVE(&vsi->vlan_list, f, next);
	rte_free(f);
	vsi->vlan_num--;

	ret = 0;
DONE:
	rte_free(v_list_itr);
	return ret;
}
static int
ice_remove_all_mac_vlan_filters(struct ice_vsi *vsi)
{
	struct ice_mac_filter *m_f;
	struct ice_vlan_filter *v_f;
	int ret = 0;

	if (!vsi || !vsi->mac_num)
		return -EINVAL;

	TAILQ_FOREACH(m_f, &vsi->mac_list, next) {
		ret = ice_remove_mac_filter(vsi, &m_f->mac_info.mac_addr);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

	if (vsi->vlan_num == 0)
		return 0;

	TAILQ_FOREACH(v_f, &vsi->vlan_list, next) {
		ret = ice_remove_vlan_filter(vsi, v_f->vlan_info.vlan_id);
		if (ret != ICE_SUCCESS) {
			ret = -EINVAL;
			goto DONE;
		}
	}

DONE:
	return ret;
}
static int
ice_vsi_config_qinq_insertion(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
	    rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST) ==
			    ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST)
				return 0; /* already on */
		} else {
			if (!(vsi->info.outer_tag_flags &
			      ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST))
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST;
	else
		qinq_flags = 0;
	/* clear global insertion and use per packet insertion */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_INSERT);
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use default outer VLAN type 0x9100 (ICE_DFLT_OUTER_TAG_TYPE) */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq insertion",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}
static int
ice_vsi_config_qinq_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t qinq_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
	    rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID)) {
		if (on) {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_COPY)
				return 0; /* already on */
		} else {
			if ((vsi->info.outer_tag_flags &
			     ICE_AQ_VSI_OUTER_TAG_MODE_M) ==
			    ICE_AQ_VSI_OUTER_TAG_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_COPY;
	else
		qinq_flags = ICE_AQ_VSI_OUTER_TAG_NOTHING;
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_MODE_M);
	vsi->info.outer_tag_flags |= qinq_flags;
	/* use default outer VLAN type 0x9100 (ICE_DFLT_OUTER_TAG_TYPE) */
	vsi->info.outer_tag_flags &= ~(ICE_AQ_VSI_OUTER_TAG_TYPE_M);
	vsi->info.outer_tag_flags |= ICE_DFLT_OUTER_TAG_TYPE <<
				     ICE_AQ_VSI_OUTER_TAG_TYPE_S;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO,
			    "Update VSI failed to %s qinq stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);

	return ret;
}
static int
ice_vsi_config_double_vlan(struct ice_vsi *vsi, int on)
{
	int ret;

	ret = ice_vsi_config_qinq_stripping(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to set qinq stripping - %d", ret);

	ret = ice_vsi_config_qinq_insertion(vsi, on);
	if (ret)
		PMD_DRV_LOG(ERR, "Fail to set qinq insertion - %d", ret);

	return ret;
}
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SPT
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}
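/* Note: writing the full GLINT_DYN_CTL_ITR_INDX_M field above selects
 * ITR index 3, which on this hardware family (to our understanding)
 * means "no ITR interval" for the software-triggered interrupt.
 */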
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			if (!ret)
				_rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
#ifdef ICE_LSE_SPT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SPT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SPT
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ice_link_update(dev, 0);
		_rte_eth_dev_callback_process
			(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
#endif

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_enable(dev->intr_handle);
}
/* Initialize SW parameters of PF */
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	if (ice_config_max_queue_pair_num(dev->device->devargs) > 0)
		pf->lan_nb_qp_max =
			ice_config_max_queue_pair_num(dev->device->devargs);
	else
		pf->lan_nb_qp_max =
			(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
					  hw->func_caps.common_cap.num_rxq);

	pf->lan_nb_qps = pf->lan_nb_qp_max;

	return 0;
}
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	struct ether_addr broadcast = {
		.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
	struct ether_addr mac_addr;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	/* base_queue is used in queue mapping of VSI add/update command.
	 * Suppose vsi->base_queue is 0 now, don't consider SRIOV, VMDQ
	 * cases in the first stage. Only Main VSI.
	 */
	vsi->base_queue = 0;
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by get_switch_config aq, which is done
		 * by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store VSI information in the SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	/* MAC configuration */
	rte_memcpy(pf->dev_addr.addr_bytes,
		   hw->port_info->mac.perm_addr,
		   ETH_ADDR_LEN);

	rte_memcpy(&mac_addr, &pf->dev_addr, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter");

	rte_memcpy(&mac_addr, &broadcast, ETHER_ADDR_LEN);
	ret = ice_add_mac_filter(vsi, &mac_addr);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to add MAC filter");

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
static int
ice_pf_setup(struct ice_pf *pf)
{
	struct ice_vsi *vsi;

	/* Clear all stats counters */
	pf->offset_loaded = FALSE;
	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
	if (!vsi) {
		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
		return -EINVAL;
	}

	pf->main_vsi = vsi;

	return 0;
}
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi;
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	vsi = pf->main_vsi;

	/* Disable double vlan by default */
	ice_vsi_config_double_vlan(vsi, FALSE);

	/* register callback func to eal lib */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return ret;
}
static int
ice_release_vsi(struct ice_vsi *vsi)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx vsi_ctx;
	enum ice_status ret;

	if (!vsi)
		return 0;

	hw = ICE_VSI_TO_HW(vsi);

	ice_remove_all_mac_vlan_filters(vsi);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));

	vsi_ctx.vsi_num = vsi->vsi_id;
	vsi_ctx.info = vsi->info;
	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
		rte_free(vsi);
		return -1;
	}

	rte_free(vsi);
	return 0;
}
static void
ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	/* disable interrupt and also clear all the existing config */
	for (i = 0; i < vsi->nb_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
		rte_wmb();
	}

	if (rte_intr_allow_others(intr_handle))
		/* vfio-pci */
		for (i = 0; i < vsi->nb_msix; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	else
		/* igb_uio */
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
}
static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *main_vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* disable all queue interrupts */
	ice_vsi_disable_queues_intr(main_vsi);

	/* Clear all queues and release mbufs */
	ice_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
}
static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ice_dev_stop(dev);

	/* release all queue resource */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);

	ice_shutdown_all_ctrlq(hw);
}
static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);

	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return 0;
}
static int
ice_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Initialize to TRUE. If any of Rx queues doesn't meet the
	 * bulk allocation or vector Rx preconditions we will reset it.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	return 0;
}
static int ice_init_rss(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct rte_eth_rss_conf *rss_conf;
	struct ice_aqc_get_set_rss_keys key;
	uint16_t i, nb_q;
	int ret = 0;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;
	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
	vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;

	if (!vsi->rss_key)
		vsi->rss_key = rte_zmalloc(NULL,
					   vsi->rss_key_size, 0);
	if (!vsi->rss_lut)
		vsi->rss_lut = rte_zmalloc(NULL,
					   vsi->rss_lut_size, 0);

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key */
		for (i = 0; i < vsi->rss_key_size; i++)
			vsi->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vsi->rss_key_size));
	}
	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
	if (ret)
		return -EINVAL;

	/* init RSS LUT table */
	for (i = 0; i < vsi->rss_lut_size; i++)
		vsi->rss_lut[i] = i % nb_q;

	ret = ice_aq_set_rss_lut(hw, vsi->idx,
				 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
				 vsi->rss_lut, vsi->rss_lut_size);
	if (ret)
		return -EINVAL;

	return 0;
}
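/* Example of the default LUT produced above (sizes are illustrative):
 * with a 512-entry LUT and 4 Rx queues, entries read 0,1,2,3,0,1,2,3,...
 * so hashed flows are spread across the queues in round-robin order.
 */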
static void
__vsi_queues_bind_intr(struct ice_vsi *vsi, uint16_t msix_vect,
		       int base_queue, int nb_queue)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint32_t val, val_tx;
	int i;

	for (i = 0; i < nb_queue; i++) {
		/* MSI-X index, ITR index 0, cause enable */
		val = (msix_vect & QINT_RQCTL_MSIX_INDX_M) |
		      (0 << QINT_RQCTL_ITR_INDX_S) | QINT_RQCTL_CAUSE_ENA_M;
		val_tx = (msix_vect & QINT_TQCTL_MSIX_INDX_M) |
			 (0 << QINT_TQCTL_ITR_INDX_S) | QINT_TQCTL_CAUSE_ENA_M;

		PMD_DRV_LOG(INFO, "queue %d is binding to vect %d",
			    base_queue + i, msix_vect);
		/* set ITR0 value */
		ICE_WRITE_REG(hw, GLINT_ITR(0, msix_vect), 0x10);
		ICE_WRITE_REG(hw, QINT_RQCTL(base_queue + i), val);
		ICE_WRITE_REG(hw, QINT_TQCTL(base_queue + i), val_tx);
	}
}
static void
ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_vect = vsi->msix_intr;
	uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
	uint16_t queue_idx = 0;
	int record = 0;
	int i;

	/* clear Rx/Tx queue interrupt */
	for (i = 0; i < vsi->nb_used_qps; i++) {
		ICE_WRITE_REG(hw, QINT_TQCTL(vsi->base_queue + i), 0);
		ICE_WRITE_REG(hw, QINT_RQCTL(vsi->base_queue + i), 0);
	}

	/* PF bind interrupt */
	if (rte_intr_dp_is_en(intr_handle)) {
		queue_idx = 0;
		record = 1;
	}

	for (i = 0; i < vsi->nb_used_qps; i++) {
		if (nb_msix <= 1) {
			if (!rte_intr_allow_others(intr_handle))
				msix_vect = ICE_MISC_VEC_ID;

			/* uio mapping all queue to one msix_vect */
			__vsi_queues_bind_intr(vsi, msix_vect,
					       vsi->base_queue + i,
					       vsi->nb_used_qps - i);

			for (; !!record && i < vsi->nb_used_qps; i++)
				intr_handle->intr_vec[queue_idx + i] =
					msix_vect;
			break;
		}

		/* vfio 1:1 queue/msix_vect mapping */
		__vsi_queues_bind_intr(vsi, msix_vect,
				       vsi->base_queue + i, 1);

		if (!!record)
			intr_handle->intr_vec[queue_idx + i] = msix_vect;

		msix_vect++;
		nb_msix--;
	}
}
static void
ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
{
	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint16_t msix_intr, i;

	if (rte_intr_allow_others(intr_handle))
		for (i = 0; i < vsi->nb_used_qps; i++) {
			msix_intr = vsi->msix_intr + i;
			ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr),
				      GLINT_DYN_CTL_INTENA_M |
				      GLINT_DYN_CTL_CLEARPBA_M |
				      GLINT_DYN_CTL_ITR_INDX_M |
				      GLINT_DYN_CTL_WB_ON_ITR_M);
		}
	else
		ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
			      GLINT_DYN_CTL_INTENA_M |
			      GLINT_DYN_CTL_CLEARPBA_M |
			      GLINT_DYN_CTL_ITR_INDX_M |
			      GLINT_DYN_CTL_WB_ON_ITR_M);
}
static int
ice_rxq_intr_setup(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_vsi *vsi = pf->main_vsi;
	uint32_t intr_vector = 0;

	rte_intr_disable(intr_handle);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (intr_vector > ICE_MAX_INTR_QUEUE_NUM) {
			PMD_DRV_LOG(ERR, "At most %d intr queues supported",
				    ICE_MAX_INTR_QUEUE_NUM);
			return -ENOTSUP;
		}
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc(NULL, dev->data->nb_rx_queues * sizeof(int),
				    0);
		if (!intr_handle->intr_vec) {
			PMD_DRV_LOG(ERR,
				    "Failed to allocate %d rx_queues intr_vec",
				    dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* Map queues with MSIX interrupt */
	vsi->nb_used_qps = dev->data->nb_rx_queues;
	ice_vsi_queues_bind_intr(vsi);

	/* Enable interrupts for all the queues */
	ice_vsi_enable_queues_intr(vsi);

	rte_intr_enable(intr_handle);

	return 0;
}
static int
ice_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t nb_rxq = 0;
	uint16_t nb_txq, i;
	int ret;

	/* program Tx queues' context in hardware */
	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
		ret = ice_tx_queue_start(dev, nb_txq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
			goto tx_err;
		}
	}

	/* program Rx queues' context in hardware*/
	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
		ret = ice_rx_queue_start(dev, nb_rxq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
			goto rx_err;
		}
	}

	ret = ice_init_rss(pf);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
		goto rx_err;
	}

	ice_set_rx_function(dev);

	/* enable Rx interrupt and map Rx queues to interrupt vectors */
	if (ice_rxq_intr_setup(dev))
		return -EIO;

	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
				    NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(WARNING, "Fail to set phy mask");

	/* Call get_link_info aq command to enable/disable LSE */
	ice_link_update(dev, 0);

	pf->adapter_stopped = false;

	return 0;

	/* stop the started queues if failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_tx_queue_stop(dev, i);

	return -EIO;
}
static int
ice_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = ice_dev_uninit(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
		return -ENXIO;
	}

	ret = ice_dev_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
		return -ENXIO;
	}

	return 0;
}
static void
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_VLAN_FILTER;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M |
			       ETH_LINK_SPEED_100M |
			       ETH_LINK_SPEED_1G |
			       ETH_LINK_SPEED_2_5G |
			       ETH_LINK_SPEED_5G |
			       ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_20G |
			       ETH_LINK_SPEED_25G |
			       ETH_LINK_SPEED_40G;

	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
}
static int
ice_atomic_read_link_status(struct rte_eth_dev *dev,
			    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static int
ice_atomic_write_link_status(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
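/* Both helpers above assume struct rte_eth_link packs into 64 bits, so
 * rte_atomic64_cmpset() can publish the whole link state in a single
 * atomic store; this mirrors how other PMDs of this era update it.
 */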
static int
ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_link_status link_status;
	struct rte_eth_link link, old;
	enum ice_status status;
	unsigned int rep_cnt = MAX_REPEAT_TIME;
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	ice_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = ice_aq_get_link_info(hw->port_info, enable_lse,
					      &link_status, NULL);
		if (status != ICE_SUCCESS) {
			link.link_speed = ETH_SPEED_NUM_100M;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
		}

		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
		if (!wait_to_complete || link.link_status)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (--rep_cnt);

	if (!link.link_status)
		goto out;

	/* Full-duplex operation at all supported speeds */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case ICE_AQ_LINK_SPEED_10MB:
		link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		link.link_speed = ETH_SPEED_NUM_40G;
		break;
	case ICE_AQ_LINK_SPEED_UNKNOWN:
	default:
		PMD_DRV_LOG(ERR, "Unknown link speed");
		link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);

out:
	ice_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
static int
ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_eth_dev_data *dev_data = pf->dev_data;
	uint32_t frame_size = mtu + ETHER_HDR_LEN
			      + ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE;

	/* check if mtu is within the allowed range */
	if (mtu < ETHER_MIN_MTU || frame_size > ICE_FRAME_SIZE_MAX)
		return -EINVAL;

	/* mtu setting is forbidden if the port is started */
	if (dev_data->dev_started) {
		PMD_DRV_LOG(ERR,
			    "port %d must be stopped before configuration",
			    dev_data->port_id);
		return -EBUSY;
	}

	if (frame_size > ETHER_MAX_LEN)
		dev_data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev_data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	return 0;
}
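/* Sizing example: assuming ICE_VLAN_TAG_SIZE is 4 bytes (an assumption,
 * the macro is defined elsewhere), an MTU of 1500 yields frame_size
 * 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) + 4 = 1522, which is
 * above ETHER_MAX_LEN (1518), so the jumbo frame offload flag is set.
 */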
static int ice_macaddr_set(struct rte_eth_dev *dev,
			   struct ether_addr *mac_addr)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct ice_mac_filter *f;
	uint8_t flags = 0;
	int ret;

	if (!is_valid_assigned_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
		return -EINVAL;
	}

	TAILQ_FOREACH(f, &vsi->mac_list, next) {
		if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr))
			break;
	}

	if (!f) {
		PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
		return -EIO;
	}

	ret = ice_remove_mac_filter(vsi, &f->mac_info.mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to delete mac filter");
		return -EIO;
	}
	ret = ice_add_mac_filter(vsi, mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add mac filter");
		return -EIO;
	}
	memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);

	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
	ret = ice_aq_manage_mac_write(hw, mac_addr->addr_bytes, flags, NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(ERR, "Failed to set manage mac");

	return 0;
}
/* Add a MAC address, and update filters */
static int
ice_macaddr_add(struct rte_eth_dev *dev,
		struct ether_addr *mac_addr,
		__rte_unused uint32_t index,
		__rte_unused uint32_t pool)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	int ret;

	ret = ice_add_mac_filter(vsi, mac_addr);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to add MAC filter");
		return -EINVAL;
	}

	return ICE_SUCCESS;
}
/* Remove a MAC address, and update filters */
static void
ice_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = dev->data;
	struct ether_addr *macaddr;
	int ret;

	macaddr = &data->mac_addrs[index];
	ret = ice_remove_mac_filter(vsi, macaddr);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to remove MAC filter");
		return;
	}
}
static int
ice_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (on) {
		ret = ice_add_vlan_filter(vsi, vlan_id);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to add vlan filter");
			return -EINVAL;
		}
	} else {
		ret = ice_remove_vlan_filter(vsi, vlan_id);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Failed to remove vlan filter");
			return -EINVAL;
		}
	}

	return 0;
}
/* Configure vlan filter on or off */
static int
ice_vsi_config_vlan_filter(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t sec_flags, sw_flags2;
	int ret = 0;

	sec_flags = ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
		    ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
	sw_flags2 = ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;

	if (on) {
		vsi->info.sec_flags |= sec_flags;
		vsi->info.sw_flags2 |= sw_flags2;
	} else {
		vsi->info.sec_flags &= ~sec_flags;
		vsi->info.sw_flags2 &= ~sw_flags2;
	}
	vsi->info.sw_id = hw->port_info->sw_id;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
				 ICE_AQ_VSI_PROP_SECURITY_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan rx pruning",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_SW_VALID |
				 ICE_AQ_VSI_PROP_SECURITY_VALID);

	return ret;
}
static int
ice_vsi_config_vlan_stripping(struct ice_vsi *vsi, bool on)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags;
	int ret = 0;

	/* Check if it has been already on or off */
	if (vsi->info.valid_sections &
	    rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID)) {
		if (on) {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_STR_BOTH)
				return 0; /* already on */
		} else {
			if ((vsi->info.vlan_flags &
			     ICE_AQ_VSI_VLAN_EMOD_M) ==
			    ICE_AQ_VSI_VLAN_EMOD_NOTHING)
				return 0; /* already off */
		}
	}

	if (on)
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_VLAN_EMOD_M);
	vsi->info.vlan_flags |= vlan_flags;
	(void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret) {
		PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping",
			    on ? "enable" : "disable");
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
static int
ice_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			ice_vsi_config_vlan_filter(vsi, TRUE);
		else
			ice_vsi_config_vlan_filter(vsi, FALSE);
	}

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			ice_vsi_config_vlan_stripping(vsi, TRUE);
		else
			ice_vsi_config_vlan_stripping(vsi, FALSE);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			ice_vsi_config_double_vlan(vsi, TRUE);
		else
			ice_vsi_config_double_vlan(vsi, FALSE);
	}

	return 0;
}
static int
ice_vlan_tpid_set(struct rte_eth_dev *dev,
		  enum rte_vlan_type vlan_type,
		  uint16_t tpid)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint64_t reg_r = 0, reg_w = 0;
	uint16_t reg_id = 0;
	int ret = 0;
	int qinq = dev->data->dev_conf.rxmode.offloads &
		   DEV_RX_OFFLOAD_VLAN_EXTEND;

	switch (vlan_type) {
	case ETH_VLAN_TYPE_OUTER:
		if (qinq)
			reg_id = 3;
		else
			reg_id = 5;
		break;
	case ETH_VLAN_TYPE_INNER:
		if (qinq) {
			reg_id = 5;
		} else {
			PMD_DRV_LOG(ERR,
				    "Unsupported vlan type in single vlan.");
			return -EINVAL;
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
		return -EINVAL;
	}
	reg_r = ICE_READ_REG(hw, GL_SWT_L2TAGCTRL(reg_id));
	PMD_DRV_LOG(DEBUG, "Debug read from ICE GL_SWT_L2TAGCTRL[%d]: "
		    "0x%08"PRIx64"", reg_id, reg_r);

	reg_w = reg_r & (~(GL_SWT_L2TAGCTRL_ETHERTYPE_M));
	reg_w |= ((uint64_t)tpid << GL_SWT_L2TAGCTRL_ETHERTYPE_S);
	if (reg_r == reg_w) {
		PMD_DRV_LOG(DEBUG, "No need to write");
		return 0;
	}

	ICE_WRITE_REG(hw, GL_SWT_L2TAGCTRL(reg_id), reg_w);
	PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
		    "ICE GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);

	return ret;
}
static int
ice_get_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret;

	if (!lut)
		return -EINVAL;

	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
		ret = ice_aq_get_rss_lut(hw, vsi->idx, TRUE,
					 lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
			return -EINVAL;
		}
	} else {
		/* PFQF_HLUT registers are 32 bits wide and hold 4 LUT
		 * entries each, hence the uint32_t view and lut_size / 4.
		 */
		uint32_t *lut_dw = (uint32_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			lut_dw[i] = ICE_READ_REG(hw, PFQF_HLUT(i));
	}

	return 0;
}
static int
ice_set_rss_lut(struct ice_vsi *vsi, uint8_t *lut, uint16_t lut_size)
{
	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret;

	if (!vsi || !lut)
		return -EINVAL;

	if (pf->flags & ICE_FLAG_RSS_AQ_CAPABLE) {
		ret = ice_aq_set_rss_lut(hw, vsi->idx, TRUE,
					 lut, lut_size);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
			return -EINVAL;
		}
	} else {
		/* See ice_get_rss_lut(): 4 8-bit LUT entries per register */
		uint32_t *lut_dw = (uint32_t *)lut;
		uint16_t i, lut_size_dw = lut_size / 4;

		for (i = 0; i < lut_size_dw; i++)
			ICE_WRITE_REG(hw, PFQF_HLUT(i), lut_dw[i]);

		ice_flush(hw);
	}

	return 0;
}
static int
ice_rss_reta_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
	uint16_t idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != lut_size ||
	    reta_size > ETH_RSS_RETA_SIZE_512) {
		PMD_DRV_LOG(ERR,
			    "The size of hash lookup table configured (%d) "
			    "doesn't match the number hardware can "
			    "support (%d)",
			    reta_size, lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc(NULL, reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}
	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
	if (ret)
		goto out;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			lut[i] = reta_conf[idx].reta[shift];
	}
	ret = ice_set_rss_lut(pf->main_vsi, lut, reta_size);

out:
	rte_free(lut);

	return ret;
}
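/* Indexing example for the update loop above: RETA entry 70 lives in
 * reta_conf[1] (70 / RTE_RETA_GROUP_SIZE, 64 entries per group) at
 * slot 6 (70 % 64), so lut[70] is taken from reta_conf[1].reta[6]
 * when bit 6 of reta_conf[1].mask is set.
 */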
static int
ice_rss_reta_query(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t i, lut_size = hw->func_caps.common_cap.rss_table_size;
	uint16_t idx, shift;
	uint8_t *lut;
	int ret;

	if (reta_size != lut_size ||
	    reta_size > ETH_RSS_RETA_SIZE_512) {
		PMD_DRV_LOG(ERR,
			    "The size of hash lookup table configured (%d) "
			    "doesn't match the number hardware can "
			    "support (%d)",
			    reta_size, lut_size);
		return -EINVAL;
	}

	lut = rte_zmalloc(NULL, reta_size, 0);
	if (!lut) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	ret = ice_get_rss_lut(pf->main_vsi, lut, reta_size);
	if (ret)
		goto out;

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = lut[i];
	}

out:
	rte_free(lut);

	return ret;
}
static int
ice_set_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t key_len)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret = 0;

	if (!key || key_len == 0) {
		PMD_DRV_LOG(DEBUG, "No key to be configured");
		return 0;
	} else if (key_len != (VSIQF_HKEY_MAX_INDEX + 1) *
		   sizeof(uint32_t)) {
		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
		return -EINVAL;
	}

	struct ice_aqc_get_set_rss_keys *key_dw =
		(struct ice_aqc_get_set_rss_keys *)key;

	ret = ice_aq_set_rss_key(hw, vsi->idx, key_dw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to configure RSS key via AQ");
		ret = -EINVAL;
	}

	return ret;
}
static int
ice_get_rss_key(struct ice_vsi *vsi, uint8_t *key, uint8_t *key_len)
{
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	int ret;

	if (!key || !key_len)
		return -EINVAL;

	ret = ice_aq_get_rss_key
		(hw, vsi->idx,
		 (struct ice_aqc_get_set_rss_keys *)key);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get RSS key via AQ");
		return -EINVAL;
	}
	*key_len = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	return 0;
}
static int
ice_rss_hash_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_conf *rss_conf)
{
	enum ice_status status = ICE_SUCCESS;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;

	/* set hash key */
	status = ice_set_rss_key(vsi, rss_conf->rss_key,
				 rss_conf->rss_key_len);
	if (status)
		return status;

	/* TODO: hash enable config, ice_add_rss_cfg */
	return 0;
}
static int
ice_rss_hash_conf_get(struct rte_eth_dev *dev,
		      struct rte_eth_rss_conf *rss_conf)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;

	ice_get_rss_key(vsi, rss_conf->rss_key,
			&rss_conf->rss_key_len);

	/* TODO: default set to 0 as hf config is not supported now */
	rss_conf->rss_hf = 0;

	return 0;
}
static int ice_rx_queue_intr_enable(struct rte_eth_dev *dev,
				    uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t val;
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];

	val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
	      GLINT_DYN_CTL_ITR_INDX_M;
	val &= ~GLINT_DYN_CTL_WB_ON_ITR_M;

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), val);
	rte_intr_enable(&pci_dev->intr_handle);

	return 0;
}
static int ice_rx_queue_intr_disable(struct rte_eth_dev *dev,
				     uint16_t queue_id)
{
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint16_t msix_intr;

	msix_intr = intr_handle->intr_vec[queue_id];

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(msix_intr), GLINT_DYN_CTL_WB_ON_ITR_M);

	return 0;
}
static int
ice_vsi_vlan_pvid_set(struct ice_vsi *vsi, struct ice_vsi_vlan_pvid_info *info)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx ctxt;
	uint8_t vlan_flags = 0;
	int ret;

	if (!vsi || !info) {
		PMD_DRV_LOG(ERR, "invalid parameters");
		return -EINVAL;
	}

	if (info->on) {
		vsi->info.pvid = info->config.pvid;
		/**
		 * If insert pvid is enabled, only tagged pkts are
		 * allowed to be sent out.
		 */
		vlan_flags = ICE_AQ_VSI_PVLAN_INSERT_PVID |
			     ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	} else {
		vsi->info.pvid = 0;
		if (info->config.reject.tagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_TAGGED;

		if (info->config.reject.untagged == 0)
			vlan_flags |= ICE_AQ_VSI_VLAN_MODE_UNTAGGED;
	}
	vsi->info.vlan_flags &= ~(ICE_AQ_VSI_PVLAN_INSERT_PVID |
				  ICE_AQ_VSI_VLAN_MODE_M);
	vsi->info.vlan_flags |= vlan_flags;
	memset(&ctxt, 0, sizeof(ctxt));
	rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
	ctxt.info.valid_sections =
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);
	ctxt.vsi_num = vsi->vsi_id;

	hw = ICE_VSI_TO_HW(vsi);
	ret = ice_update_vsi(hw, vsi->idx, &ctxt, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_DRV_LOG(ERR,
			    "update VSI for VLAN insert failed, err %d",
			    ret);
		return -EINVAL;
	}

	vsi->info.valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_VLAN_VALID);

	return ret;
}
static int
ice_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev_data *data = pf->dev_data;
	struct ice_vsi_vlan_pvid_info info;
	int ret;

	memset(&info, 0, sizeof(info));
	info.on = on;
	if (info.on) {
		info.config.pvid = pvid;
	} else {
		info.config.reject.tagged =
			data->dev_conf.txmode.hw_vlan_reject_tagged;
		info.config.reject.untagged =
			data->dev_conf.txmode.hw_vlan_reject_untagged;
	}

	ret = ice_vsi_vlan_pvid_set(vsi, &info);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set pvid.");
		return -EINVAL;
	}

	return 0;
}
static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_adapter),
					     ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}
static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};
/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_MAX_QP_NUM "=<int>");
RTE_INIT(ice_init_log)
{
	ice_logtype_init = rte_log_register("pmd.net.ice.init");
	if (ice_logtype_init >= 0)
		rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
	ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
	if (ice_logtype_driver >= 0)
		rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
}