/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_ethdev_pci.h>

#include "base/ice_sched.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"

#define ICE_MAX_QP_NUM "max_queue_pair_num"
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100

int ice_logtype_init;
int ice_logtype_driver;
static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static void ice_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int ice_link_update(struct rte_eth_dev *dev,
			   int wait_to_complete);
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
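
/* This table is the dispatch point for the generic ethdev API: once
 * ice_dev_init() installs it on the port, rte_eth_dev_configure(),
 * rte_eth_dev_start() and friends resolve to the ice_* callbacks
 * registered below.
 */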
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure                = ice_dev_configure,
	.dev_start                    = ice_dev_start,
	.dev_stop                     = ice_dev_stop,
	.dev_close                    = ice_dev_close,
	.dev_reset                    = ice_dev_reset,
	.rx_queue_start               = ice_rx_queue_start,
	.rx_queue_stop                = ice_rx_queue_stop,
	.tx_queue_start               = ice_tx_queue_start,
	.tx_queue_stop                = ice_tx_queue_stop,
	.rx_queue_setup               = ice_rx_queue_setup,
	.rx_queue_release             = ice_rx_queue_release,
	.tx_queue_setup               = ice_tx_queue_setup,
	.tx_queue_release             = ice_tx_queue_release,
	.dev_infos_get                = ice_dev_info_get,
	.dev_supported_ptypes_get     = ice_dev_supported_ptypes_get,
	.link_update                  = ice_link_update,
	.rxq_info_get                 = ice_rxq_info_get,
	.txq_info_get                 = ice_txq_info_get,
	.rx_queue_count               = ice_rx_queue_count,
};
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq; DPDK is used as the PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}
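
/* The control queue depths and buffer sizes set above are picked up by
 * ice_init_hw() when it allocates the admin and mailbox queue rings, so
 * ice_init_controlq_parameter() must run before hardware init (see
 * ice_dev_init() below). The admin queue carries driver-to-firmware
 * commands; the mailbox queue carries PF-to-VF messages.
 */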
static int
ice_check_qp_num(const char *key, const char *qp_value,
		 void *opaque)
{
	int *num = opaque;
	char *end = NULL;
	int qp_num;

	errno = 0;

	while (isblank(*qp_value))
		qp_value++;

	qp_num = (int)strtoul(qp_value, &end, 10);

	if (qp_num <= 0 || (*end == '-') || errno) {
		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			    "value must be > 0",
			    qp_value, key);
		return -1;
	}

	/* rte_kvargs_process() only propagates 0 or -1 from the handler,
	 * so hand the parsed value back through @opaque.
	 */
	*num = qp_num;
	return 0;
}
static int
ice_config_max_queue_pair_num(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *queue_num_key = ICE_MAX_QP_NUM;
	int num = 0;

	if (!devargs)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (!kvlist)
		return 0;

	if (!rte_kvargs_count(kvlist, queue_num_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	/* the value parsed by ice_check_qp_num() is returned via &num */
	if (rte_kvargs_process(kvlist, queue_num_key,
			       ice_check_qp_num, &num) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return num;
}
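
/* Example: the devargs string "max_queue_pair_num=4" makes this function
 * return 4, while "0" or "-1" is rejected by ice_check_qp_num() and the
 * driver keeps its default queue pair limit.
 */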
static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
		  uint32_t num)
{
	struct pool_entry *entry;

	if (!pool || !num)
		return -EINVAL;

	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
	if (!entry) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize element: the whole range starts out free */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Look up the free list and find the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* An exact fit cannot be beaten, stop searching */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry can satisfy the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}

	/* The entry has exactly the requested number of queues,
	 * remove it from the free list.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/* The entry has more queues than requested: create a new
		 * entry for the alloc list and shrink the base and length
		 * of the entry left on the free list.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (!entry) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into the alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
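
/* A minimal sketch of the pool behavior, assuming a pool initialized with
 * ice_res_pool_init(&pool, 1, 16): the free list holds {base 0, len 16}.
 * ice_res_pool_alloc(&pool, 4) splits that entry, returns 1 (base 0 plus
 * pool base 1) and leaves {base 4, len 12} on the free list.
 */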
static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
	struct pool_entry *entry, *next_entry;

	if (!pool)
		return;

	for (entry = LIST_FIRST(&pool->alloc_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	for (entry = LIST_FIRST(&pool->free_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set hash scheme (Toeplitz) */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;

	/* enable TC override */
	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* Default is TC0 only for now; multi-TC support needs to be added
	 * later. Configure TC and queue mapping parameters: for each
	 * enabled TC, allocate qpnum_per_tc queues to its traffic.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return -ENOTSUP;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to the actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;

	qp_idx = 0;
	/* Set tc and queue mapping with VSI */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set info.ingress_table and info.egress_table for the UP translate
	 * table. For now just use a 1:1 map by default:
	 * -- 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	return 0;
}
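
/* Note the power-of-two adjustment above: rte_bsf32() returns the index of
 * the least significant set bit, so a non-power-of-two queue count collapses
 * to its lowest set bit, e.g. nb_qps = 6 (0b110) gives bsf = 1 and
 * nb_qps = 2, while nb_qps = 8 stays 8.
 */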
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_unicast_ether_addr
		((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
			(struct ether_addr *)hw->port_info[0].mac.perm_addr);

	dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it in dev data */
	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
			&dev->data->mac_addrs[0]);
	return 0;
}
static void
ice_pf_enable_irq0(struct ice_hw *hw)
{
	/* reset the registers */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, 0);
	ICE_READ_REG(hw, PFINT_OICR);

#ifdef ICE_LSE_SUPPORT
	/* Enable all interrupt causes except link state change */
	ICE_WRITE_REG(hw, PFINT_OICR_ENA,
		      (uint32_t)(PFINT_OICR_ENA_INT_ENA_M &
				 (~PFINT_OICR_LINK_STAT_CHANGE_M)));

	ICE_WRITE_REG(hw, PFINT_OICR_CTL,
		      (0 & PFINT_OICR_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_OICR_CTL_ITR_INDX_S) &
		       PFINT_OICR_CTL_ITR_INDX_M) |
		      PFINT_OICR_CTL_CAUSE_ENA_M);

	ICE_WRITE_REG(hw, PFINT_FW_CTL,
		      (0 & PFINT_FW_CTL_MSIX_INDX_M) |
		      ((0 << PFINT_FW_CTL_ITR_INDX_S) &
		       PFINT_FW_CTL_ITR_INDX_M) |
		      PFINT_FW_CTL_CAUSE_ENA_M);
#else
	ICE_WRITE_REG(hw, PFINT_OICR_ENA, PFINT_OICR_ENA_INT_ENA_M);
#endif

	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0),
		      GLINT_DYN_CTL_INTENA_M |
		      GLINT_DYN_CTL_CLEARPBA_M |
		      GLINT_DYN_CTL_ITR_INDX_M);

	ice_flush(hw);
}
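
/* The GLINT_DYN_CTL_ITR_INDX_M value written above selects ITR index 3,
 * the "no ITR" index on this hardware family, so enabling the vector does
 * not disturb the interrupt throttling interval.
 */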
static void
ice_pf_disable_irq0(struct ice_hw *hw)
{
	/* Disable all interrupt types */
	ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
	ice_flush(hw);
}
static void
ice_handle_aq_msg(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_ctl_q_info *cq = &hw->adminq;
	struct ice_rq_event_info event;
	uint16_t pending, opcode;
	int ret;

	event.buf_len = ICE_AQ_MAX_BUF_LEN;
	event.msg_buf = rte_zmalloc(NULL, event.buf_len, 0);
	if (!event.msg_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate mem");
		return;
	}

	/* drain the admin receive queue until no events are pending */
	pending = 1;
	while (pending) {
		ret = ice_clean_rq_elem(hw, cq, &event, &pending);

		if (ret != ICE_SUCCESS) {
			PMD_DRV_LOG(INFO,
				    "Failed to read msg from AdminQ, "
				    "adminq_err: %u",
				    hw->adminq.sq_last_status);
			break;
		}
		opcode = rte_le_to_cpu_16(event.desc.opcode);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			ret = ice_link_update(dev, 0);
			if (!ret)
				_rte_eth_dev_callback_process
					(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
				    opcode);
			break;
		}
	}
	rte_free(event.msg_buf);
}
/**
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
ice_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t oicr;
	uint32_t reg;
	uint8_t pf_num;
	uint8_t event;
	uint16_t queue;
#ifdef ICE_LSE_SUPPORT
	uint32_t int_fw_ctl;
#endif

	/* Disable interrupt */
	ice_pf_disable_irq0(hw);

	/* read out interrupt causes */
	oicr = ICE_READ_REG(hw, PFINT_OICR);
#ifdef ICE_LSE_SUPPORT
	int_fw_ctl = ICE_READ_REG(hw, PFINT_FW_CTL);
#endif

	/* No interrupt event indicated */
	if (!(oicr & PFINT_OICR_INTEVENT_M)) {
		PMD_DRV_LOG(INFO, "No interrupt event");
		goto done;
	}

#ifdef ICE_LSE_SUPPORT
	if (int_fw_ctl & PFINT_FW_CTL_INTEVENT_M) {
		PMD_DRV_LOG(INFO, "FW_CTL: link state change event");
		ice_handle_aq_msg(dev);
	}
#else
	if (oicr & PFINT_OICR_LINK_STAT_CHANGE_M) {
		PMD_DRV_LOG(INFO, "OICR: link state change event");
		ice_link_update(dev, 0);
	}
#endif

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		PMD_DRV_LOG(WARNING, "OICR: MDD event");
		reg = ICE_READ_REG(hw, GL_MDET_TX_PQM);
		if (reg & GL_MDET_TX_PQM_VALID_M) {
			pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				 GL_MDET_TX_PQM_PF_NUM_S;
			event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by PQM on TX queue %d PF# %d",
				    event, queue, pf_num);
		}

		reg = ICE_READ_REG(hw, GL_MDET_TX_TCLAN);
		if (reg & GL_MDET_TX_TCLAN_VALID_M) {
			pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
				 GL_MDET_TX_TCLAN_PF_NUM_S;
			event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
				GL_MDET_TX_TCLAN_MAL_TYPE_S;
			queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >>
				GL_MDET_TX_TCLAN_QNUM_S;

			PMD_DRV_LOG(WARNING, "Malicious Driver Detection event "
				    "%d by TCLAN on TX queue %d PF# %d",
				    event, queue, pf_num);
		}
	}
done:
	/* Enable interrupt */
	ice_pf_enable_irq0(hw);
	rte_intr_enable(dev->intr_handle);
}
/* Initialize SW parameters of PF */
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int max_qps = ice_config_max_queue_pair_num(dev->device->devargs);

	if (max_qps > 0)
		pf->lan_nb_qp_max = (uint16_t)max_qps;
	else
		pf->lan_nb_qp_max =
			(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
					  hw->func_caps.common_cap.num_rxq);

	pf->lan_nb_qps = pf->lan_nb_qp_max;

	return 0;
}
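
/* For example (hypothetical capability values): with max_queue_pair_num=4
 * in the devargs, lan_nb_qp_max is 4; without the devarg and with hardware
 * reporting num_txq = num_rxq = 64, it falls back to 64.
 */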
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	/* base_queue is used in the queue mapping of the VSI add/update
	 * command. Assume vsi->base_queue is 0 for now and do not consider
	 * the SRIOV or VMDQ cases in this first stage: main VSI only.
	 */
	vsi->base_queue = 0;
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by the get_switch_config aq, which
		 * is done by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}

		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0) {
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		}
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}
	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store vsi information in the SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of the TX queues.
	 * Currently vsi->nb_qps means it.
	 * Correct it if any change.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;
fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
static int
ice_pf_setup(struct ice_pf *pf)
{
	struct ice_vsi *vsi;

	/* Clear all stats counters */
	pf->offset_loaded = FALSE;
	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
	if (!vsi) {
		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
		return -EINVAL;
	}

	pf->main_vsi = vsi;

	return 0;
}
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct rte_intr_handle *intr_handle;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;
	dev->rx_pkt_burst = ice_recv_pkts;
	dev->tx_pkt_burst = ice_xmit_pkts;
	dev->tx_pkt_prepare = ice_prep_pkts;

	ice_set_default_ptype_table(dev);
	pci_dev = RTE_DEV_TO_PCI(dev->device);
	intr_handle = &pci_dev->intr_handle;

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	/* register interrupt callback with the EAL */
	rte_intr_callback_register(intr_handle,
				   ice_interrupt_handler, dev);

	ice_pf_enable_irq0(hw);

	/* enable uio intr after callback register */
	rte_intr_enable(intr_handle);

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return ret;
}
static int
ice_release_vsi(struct ice_vsi *vsi)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx vsi_ctx;
	enum ice_status ret;

	if (!vsi)
		return 0;

	hw = ICE_VSI_TO_HW(vsi);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));

	vsi_ctx.vsi_num = vsi->vsi_id;
	vsi_ctx.info = vsi->info;
	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
		rte_free(vsi);
		return -1;
	}

	rte_free(vsi);
	return 0;
}
static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* Clear all queues and release mbufs */
	ice_clear_queues(dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	pf->adapter_stopped = true;
}
static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ice_dev_stop(dev);

	/* release all queue resource */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);

	ice_shutdown_all_ctrlq(hw);
}
static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	ice_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* unregister interrupt callback from the EAL */
	rte_intr_callback_unregister(intr_handle,
				     ice_interrupt_handler, dev);

	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return 0;
}
static int
ice_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Initialize to TRUE. If any Rx queue fails to meet the bulk
	 * allocation or vector Rx preconditions, it will be reset.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	return 0;
}
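
/* These two flags are consumed at queue setup time: the Rx/Tx queue setup
 * paths in ice_rxtx.c are expected to clear them for the whole port when a
 * queue's configuration fails the bulk-allocation or simple-Tx checks.
 */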
static int ice_init_rss(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct rte_eth_rss_conf *rss_conf;
	struct ice_aqc_get_set_rss_keys key;
	uint16_t i, nb_q;
	int ret = 0;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;
	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
	vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;

	if (!vsi->rss_key)
		vsi->rss_key = rte_zmalloc(NULL,
					   vsi->rss_key_size, 0);
	if (!vsi->rss_lut)
		vsi->rss_lut = rte_zmalloc(NULL,
					   vsi->rss_lut_size, 0);

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key */
		for (i = 0; i < vsi->rss_key_size; i++)
			vsi->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vsi->rss_key_size));
	}
	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
	if (ret)
		return -EINVAL;

	/* init RSS LUT table */
	for (i = 0; i < vsi->rss_lut_size; i++)
		vsi->rss_lut[i] = i % nb_q;

	ret = ice_aq_set_rss_lut(hw, vsi->idx,
				 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
				 vsi->rss_lut, vsi->rss_lut_size);
	if (ret)
		return -EINVAL;

	return 0;
}
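
/* LUT illustration: with 4 Rx queues the loop above programs the table as
 * 0, 1, 2, 3, 0, 1, 2, 3, ... for its whole length, so RSS hash buckets
 * spread evenly across the configured queues.
 */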
static int
ice_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t nb_rxq = 0;
	uint16_t nb_txq, i;
	int ret;

	/* program Tx queues' context in hardware */
	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
		ret = ice_tx_queue_start(dev, nb_txq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
			goto tx_err;
		}
	}

	/* program Rx queues' context in hardware */
	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
		ret = ice_rx_queue_start(dev, nb_rxq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
			goto rx_err;
		}
	}

	ret = ice_init_rss(pf);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
		goto rx_err;
	}

	ice_set_rx_function(dev);

	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
				    NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(WARNING, "Fail to set phy mask");

	/* Call the get_link_info aq command to enable/disable LSE */
	ice_link_update(dev, 0);

	pf->adapter_stopped = false;

	return 0;

	/* stop the started queues if failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_tx_queue_stop(dev, i);

	return -EIO;
}
static int
ice_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = ice_dev_uninit(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
		return -ENXIO;
	}

	ret = ice_dev_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
		return -ENXIO;
	}

	return 0;
}
static void
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_KEEP_CRC;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M |
			       ETH_LINK_SPEED_100M |
			       ETH_LINK_SPEED_1G |
			       ETH_LINK_SPEED_2_5G |
			       ETH_LINK_SPEED_5G |
			       ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_20G |
			       ETH_LINK_SPEED_25G |
			       ETH_LINK_SPEED_40G;

	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
}
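
/* struct rte_eth_link packs into 64 bits, so the helpers below read and
 * write the whole link status atomically with one rte_atomic64_cmpset():
 * the expected value is the destination's current contents, making the
 * compare-and-set fail only if the link data changes underneath us.
 */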
static inline int
ice_atomic_read_link_status(struct rte_eth_dev *dev,
			    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
ice_atomic_write_link_status(struct rte_eth_dev *dev,
			     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
static int
ice_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
#define CHECK_INTERVAL 100  /* 100ms */
#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_link_status link_status;
	struct rte_eth_link link, old;
	int status;
	unsigned int rep_cnt = MAX_REPEAT_TIME;
	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;

	memset(&link, 0, sizeof(link));
	memset(&old, 0, sizeof(old));
	memset(&link_status, 0, sizeof(link_status));
	ice_atomic_read_link_status(dev, &old);

	do {
		/* Get link status information from hardware */
		status = ice_aq_get_link_info(hw->port_info, enable_lse,
					      &link_status, NULL);
		if (status != ICE_SUCCESS) {
			link.link_speed = ETH_SPEED_NUM_100M;
			link.link_duplex = ETH_LINK_FULL_DUPLEX;
			PMD_DRV_LOG(ERR, "Failed to get link info");
			goto out;
		}

		link.link_status = link_status.link_info & ICE_AQ_LINK_UP;
		if (!wait_to_complete || link.link_status)
			break;

		rte_delay_ms(CHECK_INTERVAL);
	} while (--rep_cnt);

	if (!link.link_status)
		goto out;

	/* Full-duplex operation at all supported speeds */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* Parse the link status */
	switch (link_status.link_speed) {
	case ICE_AQ_LINK_SPEED_10MB:
		link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		link.link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		link.link_speed = ETH_SPEED_NUM_5G;
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		link.link_speed = ETH_SPEED_NUM_20G;
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		link.link_speed = ETH_SPEED_NUM_40G;
		break;
	case ICE_AQ_LINK_SPEED_UNKNOWN:
	default:
		PMD_DRV_LOG(ERR, "Unknown link speed");
		link.link_speed = ETH_SPEED_NUM_NONE;
		break;
	}

	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);

out:
	ice_atomic_write_link_status(dev, &link);
	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_adapter),
					     ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}

static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};
/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_MAX_QP_NUM "=<int>");
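
/* Example invocation (hypothetical PCI address) exercising the devarg
 * declared above:
 *   testpmd -w 0000:18:00.0,max_queue_pair_num=4 -- -i
 */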
RTE_INIT(ice_init_log)
{
	ice_logtype_init = rte_log_register("pmd.net.ice.init");
	if (ice_logtype_init >= 0)
		rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
	ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
	if (ice_logtype_driver >= 0)
		rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
}