/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_ethdev_pci.h>

#include "base/ice_sched.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"

#define ICE_MAX_QP_NUM "max_queue_pair_num"
#define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100

int ice_logtype_init;
int ice_logtype_driver;
static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
static void ice_dev_stop(struct rte_eth_dev *dev);
static void ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static void ice_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};
static const struct eth_dev_ops ice_eth_dev_ops = {
	.dev_configure    = ice_dev_configure,
	.dev_start        = ice_dev_start,
	.dev_stop         = ice_dev_stop,
	.dev_close        = ice_dev_close,
	.dev_reset        = ice_dev_reset,
	.rx_queue_start   = ice_rx_queue_start,
	.rx_queue_stop    = ice_rx_queue_stop,
	.tx_queue_start   = ice_tx_queue_start,
	.tx_queue_stop    = ice_tx_queue_stop,
	.rx_queue_setup   = ice_rx_queue_setup,
	.rx_queue_release = ice_rx_queue_release,
	.tx_queue_setup   = ice_tx_queue_setup,
	.tx_queue_release = ice_tx_queue_release,
	.dev_infos_get    = ice_dev_info_get,
};
static void
ice_init_controlq_parameter(struct ice_hw *hw)
{
	/* fields for adminq */
	hw->adminq.num_rq_entries = ICE_ADMINQ_LEN;
	hw->adminq.num_sq_entries = ICE_ADMINQ_LEN;
	hw->adminq.rq_buf_size = ICE_ADMINQ_BUF_SZ;
	hw->adminq.sq_buf_size = ICE_ADMINQ_BUF_SZ;

	/* fields for mailboxq; DPDK acts as the PF host */
	hw->mailboxq.num_rq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.num_sq_entries = ICE_MAILBOXQ_LEN;
	hw->mailboxq.rq_buf_size = ICE_MAILBOXQ_BUF_SZ;
	hw->mailboxq.sq_buf_size = ICE_MAILBOXQ_BUF_SZ;
}
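
/* Validate the "max_queue_pair_num" devarg value: skip leading blanks,
 * then require a positive decimal integer (zero, a stray '-', or a parse
 * error is rejected). Returns the parsed count, or -1 on invalid input.
 */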
static int
ice_check_qp_num(const char *key, const char *qp_value,
		 __rte_unused void *opaque)
{
	char *end = NULL;
	int num = 0;

	while (isblank(*qp_value))
		qp_value++;

	num = strtoul(qp_value, &end, 10);

	if (!num || (*end == '-') || errno) {
		PMD_DRV_LOG(WARNING, "invalid value:\"%s\" for key:\"%s\", "
			    "value must be > 0",
			    qp_value, key);
		return -1;
	}

	return num;
}
static int
ice_config_max_queue_pair_num(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *queue_num_key = ICE_MAX_QP_NUM;
	int ret;

	if (!devargs)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (!kvlist)
		return 0;

	if (!rte_kvargs_count(kvlist, queue_num_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	/* First pass validates the value; the second retrieves it */
	if (rte_kvargs_process(kvlist, queue_num_key,
			       ice_check_qp_num, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	ret = rte_kvargs_process(kvlist, queue_num_key,
				 ice_check_qp_num, NULL);
	rte_kvargs_free(kvlist);

	return ret;
}
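
/* Illustrative devarg strings as handled above:
 *   "max_queue_pair_num=4"  -> 4 queue pairs requested
 *   "max_queue_pair_num=0"  -> rejected by ice_check_qp_num(), so the
 *                              function returns 0 and the caller falls
 *                              back to the HW capabilities.
 */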
static int
ice_res_pool_init(struct ice_res_pool_info *pool, uint32_t base,
		  uint32_t num)
{
	struct pool_entry *entry;

	if (!pool || !num)
		return -EINVAL;

	entry = rte_zmalloc(NULL, sizeof(*entry), 0);
	if (!entry) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for resource pool");
		return -ENOMEM;
	}

	/* queue heap initialize */
	pool->num_free = num;
	pool->num_alloc = 0;
	pool->base = base;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);

	/* Initialize one element covering the whole [0, num) range */
	entry->base = 0;
	entry->len = num;

	LIST_INSERT_HEAD(&pool->free_list, entry, next);
	return 0;
}
static int
ice_res_pool_alloc(struct ice_res_pool_info *pool,
		   uint16_t num)
{
	struct pool_entry *entry, *valid_entry;

	if (!pool || !num) {
		PMD_INIT_LOG(ERR, "Invalid parameter");
		return -EINVAL;
	}

	if (pool->num_free < num) {
		PMD_INIT_LOG(ERR, "No resource. ask:%u, available:%u",
			     num, pool->num_free);
		return -ENOMEM;
	}

	valid_entry = NULL;
	/* Look up the free list for the best-fit entry */
	LIST_FOREACH(entry, &pool->free_list, next) {
		if (entry->len >= num) {
			/* An exact fit cannot be beaten */
			if (entry->len == num) {
				valid_entry = entry;
				break;
			}
			if (!valid_entry ||
			    valid_entry->len > entry->len)
				valid_entry = entry;
		}
	}

	/* No entry satisfies the request, return */
	if (!valid_entry) {
		PMD_INIT_LOG(ERR, "No valid entry found");
		return -ENOMEM;
	}

	/* The entry has exactly the requested number of queues:
	 * move it from the free list to the alloc list as-is.
	 */
	if (valid_entry->len == num) {
		LIST_REMOVE(valid_entry, next);
	} else {
		/* The entry holds more than requested: create a new
		 * entry for the alloc list and shrink the remainder
		 * (base and length) kept on the free list.
		 */
		entry = rte_zmalloc(NULL, sizeof(*entry), 0);
		if (!entry) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate memory for "
				     "resource pool");
			return -ENOMEM;
		}
		entry->base = valid_entry->base;
		entry->len = num;
		valid_entry->base += num;
		valid_entry->len -= num;
		valid_entry = entry;
	}

	/* Insert it into alloc list, not sorted */
	LIST_INSERT_HEAD(&pool->alloc_list, valid_entry, next);

	pool->num_free -= valid_entry->len;
	pool->num_alloc += valid_entry->len;

	return valid_entry->base + pool->base;
}
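
/* Worked example (illustrative numbers): with pool->base = 1 and a single
 * free entry covering [0, 15), a request for num = 4 carves off [0, 4),
 * returns 0 + 1 = 1 as the first absolute index, and leaves [4, 15) on
 * the free list.
 */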
static void
ice_res_pool_destroy(struct ice_res_pool_info *pool)
{
	struct pool_entry *entry, *next_entry;

	if (!pool)
		return;

	for (entry = LIST_FIRST(&pool->alloc_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	for (entry = LIST_FIRST(&pool->free_list);
	     entry && (next_entry = LIST_NEXT(entry, next), 1);
	     entry = next_entry) {
		LIST_REMOVE(entry, next);
		rte_free(entry);
	}

	pool->num_free = 0;
	pool->num_alloc = 0;
	pool->base = 0;
	LIST_INIT(&pool->alloc_list);
	LIST_INIT(&pool->free_list);
}
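
/* Default RSS setup for a VSI: select the VSI-specific LUT and the
 * Toeplitz hash scheme, and set the TC override bit in the queueing
 * options (per the ICE_AQ_VSI_Q_OPT_TC_OVR_M flag name).
 */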
static void
ice_vsi_config_default_rss(struct ice_aqc_vsi_props *info)
{
	/* Set VSI LUT selection */
	info->q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI &
			  ICE_AQ_VSI_Q_OPT_RSS_LUT_M;
	/* Set hash scheme */
	info->q_opt_rss |= ICE_AQ_VSI_Q_OPT_RSS_TPLZ &
			   ICE_AQ_VSI_Q_OPT_RSS_HASH_M;

	info->q_opt_tc = ICE_AQ_VSI_Q_OPT_TC_OVR_M;
}
static enum ice_status
ice_vsi_config_tc_queue_mapping(struct ice_vsi *vsi,
				struct ice_aqc_vsi_props *info,
				uint8_t enabled_tcmap)
{
	uint16_t bsf, qp_idx;

	/* Default to TC0 for now; multi-TC support needs to be added later.
	 * Configure TC and queue mapping parameters: for each enabled TC,
	 * allocate qpnum_per_tc queues to that traffic class.
	 */
	if (enabled_tcmap != 0x01) {
		PMD_INIT_LOG(ERR, "only TC0 is supported");
		return ICE_ERR_PARAM;
	}

	vsi->nb_qps = RTE_MIN(vsi->nb_qps, ICE_MAX_Q_PER_TC);
	bsf = rte_bsf32(vsi->nb_qps);
	/* Adjust the queue number to the actual queues that can be applied */
	vsi->nb_qps = 0x1 << bsf;
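	/* Note: rte_bsf32() returns the position of the lowest set bit, so
	 * nb_qps is reduced to the largest power of two that divides it,
	 * e.g. nb_qps = 6 (0b110) gives bsf = 1 and the VSI ends up with
	 * 2 queue pairs; only power-of-two requests keep their value.
	 */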
	qp_idx = 0;
	/* Set TC and queue mapping with VSI */
	info->tc_mapping[0] = rte_cpu_to_le_16((qp_idx <<
						ICE_AQ_VSI_TC_Q_OFFSET_S) |
					       (bsf << ICE_AQ_VSI_TC_Q_NUM_S));

	/* Associate queue number with VSI */
	info->mapping_flags |= rte_cpu_to_le_16(ICE_AQ_VSI_Q_MAP_CONTIG);
	info->q_mapping[0] = rte_cpu_to_le_16(vsi->base_queue);
	info->q_mapping[1] = rte_cpu_to_le_16(vsi->nb_qps);
	info->valid_sections |=
		rte_cpu_to_le_16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	/* Set info.ingress_table and info.egress_table for the UP translate
	 * table. For now just set it to a 1:1 map by default:
	 * 0b 111 110 101 100 011 010 001 000 == 0xFAC688
	 */
#define ICE_TC_QUEUE_TABLE_DFLT 0x00FAC688
	info->ingress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->egress_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);
	info->outer_up_table = rte_cpu_to_le_32(ICE_TC_QUEUE_TABLE_DFLT);

	return 0;
}
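
/* The default UP translate table above packs eight 3-bit user-priority
 * fields with UP0 in the lowest bits: UP7..UP0 = 111 110 101 100 011 010
 * 001 000b = 0x00FAC688, i.e. field k holds value k, so every priority
 * maps to itself.
 */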
static int
ice_init_mac_address(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_unicast_ether_addr
		((struct ether_addr *)hw->port_info[0].mac.lan_addr)) {
		PMD_INIT_LOG(ERR, "Invalid MAC address");
		return -EINVAL;
	}

	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.lan_addr,
			(struct ether_addr *)hw->port_info[0].mac.perm_addr);

	dev->data->mac_addrs = rte_zmalloc(NULL, sizeof(struct ether_addr), 0);
	if (!dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory to store mac address");
		return -ENOMEM;
	}
	/* store it in dev data */
	ether_addr_copy((struct ether_addr *)hw->port_info[0].mac.perm_addr,
			&dev->data->mac_addrs[0]);
	return 0;
}
/* Initialize SW parameters of PF */
static int
ice_pf_sw_init(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int ret;

	/* Parse the devargs once and reuse the result, rather than
	 * parsing twice for the check and the assignment.
	 */
	ret = ice_config_max_queue_pair_num(dev->device->devargs);
	if (ret > 0)
		pf->lan_nb_qp_max = (uint16_t)ret;
	else
		pf->lan_nb_qp_max =
			(uint16_t)RTE_MIN(hw->func_caps.common_cap.num_txq,
					  hw->func_caps.common_cap.num_rxq);

	pf->lan_nb_qps = pf->lan_nb_qp_max;

	return 0;
}
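
/* E.g. if func_caps reports 64 Tx and 64 Rx queues and no devarg is given,
 * lan_nb_qp_max resolves to 64; a max_queue_pair_num=4 devarg would cap it
 * at 4. (Illustrative numbers only.)
 */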
static struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = NULL;
	struct ice_vsi_ctx vsi_ctx;
	int ret;
	uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	uint8_t tc_bitmap = 0x1;

	/* hw->num_lports = 1 in NIC mode */
	vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0);
	if (!vsi)
		return NULL;

	vsi->idx = pf->next_vsi_idx;
	pf->next_vsi_idx++;
	vsi->type = type;
	vsi->adapter = ICE_PF_TO_ADAPTER(pf);
	vsi->max_macaddrs = ICE_NUM_MACADDR_MAX;
	vsi->vlan_anti_spoof_on = 0;
	vsi->vlan_filter_on = 1;
	TAILQ_INIT(&vsi->mac_list);
	TAILQ_INIT(&vsi->vlan_list);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));
	/* base_queue is used in the queue mapping of the VSI add/update
	 * command. Assume vsi->base_queue is 0 for now; the SRIOV and VMDQ
	 * cases are not considered in this first stage. Main VSI only.
	 */
	vsi->base_queue = 0;
	switch (type) {
	case ICE_VSI_PF:
		vsi->nb_qps = pf->lan_nb_qps;
		ice_vsi_config_default_rss(&vsi_ctx.info);
		vsi_ctx.alloc_from_pool = true;
		vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF;
		/* switch_id is queried by the get_switch_config aq, which
		 * is done by ice_init_hw
		 */
		vsi_ctx.info.sw_id = hw->port_info->sw_id;
		vsi_ctx.info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
		/* Allow all untagged or tagged packets */
		vsi_ctx.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
		vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING;
		vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF |
					 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		/* Enable VLAN/UP trip */
		ret = ice_vsi_config_tc_queue_mapping(vsi,
						      &vsi_ctx.info,
						      ICE_DEFAULT_TCMAP);
		if (ret) {
			PMD_INIT_LOG(ERR,
				     "tc queue mapping with vsi failed, "
				     "err = %d",
				     ret);
			goto fail_mem;
		}
		break;
	default:
		/* for other types of VSI */
		PMD_INIT_LOG(ERR, "other types of VSI not supported");
		goto fail_mem;
	}

	/* VF has MSIX interrupt in VF range, don't allocate here */
	if (type == ICE_VSI_PF) {
		ret = ice_res_pool_alloc(&pf->msix_pool,
					 RTE_MIN(vsi->nb_qps,
						 RTE_MAX_RXTX_INTR_VEC_ID));
		if (ret < 0)
			PMD_INIT_LOG(ERR, "VSI MAIN %d get heap failed %d",
				     vsi->vsi_id, ret);
		vsi->msix_intr = ret;
		vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID);
	} else {
		vsi->msix_intr = 0;
		vsi->nb_msix = 0;
	}

	ret = ice_add_vsi(hw, vsi->idx, &vsi_ctx, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "add vsi failed, err = %d", ret);
		goto fail_mem;
	}
	/* store VSI information in the SW structure */
	vsi->vsi_id = vsi_ctx.vsi_num;
	vsi->info = vsi_ctx.info;
	pf->vsis_allocated = vsi_ctx.vsis_allocd;
	pf->vsis_unallocated = vsi_ctx.vsis_unallocated;

	/* At the beginning, only TC0. */
	/* What we need here is the maximum number of Tx queues;
	 * currently vsi->nb_qps holds that number.
	 * Correct this if that ever changes.
	 */
	max_txqs[0] = vsi->nb_qps;
	ret = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
			      tc_bitmap, max_txqs);
	if (ret != ICE_SUCCESS)
		PMD_INIT_LOG(ERR, "Failed to config vsi sched");

	return vsi;

fail_mem:
	rte_free(vsi);
	pf->next_vsi_idx--;
	return NULL;
}
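
/* Reset all PF statistics counters and create the main (PF) VSI. */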
static int
ice_pf_setup(struct ice_pf *pf)
{
	struct ice_vsi *vsi;

	/* Clear all stats counters */
	pf->offset_loaded = FALSE;
	memset(&pf->stats, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->stats_offset, 0, sizeof(struct ice_hw_port_stats));
	memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats));
	memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats));

	vsi = ice_setup_vsi(pf, ICE_VSI_PF);
	if (!vsi) {
		PMD_INIT_LOG(ERR, "Failed to add vsi for PF");
		return -EINVAL;
	}

	pf->main_vsi = vsi;

	return 0;
}
static int
ice_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	dev->dev_ops = &ice_eth_dev_ops;

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	pf->adapter->eth_dev = dev;
	pf->dev_data = dev->data;
	hw->back = pf->adapter;
	hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->bus.device = pci_dev->addr.devid;
	hw->bus.func = pci_dev->addr.function;

	ice_init_controlq_parameter(hw);

	ret = ice_init_hw(hw);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize HW");
		return -EINVAL;
	}

	PMD_INIT_LOG(INFO, "FW %d.%d.%05d API %d.%d",
		     hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
		     hw->api_maj_ver, hw->api_min_ver);

	ice_pf_sw_init(dev);
	ret = ice_init_mac_address(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to initialize mac address");
		goto err_init_mac;
	}

	ret = ice_res_pool_init(&pf->msix_pool, 1,
				hw->func_caps.common_cap.num_msix_vectors - 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to init MSIX pool");
		goto err_msix_pool_init;
	}

	ret = ice_pf_setup(pf);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to setup PF");
		goto err_pf_setup;
	}

	return 0;

err_pf_setup:
	ice_res_pool_destroy(&pf->msix_pool);
err_msix_pool_init:
	rte_free(dev->data->mac_addrs);
err_init_mac:
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return ret;
}
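
/* Free the VSI's HW context via the free-VSI admin queue command, then
 * release the SW structure. Returns 0 on success (or for a NULL VSI),
 * -1 if the AQ call fails.
 */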
static int
ice_release_vsi(struct ice_vsi *vsi)
{
	struct ice_hw *hw;
	struct ice_vsi_ctx vsi_ctx;
	int ret;

	if (!vsi)
		return 0;

	hw = ICE_VSI_TO_HW(vsi);

	memset(&vsi_ctx, 0, sizeof(vsi_ctx));

	vsi_ctx.vsi_num = vsi->vsi_id;
	vsi_ctx.info = vsi->info;
	ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
	if (ret != ICE_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
		rte_free(vsi);
		return -1;
	}

	rte_free(vsi);
	return 0;
}
static void
ice_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t i;

	/* avoid stopping again */
	if (pf->adapter_stopped)
		return;

	/* stop and clear all Rx queues */
	for (i = 0; i < data->nb_rx_queues; i++)
		ice_rx_queue_stop(dev, i);

	/* stop and clear all Tx queues */
	for (i = 0; i < data->nb_tx_queues; i++)
		ice_tx_queue_stop(dev, i);

	/* Clear all queues and release mbufs */
	ice_clear_queues(dev);

	pf->adapter_stopped = true;
}
static void
ice_dev_close(struct rte_eth_dev *dev)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ice_dev_stop(dev);

	/* release all queue resources */
	ice_free_queues(dev);

	ice_res_pool_destroy(&pf->msix_pool);
	ice_release_vsi(pf->main_vsi);
	/* clear the pointer so a later ice_dev_uninit() cannot release the
	 * main VSI a second time
	 */
	pf->main_vsi = NULL;

	ice_shutdown_all_ctrlq(hw);
}
static int
ice_dev_uninit(struct rte_eth_dev *dev)
{
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	ice_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	/* main_vsi was already released and cleared by ice_dev_close() */
	ice_release_vsi(pf->main_vsi);
	ice_sched_cleanup_all(hw);
	rte_free(hw->port_info);
	ice_shutdown_all_ctrlq(hw);

	return 0;
}
static int
ice_dev_configure(struct rte_eth_dev *dev)
{
	struct ice_adapter *ad =
		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

	/* Initialize to TRUE. If any of the Rx queues doesn't meet the
	 * bulk allocation or vector Rx preconditions we will reset it.
	 */
	ad->rx_bulk_alloc_allowed = true;
	ad->tx_simple_allowed = true;

	return 0;
}
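
/* Program the RSS hash key and lookup table (LUT) for the main VSI: the
 * key comes from rx_adv_conf if the application supplied one, otherwise
 * it is randomized, and the LUT spreads its entries round-robin across
 * the configured Rx queues.
 */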
static int ice_init_rss(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct rte_eth_rss_conf *rss_conf;
	struct ice_aqc_get_set_rss_keys key;
	uint16_t i, nb_q;
	int ret = 0;

	rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = dev->data->nb_rx_queues;
	vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
	vsi->rss_lut_size = hw->func_caps.common_cap.rss_table_size;

	if (!vsi->rss_key)
		vsi->rss_key = rte_zmalloc(NULL,
					   vsi->rss_key_size, 0);
	if (!vsi->rss_lut)
		vsi->rss_lut = rte_zmalloc(NULL,
					   vsi->rss_lut_size, 0);

	/* configure RSS key */
	if (!rss_conf->rss_key) {
		/* Calculate the default hash key; note the bound is
		 * '<', not '<=': the buffer is rss_key_size bytes.
		 */
		for (i = 0; i < vsi->rss_key_size; i++)
			vsi->rss_key[i] = (uint8_t)rte_rand();
	} else {
		rte_memcpy(vsi->rss_key, rss_conf->rss_key,
			   RTE_MIN(rss_conf->rss_key_len,
				   vsi->rss_key_size));
	}
	rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
	ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
	if (ret)
		return -EINVAL;
	/* init RSS LUT table */
	for (i = 0; i < vsi->rss_lut_size; i++)
		vsi->rss_lut[i] = i % nb_q;

	ret = ice_aq_set_rss_lut(hw, vsi->idx,
				 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
				 vsi->rss_lut, vsi->rss_lut_size);
	if (ret)
		return -EINVAL;

	return 0;
}
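
/* E.g. with nb_q = 4 and a 512-entry LUT the table reads 0,1,2,3,0,1,2,3,
 * ... so hash results map round-robin onto the configured Rx queues.
 * (Illustrative sizes; rss_table_size comes from the function caps.)
 */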
static int
ice_dev_start(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	uint16_t nb_rxq = 0;
	uint16_t nb_txq, i;
	int ret;

	/* program Tx queues' context in hardware */
	for (nb_txq = 0; nb_txq < data->nb_tx_queues; nb_txq++) {
		ret = ice_tx_queue_start(dev, nb_txq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Tx queue %u", nb_txq);
			goto tx_err;
		}
	}

	/* program Rx queues' context in hardware */
	for (nb_rxq = 0; nb_rxq < data->nb_rx_queues; nb_rxq++) {
		ret = ice_rx_queue_start(dev, nb_rxq);
		if (ret) {
			PMD_DRV_LOG(ERR, "fail to start Rx queue %u", nb_rxq);
			goto rx_err;
		}
	}

	ret = ice_init_rss(pf);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to enable rss for PF");
		goto rx_err;
	}

	ret = ice_aq_set_event_mask(hw, hw->port_info->lport,
				    ((u16)(ICE_AQ_LINK_EVENT_LINK_FAULT |
				     ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM |
				     ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS |
				     ICE_AQ_LINK_EVENT_SIGNAL_DETECT |
				     ICE_AQ_LINK_EVENT_AN_COMPLETED |
				     ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED)),
				    NULL);
	if (ret != ICE_SUCCESS)
		PMD_DRV_LOG(WARNING, "Failed to set phy mask");

	pf->adapter_stopped = false;

	return 0;

	/* stop the started queues if failed to start all queues */
rx_err:
	for (i = 0; i < nb_rxq; i++)
		ice_rx_queue_stop(dev, i);
tx_err:
	for (i = 0; i < nb_txq; i++)
		ice_tx_queue_stop(dev, i);

	return -EIO;
}
static int
ice_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	if (dev->data->sriov.active)
		return -ENOTSUP;

	ret = ice_dev_uninit(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to uninit device, status = %d", ret);
		return -ENXIO;
	}

	ret = ice_dev_init(dev);
	if (ret) {
		PMD_INIT_LOG(ERR, "failed to init device, status = %d", ret);
		return -ENXIO;
	}

	return 0;
}
static void
ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_vsi *vsi = pf->main_vsi;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
	dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
	dev_info->max_rx_queues = vsi->nb_qps;
	dev_info->max_tx_queues = vsi->nb_qps;
	dev_info->max_mac_addrs = vsi->max_macaddrs;
	dev_info->max_vfs = pci_dev->max_vfs;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_QINQ_STRIP |
		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_RX_OFFLOAD_VLAN_EXTEND |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_KEEP_CRC |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_VLAN_FILTER;
	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_QINQ_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS |
		DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_queue_offload_capa = 0;

	dev_info->reta_size = hw->func_caps.common_cap.rss_table_size;
	dev_info->hash_key_size = (VSIQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = ICE_DEFAULT_RX_PTHRESH,
			.hthresh = ICE_DEFAULT_RX_HTHRESH,
			.wthresh = ICE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = ICE_DEFAULT_TX_PTHRESH,
			.hthresh = ICE_DEFAULT_TX_HTHRESH,
			.wthresh = ICE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = ICE_MAX_RING_DESC,
		.nb_min = ICE_MIN_RING_DESC,
		.nb_align = ICE_ALIGN_RING_DESC,
	};

	dev_info->speed_capa = ETH_LINK_SPEED_10M |
			       ETH_LINK_SPEED_100M |
			       ETH_LINK_SPEED_1G |
			       ETH_LINK_SPEED_2_5G |
			       ETH_LINK_SPEED_5G |
			       ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_20G |
			       ETH_LINK_SPEED_25G |
			       ETH_LINK_SPEED_40G;

	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;

	dev_info->default_rxportconf.burst_size = ICE_RX_MAX_BURST;
	dev_info->default_txportconf.burst_size = ICE_TX_MAX_BURST;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = ICE_BUF_SIZE_MIN;
	dev_info->default_txportconf.ring_size = ICE_BUF_SIZE_MIN;
}
static int
ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	      struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct ice_adapter),
					     ice_dev_init);
}

static int
ice_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, ice_dev_uninit);
}
static struct rte_pci_driver rte_ice_pmd = {
	.id_table = pci_id_ice_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = ice_pci_probe,
	.remove = ice_pci_remove,
};
/**
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Register itself as the [Poll Mode] Driver of PCI devices.
 */
RTE_PMD_REGISTER_PCI(net_ice, rte_ice_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ice, pci_id_ice_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_ice,
			      ICE_MAX_QP_NUM "=<int>");
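
/* Usage sketch (hypothetical PCI address): cap the port at 4 queue pairs
 * by passing the devarg with the whitelisted device, e.g.
 *   testpmd -w 0000:18:00.0,max_queue_pair_num=4 -- -i
 */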
RTE_INIT(ice_init_log)
{
	ice_logtype_init = rte_log_register("pmd.net.ice.init");
	if (ice_logtype_init >= 0)
		rte_log_set_level(ice_logtype_init, RTE_LOG_NOTICE);
	ice_logtype_driver = rte_log_register("pmd.net.ice.driver");
	if (ice_logtype_driver >= 0)
		rte_log_set_level(ice_logtype_driver, RTE_LOG_NOTICE);
}