X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fqede%2Fqede_ethdev.c;h=bfd38a97721966eeba38141d11335f6e10557550;hb=6ceb7ab83f168fa6b8e90e4bd5a1392de1a48c70;hp=1542073a278439600084e9dd806f30d98c08dba2;hpb=611faa5f46cc67449f272e14450fc6a0a275767d;p=dpdk.git diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c index 1542073a27..bfd38a9772 100644 --- a/drivers/net/qede/qede_ethdev.c +++ b/drivers/net/qede/qede_ethdev.c @@ -7,13 +7,8 @@ #include "qede_ethdev.h" #include #include -#include #include -/* Globals */ -int qede_logtype_init; -int qede_logtype_driver; - static const struct qed_eth_ops *qed_ops; static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev); static int qede_eth_dev_init(struct rte_eth_dev *eth_dev); @@ -232,9 +227,63 @@ static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = { offsetof(struct qede_rx_queue, rx_alloc_errors)} }; +/* Get FW version string based on fw_size */ +static int +qede_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size) +{ + struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct qed_dev_info *info = &qdev->dev_info.common; + static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; + size_t size; + + if (fw_ver == NULL) + return 0; + + if (IS_PF(edev)) + snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", + QEDE_PMD_FW_VERSION); + else + snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d", + info->fw_major, info->fw_minor, + info->fw_rev, info->fw_eng); + size = strlen(ver_str); + if (size + 1 <= fw_size) /* Add 1 byte for "\0" */ + strlcpy(fw_ver, ver_str, fw_size); + else + return (size + 1); + + snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size), + " MFW: %d.%d.%d.%d", + GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_3), + GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_2), + GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_1), + GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_0)); + size = strlen(ver_str); + if (size + 1 <= fw_size) + strlcpy(fw_ver, ver_str, fw_size); + + if (fw_size <= 32) + goto out; + + snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size), + " MBI: %d.%d.%d", + GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_2), + GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_1), + GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_0)); + size = strlen(ver_str); + if (size + 1 <= fw_size) + strlcpy(fw_ver, ver_str, fw_size); + +out: + return 0; +} + static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) { + OSAL_SPIN_LOCK(&p_hwfn->spq_lock); ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn)); + OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock); } static void @@ -268,13 +317,19 @@ qede_interrupt_handler(void *param) } static void -qede_assign_rxtx_handlers(struct rte_eth_dev *dev) +qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy) { uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; struct qede_dev *qdev = dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; bool use_tx_offload = false; + if (is_dummy) { + dev->rx_pkt_burst = qede_rxtx_pkts_dummy; + dev->tx_pkt_burst = qede_rxtx_pkts_dummy; + return; + } + if (ECORE_IS_CMT(edev)) { dev->rx_pkt_burst = qede_recv_pkts_cmt; dev->tx_pkt_burst = qede_xmit_pkts_cmt; @@ -310,47 +365,27 @@ qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info) qdev->ops = qed_ops; } -static void qede_print_adapter_info(struct qede_dev *qdev) +static void qede_print_adapter_info(struct rte_eth_dev *dev) { + struct qede_dev *qdev = dev->data->dev_private; 
struct ecore_dev *edev = &qdev->edev; - struct qed_dev_info *info = &qdev->dev_info.common; static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; DP_INFO(edev, "**************************************************\n"); - DP_INFO(edev, " DPDK version\t\t\t: %s\n", rte_version()); - DP_INFO(edev, " Chip details\t\t\t: %s %c%d\n", + DP_INFO(edev, " %-20s: %s\n", "DPDK version", rte_version()); + DP_INFO(edev, " %-20s: %s %c%d\n", "Chip details", ECORE_IS_BB(edev) ? "BB" : "AH", 'A' + edev->chip_rev, (int)edev->chip_metal); snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", QEDE_PMD_DRV_VERSION); - DP_INFO(edev, " Driver version\t\t\t: %s\n", ver_str); - + DP_INFO(edev, " %-20s: %s\n", "Driver version", ver_str); snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", QEDE_PMD_BASE_VERSION); - DP_INFO(edev, " Base version\t\t\t: %s\n", ver_str); - - if (!IS_VF(edev)) - snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", - QEDE_PMD_FW_VERSION); - else - snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d", - info->fw_major, info->fw_minor, - info->fw_rev, info->fw_eng); - DP_INFO(edev, " Firmware version\t\t\t: %s\n", ver_str); - - snprintf(ver_str, MCP_DRV_VER_STR_SIZE, - "%d.%d.%d.%d", - (info->mfw_rev & QED_MFW_VERSION_3_MASK) >> - QED_MFW_VERSION_3_OFFSET, - (info->mfw_rev & QED_MFW_VERSION_2_MASK) >> - QED_MFW_VERSION_2_OFFSET, - (info->mfw_rev & QED_MFW_VERSION_1_MASK) >> - QED_MFW_VERSION_1_OFFSET, - (info->mfw_rev & QED_MFW_VERSION_0_MASK) >> - QED_MFW_VERSION_0_OFFSET); - DP_INFO(edev, " Management Firmware version\t: %s\n", ver_str); - DP_INFO(edev, " Firmware file\t\t\t: %s\n", qede_fw_file); + DP_INFO(edev, " %-20s: %s\n", "Base version", ver_str); + qede_fw_version_get(dev, ver_str, sizeof(ver_str)); + DP_INFO(edev, " %-20s: %s\n", "Firmware version", ver_str); + DP_INFO(edev, " %-20s: %s\n", "Firmware file", qede_fw_file); DP_INFO(edev, "**************************************************\n"); } @@ -587,17 +622,16 @@ qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev, ECORE_ACCEPT_BCAST; if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { - flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; + flags.rx_accept_filter |= (ECORE_ACCEPT_UCAST_UNMATCHED | + ECORE_ACCEPT_MCAST_UNMATCHED); if (IS_VF(edev)) { - flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; - DP_INFO(edev, "Enabling Tx unmatched flag for VF\n"); + flags.tx_accept_filter |= + (ECORE_ACCEPT_UCAST_UNMATCHED | + ECORE_ACCEPT_MCAST_UNMATCHED); + DP_INFO(edev, "Enabling Tx unmatched flags for VF\n"); } } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED; - } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC | - QED_FILTER_RX_MODE_TYPE_PROMISC)) { - flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED | - ECORE_ACCEPT_MCAST_UNMATCHED; } return ecore_filter_accept_cmd(edev, 0, flags, false, false, @@ -998,9 +1032,6 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) } } - if (mask & ETH_VLAN_EXTEND_MASK) - DP_ERR(edev, "Extend VLAN not supported\n"); - qdev->vlan_offload_mask = mask; DP_INFO(edev, "VLAN offload mask %d\n", mask); @@ -1112,13 +1143,18 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) if (qede_activate_vport(eth_dev, true)) goto err; + /* Bring-up the link */ + qede_dev_set_link_state(eth_dev, true); + /* Update link status */ qede_link_update(eth_dev, 0); /* Start/resume traffic */ qede_fastpath_start(edev); - qede_assign_rxtx_handlers(eth_dev); + /* Assign I/O handlers */ + qede_assign_rxtx_handlers(eth_dev, 
false);
+
 DP_INFO(edev, "Device started\n");
 return 0;
@@ -1127,16 +1163,28 @@ err:
 return -1; /* common error code is < 0 */
 }
-static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+static int qede_dev_stop(struct rte_eth_dev *eth_dev)
 {
 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 PMD_INIT_FUNC_TRACE(edev);
+ eth_dev->data->dev_started = 0;
+
+ /* Bring the link down */
+ qede_dev_set_link_state(eth_dev, false);
+
+ /* Update link status */
+ qede_link_update(eth_dev, 0);
+
+ /* Replace I/O functions with dummy ones. It cannot
+ * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+ */
+ qede_assign_rxtx_handlers(eth_dev, true);
 /* Disable vport */
 if (qede_activate_vport(eth_dev, false))
- return;
+ return 0;
 if (qdev->enable_lro)
 qede_enable_tpa(eth_dev, false);
@@ -1148,6 +1196,8 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
 ecore_hw_stop_fastpath(edev); /* TBD - loop */
 DP_INFO(edev, "Device is stopped\n");
+
+ return 0;
 }
 static const char * const valid_args[] = {
@@ -1219,6 +1269,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+ uint8_t num_rxqs;
+ uint8_t num_txqs;
 int ret;
 PMD_INIT_FUNC_TRACE(edev);
@@ -1251,12 +1303,17 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 if (qede_check_fdir_support(eth_dev))
 return -ENOTSUP;
- qede_dealloc_fp_resc(eth_dev);
- qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns;
- qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns;
-
- if (qede_alloc_fp_resc(qdev))
- return -ENOMEM;
+ /* Allocate/reallocate fastpath resources only for new queue config */
+ num_txqs = eth_dev->data->nb_tx_queues * edev->num_hwfns;
+ num_rxqs = eth_dev->data->nb_rx_queues * edev->num_hwfns;
+ if (qdev->num_tx_queues != num_txqs ||
+ qdev->num_rx_queues != num_rxqs) {
+ qede_dealloc_fp_resc(eth_dev);
+ qdev->num_tx_queues = num_txqs;
+ qdev->num_rx_queues = num_rxqs;
+ if (qede_alloc_fp_resc(qdev))
+ return -ENOMEM;
+ }
 /* If jumbo enabled adjust MTU */
 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
@@ -1441,16 +1498,13 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
- enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
 enum _ecore_status_t ecore_status;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
 PMD_INIT_FUNC_TRACE(edev);
- if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
- type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
-
 ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
 return ecore_status >= ECORE_SUCCESS ? 
0 : -EAGAIN; @@ -1493,21 +1547,26 @@ static void qede_poll_sp_sb_cb(void *param) } } -static void qede_dev_close(struct rte_eth_dev *eth_dev) +static int qede_dev_close(struct rte_eth_dev *eth_dev) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + int ret = 0; PMD_INIT_FUNC_TRACE(edev); + /* only close in case of the primary process */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + /* dev_stop() shall cleanup fp resources in hw but without releasing * dma memories and sw structures so that dev_start() can be called * by the app without reconfiguration. However, in dev_close() we * can release all the resources and device can be brought up newly */ if (eth_dev->data->dev_started) - qede_dev_stop(eth_dev); + ret = qede_dev_stop(eth_dev); if (qdev->vport_started) qede_stop_vport(edev); @@ -1518,8 +1577,6 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev) eth_dev->data->nb_rx_queues = 0; eth_dev->data->nb_tx_queues = 0; - /* Bring the link down */ - qede_dev_set_link_state(eth_dev, false); qdev->ops->common->slowpath_stop(edev); qdev->ops->common->remove(edev); rte_intr_disable(&pci_dev->intr_handle); @@ -1539,6 +1596,8 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev) if (ECORE_IS_CMT(edev)) rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); + + return ret; } static int @@ -1827,8 +1886,7 @@ static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev) enum _ecore_status_t ecore_status; if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) - type |= QED_FILTER_RX_MODE_TYPE_PROMISC; - + type = QED_FILTER_RX_MODE_TYPE_PROMISC; ecore_status = qed_configure_filter_rx_mode(eth_dev, type); return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; @@ -2081,8 +2139,10 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev, /* RSS hash key */ if (key) { if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) { - DP_ERR(edev, "RSS key length exceeds limit\n"); - return -EINVAL; + len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); + DP_NOTICE(edev, false, + "RSS key length too big, trimmed to %d\n", + len); } DP_INFO(edev, "Applying user supplied hash key\n"); rss_params.update_rss_key = 1; @@ -2284,14 +2344,11 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) dev->data->min_rx_buf_size); return -EINVAL; } - /* Temporarily replace I/O functions with dummy ones. It cannot - * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. 
- */ - dev->rx_pkt_burst = qede_rxtx_pkts_dummy; - dev->tx_pkt_burst = qede_rxtx_pkts_dummy; if (dev->data->dev_started) { dev->data->dev_started = 0; - qede_dev_stop(dev); + rc = qede_dev_stop(dev); + if (rc != 0) + return rc; restart = true; } rte_delay_ms(1000); @@ -2314,7 +2371,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) fp->rxq->rx_buf_size = rc; } } - if (max_rx_pkt_len > RTE_ETHER_MAX_LEN) + if (frame_size > QEDE_ETH_MAX_LEN) dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; else dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; @@ -2327,15 +2384,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) /* update max frame size */ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len; - /* Reassign back */ - qede_assign_rxtx_handlers(dev); - if (ECORE_IS_CMT(edev)) { - dev->rx_pkt_burst = qede_recv_pkts_cmt; - dev->tx_pkt_burst = qede_xmit_pkts_cmt; - } else { - dev->rx_pkt_burst = qede_recv_pkts; - dev->tx_pkt_burst = qede_xmit_pkts; - } return 0; } @@ -2356,7 +2404,6 @@ static const struct eth_dev_ops qede_eth_dev_ops = { .dev_infos_get = qede_dev_info_get, .rx_queue_setup = qede_rx_queue_setup, .rx_queue_release = qede_rx_queue_release, - .rx_descriptor_status = qede_rx_descriptor_status, .tx_queue_setup = qede_tx_queue_setup, .tx_queue_release = qede_tx_queue_release, .dev_start = qede_dev_start, @@ -2392,6 +2439,8 @@ static const struct eth_dev_ops qede_eth_dev_ops = { .filter_ctrl = qede_dev_filter_ctrl, .udp_tunnel_port_add = qede_udp_dst_port_add, .udp_tunnel_port_del = qede_udp_dst_port_del, + .fw_version_get = qede_fw_version_get, + .get_reg = qede_get_regs, }; static const struct eth_dev_ops qede_eth_vf_dev_ops = { @@ -2399,7 +2448,6 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = { .dev_infos_get = qede_dev_info_get, .rx_queue_setup = qede_rx_queue_setup, .rx_queue_release = qede_rx_queue_release, - .rx_descriptor_status = qede_rx_descriptor_status, .tx_queue_setup = qede_tx_queue_setup, .tx_queue_release = qede_tx_queue_release, .dev_start = qede_dev_start, @@ -2432,6 +2480,7 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = { .mac_addr_add = qede_mac_addr_add, .mac_addr_remove = qede_mac_addr_remove, .mac_addr_set = qede_mac_addr_set, + .fw_version_get = qede_fw_version_get, }; static void qede_update_pf_params(struct ecore_dev *edev) @@ -2444,6 +2493,24 @@ static void qede_update_pf_params(struct ecore_dev *edev) qed_ops->common->update_pf_params(edev, &pf_params); } +static void qede_generate_random_mac_addr(struct rte_ether_addr *mac_addr) +{ + uint64_t random; + + /* Set Organizationally Unique Identifier (OUI) prefix. */ + mac_addr->addr_bytes[0] = 0x00; + mac_addr->addr_bytes[1] = 0x09; + mac_addr->addr_bytes[2] = 0xC0; + + /* Force indication of locally assigned MAC address. */ + mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; + + /* Generate the last 3 bytes of the MAC address with a random number. 
*/
+ random = rte_rand();
+
+ memcpy(&mac_addr->addr_bytes[3], &random, 3);
+}
+
 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 {
 struct rte_pci_device *pci_dev;
@@ -2456,7 +2523,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 uint8_t bulletin_change;
 uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
 uint8_t is_mac_forced;
- bool is_mac_exist;
+ bool is_mac_exist = false;
 /* Fix up ecore debug level */
 uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
 uint8_t dp_level = ECORE_LEVEL_VERBOSE;
@@ -2482,6 +2549,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 }
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* @DPDK */
 edev->vendor_id = pci_dev->id.vendor_id;
@@ -2536,7 +2604,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
 QEDE_PMD_DRV_VER_STR_SIZE);
- qede_assign_rxtx_handlers(eth_dev);
+ qede_assign_rxtx_handlers(eth_dev, true);
 eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
 /* For CMT mode device do periodic polling for slowpath events.
@@ -2578,7 +2646,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 qede_alloc_etherdev(adapter, &dev_info);
 if (do_once) {
- qede_print_adapter_info(adapter);
+ qede_print_adapter_info(eth_dev);
 do_once = false;
 }
@@ -2634,12 +2702,24 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 DP_ERR(edev, "No VF macaddr assigned\n");
 }
 }
+
+ /* If MAC doesn't exist from PF, generate random one */
+ if (!is_mac_exist) {
+ struct rte_ether_addr *mac_addr;
+
+ mac_addr = (struct rte_ether_addr *)&vf_mac;
+ qede_generate_random_mac_addr(mac_addr);
+
+ rte_ether_addr_copy(mac_addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
+ &adapter->primary_mac);
+ }
 }
 eth_dev->dev_ops = (is_vf) ? 
&qede_eth_vf_dev_ops : &qede_eth_dev_ops; - - /* Bring-up the link */ - qede_dev_set_link_state(eth_dev, true); + eth_dev->rx_descriptor_status = qede_rx_descriptor_status; adapter->num_tx_queues = 0; adapter->num_rx_queues = 0; @@ -2670,6 +2750,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) adapter->vxlan.enable = false; adapter->geneve.enable = false; adapter->ipgre.enable = false; + qed_ops->sriov_configure(edev, pci_dev->max_vfs); } DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n", @@ -2686,7 +2767,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) err: if (do_once) { - qede_print_adapter_info(adapter); + qede_print_adapter_info(eth_dev); do_once = false; } return rc; @@ -2706,20 +2787,8 @@ static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; - PMD_INIT_FUNC_TRACE(edev); - - /* only uninitialize in the primary process */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; - - /* safe to close dev here */ qede_dev_close(eth_dev); - - eth_dev->dev_ops = NULL; - eth_dev->rx_pkt_burst = NULL; - eth_dev->tx_pkt_burst = NULL; - return 0; } @@ -2826,13 +2895,5 @@ RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci"); RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map); RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci"); - -RTE_INIT(qede_init_log) -{ - qede_logtype_init = rte_log_register("pmd.net.qede.init"); - if (qede_logtype_init >= 0) - rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE); - qede_logtype_driver = rte_log_register("pmd.net.qede.driver"); - if (qede_logtype_driver >= 0) - rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE); -} +RTE_LOG_REGISTER(qede_logtype_init, pmd.net.qede.init, NOTICE); +RTE_LOG_REGISTER(qede_logtype_driver, pmd.net.qede.driver, NOTICE);