diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 37544546a2..001166a545 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -7,10 +7,167 @@
  */
 
 #include "qede_ethdev.h"
+#include <rte_alarm.h>
+#include <rte_version.h>
 
 /* Globals */
 static const struct qed_eth_ops *qed_ops;
 static const char *drivername = "qede pmd";
+static int64_t timer_period = 1;
+
+struct rte_qede_xstats_name_off {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	uint64_t offset;
+};
+
+static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
+	{"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
+	{"rx_multicast_bytes",
+		offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
+	{"rx_broadcast_bytes",
+		offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
+	{"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
+	{"rx_multicast_packets",
+		offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
+	{"rx_broadcast_packets",
+		offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
+
+	{"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
+	{"tx_multicast_bytes",
+		offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
+	{"tx_broadcast_bytes",
+		offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
+	{"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
+	{"tx_multicast_packets",
+		offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
+	{"tx_broadcast_packets",
+		offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
+
+	{"rx_64_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
+	{"rx_65_to_127_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
+	{"rx_128_to_255_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
+	{"rx_256_to_511_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
+	{"rx_512_to_1023_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
+	{"rx_1024_to_1518_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
+	{"rx_1519_to_1522_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
+	{"rx_1519_to_2047_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
+	{"rx_2048_to_4095_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
+	{"rx_4096_to_9216_byte_packets",
+		offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
+	{"rx_9217_to_16383_byte_packets",
+		offsetof(struct ecore_eth_stats,
+			 rx_9217_to_16383_byte_packets)},
+	{"tx_64_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
+	{"tx_65_to_127_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
+	{"tx_128_to_255_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
+	{"tx_256_to_511_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
+	{"tx_512_to_1023_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
+	{"tx_1024_to_1518_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
+	{"tx_1519_to_2047_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
+	{"tx_2048_to_4095_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
+	{"tx_4096_to_9216_byte_packets",
+		offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
+	{"tx_9217_to_16383_byte_packets",
+		offsetof(struct ecore_eth_stats,
+			 tx_9217_to_16383_byte_packets)},
+
+	{"rx_mac_crtl_frames",
+		offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
+	{"tx_mac_control_frames",
+		offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
+	{"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
+	{"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
+	{"rx_priority_flow_control_frames",
+		offsetof(struct ecore_eth_stats, rx_pfc_frames)},
+	{"tx_priority_flow_control_frames",
+		offsetof(struct ecore_eth_stats, tx_pfc_frames)},
+
+	{"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
+	{"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
+	{"rx_carrier_errors",
+		offsetof(struct ecore_eth_stats, rx_carrier_errors)},
+	{"rx_oversize_packet_errors",
+		offsetof(struct ecore_eth_stats, rx_oversize_packets)},
+	{"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
+	{"rx_undersize_packet_errors",
+		offsetof(struct ecore_eth_stats, rx_undersize_packets)},
+	{"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
+	{"rx_host_buffer_not_available",
+		offsetof(struct ecore_eth_stats, no_buff_discards)},
+	/* Number of packets discarded because they are bigger than MTU */
+	{"rx_packet_too_big_discards",
+		offsetof(struct ecore_eth_stats, packet_too_big_discard)},
+	{"rx_ttl_zero_discards",
+		offsetof(struct ecore_eth_stats, ttl0_discard)},
+	{"rx_multi_function_tag_filter_discards",
+		offsetof(struct ecore_eth_stats, mftag_filter_discards)},
+	{"rx_mac_filter_discards",
+		offsetof(struct ecore_eth_stats, mac_filter_discards)},
+	{"rx_hw_buffer_truncates",
+		offsetof(struct ecore_eth_stats, brb_truncates)},
+	{"rx_hw_buffer_discards",
+		offsetof(struct ecore_eth_stats, brb_discards)},
+	{"tx_lpi_entry_count",
+		offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
+	{"tx_total_collisions",
+		offsetof(struct ecore_eth_stats, tx_total_collisions)},
+	{"tx_error_drop_packets",
+		offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
+
+	{"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
+	{"rx_mac_unicast_packets",
+		offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
+	{"rx_mac_multicast_packets",
+		offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
+	{"rx_mac_broadcast_packets",
+		offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
+	{"rx_mac_frames_ok",
+		offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
+	{"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
+	{"tx_mac_unicast_packets",
+		offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
+	{"tx_mac_multicast_packets",
+		offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
+	{"tx_mac_broadcast_packets",
+		offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
+
+	{"lro_coalesced_packets",
+		offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
+	{"lro_coalesced_events",
+		offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
+	{"lro_aborts_num",
+		offsetof(struct ecore_eth_stats, tpa_aborts_num)},
+	{"lro_not_coalesced_packets",
+		offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
+	{"lro_coalesced_bytes",
+		offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
+};
+
+static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
+	{"rx_q_segments",
+		offsetof(struct qede_rx_queue, rx_segs)},
+	{"rx_q_hw_errors",
+		offsetof(struct qede_rx_queue, rx_hw_errors)},
+	{"rx_q_allocation_errors",
+		offsetof(struct qede_rx_queue, rx_alloc_errors)}
+};
 
 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
 {
@@ -41,31 +198,28 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
 {
 	struct ecore_dev *edev = &qdev->edev;
 	struct qed_dev_info *info = &qdev->dev_info.common;
-	static char ver_str[QED_DRV_VER_STR_SIZE];
+	static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
+	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
 
 	DP_INFO(edev, "*********************************\n");
+	DP_INFO(edev, " DPDK version:%s\n", rte_version());
 	DP_INFO(edev, " Chip details : %s%d\n",
-		ECORE_IS_BB(edev) ? "BB" : "AH",
-		CHIP_REV_IS_A0(edev) ? 0 : 1);
-
-	sprintf(ver_str, "%s %s_%d.%d.%d.%d", QEDE_PMD_VER_PREFIX,
-		edev->ver_str, QEDE_PMD_VERSION_MAJOR, QEDE_PMD_VERSION_MINOR,
-		QEDE_PMD_VERSION_REVISION, QEDE_PMD_VERSION_PATCH);
-	strcpy(qdev->drv_ver, ver_str);
-	DP_INFO(edev, " Driver version : %s\n", ver_str);
-
-	sprintf(ver_str, "%d.%d.%d.%d", info->fw_major, info->fw_minor,
-		info->fw_rev, info->fw_eng);
+		ECORE_IS_BB(edev) ? "BB" : "AH",
+		CHIP_REV_IS_A0(edev) ? 0 : 1);
+	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
+		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
+	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
+		 ver_str, QEDE_PMD_VERSION);
+	DP_INFO(edev, " Driver version : %s\n", drv_ver);
 	DP_INFO(edev, " Firmware version : %s\n", ver_str);
 
-	sprintf(ver_str, "%d.%d.%d.%d",
+	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
+		 "%d.%d.%d.%d",
 		(info->mfw_rev >> 24) & 0xff,
 		(info->mfw_rev >> 16) & 0xff,
 		(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
-	DP_INFO(edev, " Management firmware version : %s\n", ver_str);
-
+	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
 	DP_INFO(edev, " Firmware file : %s\n", fw_file);
-
 	DP_INFO(edev, "*********************************\n");
 }
 
@@ -201,52 +355,6 @@ static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
 	}
 }
 
-void qede_config_rx_mode(struct rte_eth_dev *eth_dev)
-{
-	struct qede_dev *qdev = eth_dev->data->dev_private;
-	struct ecore_dev *edev = &qdev->edev;
-	/* TODO: - QED_FILTER_TYPE_UCAST */
-	enum qed_filter_rx_mode_type accept_flags =
-			QED_FILTER_RX_MODE_TYPE_REGULAR;
-	struct qed_filter_params rx_mode;
-	int rc;
-
-	/* Configure the struct for the Rx mode */
-	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
-	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
-
-	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE,
-				   eth_dev->data->mac_addrs[0].addr_bytes);
-	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
-		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
-	} else {
-		rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
-					   eth_dev->data->
-					   mac_addrs[0].addr_bytes);
-		if (rc) {
-			DP_ERR(edev, "Unable to add filter\n");
-			return;
-		}
-	}
-
-	/* take care of VLAN mode */
-	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
-		qede_config_accept_any_vlan(qdev, true);
-	} else if (!qdev->non_configured_vlans) {
-		/* If we dont have non-configured VLANs and promisc
-		 * is not set, then check if we need to disable
-		 * accept_any_vlan mode.
-		 * Because in this case, accept_any_vlan mode is set
-		 * as part of IFF_RPOMISC flag handling.
-		 */
-		qede_config_accept_any_vlan(qdev, false);
-	}
-	rx_mode.filter.accept_flags = accept_flags;
-	rc = qdev->ops->filter_config(edev, &rx_mode);
-	if (rc)
-		DP_ERR(edev, "Filter config failed rc=%d\n", rc);
-}
-
 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
 {
 	struct qed_update_vport_params vport_update_params;
@@ -271,16 +379,40 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
 
 	if (mask & ETH_VLAN_STRIP_MASK) {
-		if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rxmode->hw_vlan_strip)
 			(void)qede_vlan_stripping(eth_dev, 1);
 		else
 			(void)qede_vlan_stripping(eth_dev, 0);
 	}
 
-	DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n",
-		mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip);
+	if (mask & ETH_VLAN_FILTER_MASK) {
+		/* VLAN filtering kicks in when a VLAN is added */
+		if (rxmode->hw_vlan_filter) {
+			qede_vlan_filter_set(eth_dev, 0, 1);
+		} else {
+			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
+				DP_NOTICE(edev, false,
+					  " Please remove existing VLAN filters"
+					  " before disabling VLAN filtering\n");
+				/* Signal app that VLAN filtering is still
+				 * enabled
+				 */
+				rxmode->hw_vlan_filter = true;
+			} else {
+				qede_vlan_filter_set(eth_dev, 0, 0);
+			}
+		}
+	}
+
+	if (mask & ETH_VLAN_EXTEND_MASK)
+		DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
+			" and classification is based on outer tag only\n");
+
+	DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
+		mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
 }
 
 static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
@@ -305,70 +437,137 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
+	struct qede_vlan_entry *tmp = NULL;
+	struct qede_vlan_entry *vlan;
 	int rc;
 
-	if (vlan_id != 0 &&
-	    qdev->configured_vlans == dev_info->num_vlan_filters) {
-		DP_NOTICE(edev, false, "Reached max VLAN filter limit"
-				       " enabling accept_any_vlan\n");
-		qede_config_accept_any_vlan(qdev, true);
-		return 0;
-	}
-
 	if (on) {
+		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
+			DP_INFO(edev, "Reached max VLAN filter limit"
+				      " enabling accept_any_vlan\n");
+			qede_config_accept_any_vlan(qdev, true);
+			return 0;
+		}
+
+		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
+			if (tmp->vid == vlan_id) {
+				DP_ERR(edev, "VLAN %u already configured\n",
+				       vlan_id);
+				return -EEXIST;
+			}
+		}
+
+		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
+				  RTE_CACHE_LINE_SIZE);
+
+		if (!vlan) {
+			DP_ERR(edev, "Did not allocate memory for VLAN\n");
+			return -ENOMEM;
+		}
+
 		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
 					    vlan_id);
-		if (rc)
+		if (rc) {
 			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
 			       rc);
-		else
-			if (vlan_id != 0)
-				qdev->configured_vlans++;
+			rte_free(vlan);
+		} else {
+			vlan->vid = vlan_id;
+			SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
+			qdev->configured_vlans++;
+			DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
+				vlan_id, qdev->configured_vlans);
+		}
 	} else {
+		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
+			if (tmp->vid == vlan_id)
+				break;
+		}
+
+		if (!tmp) {
+			if (qdev->configured_vlans == 0) {
+				DP_INFO(edev,
+					"No VLAN filters configured yet\n");
+				return 0;
+			}
+
+			DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
+			return -EINVAL;
+		}
+
+		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
+
 		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
 					    vlan_id);
-		if (rc)
+		if (rc) {
 			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
 			       vlan_id, rc);
-		else
-			if (vlan_id != 0)
-				qdev->configured_vlans--;
+		} else {
+			qdev->configured_vlans--;
+			DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
+				vlan_id, qdev->configured_vlans);
+		}
 	}
 
-	DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n",
-		vlan_id, on, rc, qdev->configured_vlans);
-
 	return rc;
 }
 
+static int qede_init_vport(struct qede_dev *qdev)
+{
+	struct ecore_dev *edev = &qdev->edev;
+	struct qed_start_vport_params start = {0};
+	int rc;
+
+	start.remove_inner_vlan = 1;
+	start.gro_enable = 0;
+	start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
+	start.vport_id = 0;
+	start.drop_ttl0 = false;
+	start.clear_stats = 1;
+	start.handle_ptp_pkts = 0;
+
+	rc = qdev->ops->vport_start(edev, &start);
+	if (rc) {
+		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+		return rc;
+	}
+
+	DP_INFO(edev,
+		"Start vport ramrod passed, vport_id = %d, MTU = %u\n",
+		start.vport_id, ETHER_MTU);
+
+	return 0;
+}
+
 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = eth_dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
 	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+	int rc, i, j;
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	if (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) {
-		DP_NOTICE(edev, false,
-			  "Unequal number of rx/tx queues "
-			  "is not supported RX=%u TX=%u\n",
-			  eth_dev->data->nb_rx_queues,
-			  eth_dev->data->nb_tx_queues);
-		return -EINVAL;
-	}
-
-	qdev->num_rss = eth_dev->data->nb_rx_queues;
+	/* Check requirements for 100G mode */
+	if (edev->num_hwfns > 1) {
+		if (eth_dev->data->nb_rx_queues < 2 ||
+		    eth_dev->data->nb_tx_queues < 2) {
+			DP_NOTICE(edev, false,
+				  "100G mode needs min. 2 RX/TX queues\n");
+			return -EINVAL;
+		}
 
-	/* Initial state */
-	qdev->state = QEDE_CLOSE;
+		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
+		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
+			DP_NOTICE(edev, false,
+				  "100G mode needs even no. of RX/TX queues\n");
+			return -EINVAL;
+		}
+	}
 
 	/* Sanity checks and throw warnings */
-
-	if (rxmode->enable_scatter == 1) {
-		DP_ERR(edev, "RX scatter packets is not supported\n");
-		return -EINVAL;
-	}
+	if (rxmode->enable_scatter == 1)
+		eth_dev->data->scattered_rx = 1;
 
 	if (rxmode->enable_lro == 1) {
 		DP_INFO(edev, "LRO is not supported\n");
@@ -382,16 +581,48 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
 			      "in hw\n");
 
+	/* Check for the port restart case */
+	if (qdev->state != QEDE_DEV_INIT) {
+		rc = qdev->ops->vport_stop(edev, 0);
+		if (rc != 0)
+			return rc;
+		qede_dealloc_fp_resc(eth_dev);
+	}
+
+	qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
+	qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
+	qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
+
+	/* Fastpath status block should be initialized before sending
+	 * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
+	 */
+	rc = qede_alloc_fp_resc(qdev);
+	if (rc != 0)
+		return rc;
+
+	/* Issue VPORT-START with default config values to allow
+	 * other port configurations early on.
+	 */
+	rc = qede_init_vport(qdev);
+	if (rc != 0)
+		return rc;
+
+	SLIST_INIT(&qdev->vlan_list_head);
+
+	/* Add primary mac for PF */
+	if (IS_PF(edev))
+		qede_mac_addr_set(eth_dev, &qdev->primary_mac);
+
+	/* Enable VLAN offloads by default */
+	qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
+				       ETH_VLAN_FILTER_MASK |
+				       ETH_VLAN_EXTEND_MASK);
 
-	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
-		QEDE_RSS_CNT(qdev), qdev->num_tc);
+	qdev->state = QEDE_DEV_CONFIG;
 
-	DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u"
-		" port %u first_on_engine %d\n",
-		edev->hwfns[0].my_id,
-		edev->hwfns[0].rel_pf_id,
-		edev->hwfns[0].abs_pf_id,
-		edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine);
+	DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
+		(int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
+		qdev->num_tc);
 
 	return 0;
 }
@@ -415,6 +646,8 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 {
 	struct qede_dev *qdev = eth_dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
+	struct qed_link_output link;
+	uint32_t speed_cap = 0;
 
 	PMD_INIT_FUNC_TRACE(edev);
 
@@ -447,7 +680,21 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 				     DEV_TX_OFFLOAD_UDP_CKSUM |
 				     DEV_TX_OFFLOAD_TCP_CKSUM);
 
-	dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
+	memset(&link, 0, sizeof(struct qed_link_output));
+	qdev->ops->common->get_link(edev, &link);
+	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+		speed_cap |= ETH_LINK_SPEED_1G;
+	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+		speed_cap |= ETH_LINK_SPEED_10G;
+	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+		speed_cap |= ETH_LINK_SPEED_25G;
+	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+		speed_cap |= ETH_LINK_SPEED_40G;
+	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+		speed_cap |= ETH_LINK_SPEED_50G;
+	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+		speed_cap |= ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = speed_cap;
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -540,10 +787,31 @@ static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
 		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
 }
 
+static void qede_poll_sp_sb_cb(void *param)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	int rc;
+
+	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+	qede_interrupt_action(&edev->hwfns[1]);
+
+	rc = rte_eal_alarm_set(timer_period * US_PER_S,
+			       qede_poll_sp_sb_cb,
+			       (void *)eth_dev);
+	if (rc != 0) {
+		DP_ERR(edev, "Unable to start periodic"
+			     " timer rc %d\n", rc);
+		assert(false && "Unable to start periodic timer");
+	}
+}
+
 static void qede_dev_close(struct rte_eth_dev *eth_dev)
 {
-	struct qede_dev *qdev = eth_dev->data->dev_private;
-	struct ecore_dev *edev = &qdev->edev;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	int rc;
 
 	PMD_INIT_FUNC_TRACE(edev);
 
@@ -552,16 +820,16 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 	 * by the app without reconfiguration. However, in dev_close() we
 	 * can release all the resources and device can be brought up newly
 	 */
-	if (qdev->state != QEDE_STOP)
+	if (qdev->state != QEDE_DEV_STOP)
 		qede_dev_stop(eth_dev);
 	else
 		DP_INFO(edev, "Device is already stopped\n");
 
-	qede_free_mem_load(qdev);
+	rc = qdev->ops->vport_stop(edev, 0);
+	if (rc != 0)
+		DP_ERR(edev, "Failed to stop VPORT\n");
 
-	qede_free_fp_arrays(qdev);
-
-	qede_dev_set_link_state(eth_dev, false);
+	qede_dealloc_fp_resc(eth_dev);
 
 	qdev->ops->common->slowpath_stop(edev);
 
@@ -572,7 +840,10 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 	rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
 				     qede_interrupt_handler, (void *)eth_dev);
 
-	qdev->state = QEDE_CLOSE;
+	if (edev->num_hwfns > 1)
+		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
+
+	qdev->state = QEDE_DEV_INIT; /* Go back to init state */
 }
 
 static void
@@ -581,6 +852,8 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 	struct qede_dev *qdev = eth_dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
 	struct ecore_eth_stats stats;
+	unsigned int i = 0, j = 0, qid;
+	struct qede_tx_queue *txq;
 
 	qdev->ops->get_vport_stats(edev, &stats);
 
@@ -612,14 +885,118 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 
 	eth_stats->oerrors = stats.tx_err_drop_pkts;
 
-	DP_INFO(edev,
-		"no_buff_discards=%" PRIu64 ""
-		" mac_filter_discards=%" PRIu64 ""
-		" brb_truncates=%" PRIu64 ""
-		" brb_discards=%" PRIu64 "\n",
-		stats.no_buff_discards,
-		stats.mac_filter_discards,
-		stats.brb_truncates, stats.brb_discards);
+	/* Queue stats */
+	for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+		if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+			eth_stats->q_ipackets[i] =
+				*(uint64_t *)(
+					((char *)(qdev->fp_array[(qid)].rxq)) +
+					offsetof(struct qede_rx_queue,
+						 rcv_pkts));
+			eth_stats->q_errors[i] =
+				*(uint64_t *)(
+					((char *)(qdev->fp_array[(qid)].rxq)) +
+					offsetof(struct qede_rx_queue,
+						 rx_hw_errors)) +
+				*(uint64_t *)(
+					((char *)(qdev->fp_array[(qid)].rxq)) +
+					offsetof(struct qede_rx_queue,
+						 rx_alloc_errors));
+			i++;
+		}
+
+		if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
+			txq = qdev->fp_array[(qid)].txqs[0];
+			eth_stats->q_opackets[j] =
+				*((uint64_t *)(uintptr_t)
+					(((uint64_t)(uintptr_t)(txq)) +
+					 offsetof(struct qede_tx_queue,
+						  xmit_pkts)));
+			j++;
+		}
+	}
+}
+
+static unsigned
+qede_get_xstats_count(struct qede_dev *qdev) {
+	return RTE_DIM(qede_xstats_strings) +
+	       (RTE_DIM(qede_rxq_xstats_strings) * QEDE_RSS_COUNT(qdev));
+}
+
+static int
+qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
+		      struct rte_eth_xstat_name *xstats_names, unsigned limit)
+{
+	struct qede_dev *qdev = dev->data->dev_private;
+	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
+	unsigned int i, qid, stat_idx = 0;
+
+	if (xstats_names != NULL) {
+		for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+			snprintf(xstats_names[stat_idx].name,
+				sizeof(xstats_names[stat_idx].name),
+				"%s",
+				qede_xstats_strings[i].name);
+			stat_idx++;
+		}
+
+		for (qid = 0; qid < QEDE_RSS_COUNT(qdev); qid++) {
+			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+				snprintf(xstats_names[stat_idx].name,
+					sizeof(xstats_names[stat_idx].name),
+					"%.4s%d%s",
+					qede_rxq_xstats_strings[i].name, qid,
+					qede_rxq_xstats_strings[i].name + 4);
+				stat_idx++;
+			}
+		}
+	}
+
+	return stat_cnt;
+}
+
+static int
+qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+		unsigned int n)
+{
+	struct qede_dev *qdev = dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+	struct ecore_eth_stats stats;
+	const unsigned int num = qede_get_xstats_count(qdev);
+	unsigned int i, qid, stat_idx = 0;
+
+	if (n < num)
+		return num;
+
+	qdev->ops->get_vport_stats(edev, &stats);
+
+	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
+					     qede_xstats_strings[i].offset);
+		stat_idx++;
+	}
+
+	for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+		if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+				xstats[stat_idx].value = *(uint64_t *)(
+					((char *)(qdev->fp_array[(qid)].rxq)) +
+					 qede_rxq_xstats_strings[i].offset);
+				stat_idx++;
+			}
+		}
+	}
+
+	return stat_idx;
+}
+
+static void
+qede_reset_xstats(struct rte_eth_dev *dev)
+{
+	struct qede_dev *qdev = dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	ecore_reset_vport_stats(edev);
 }
 
 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
@@ -753,42 +1130,51 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 	return NULL;
 }
 
-int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
-			 struct rte_eth_rss_conf *rss_conf)
+void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
+{
+	*rss_caps = 0;
+	*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
+	*rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
+	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
+	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+}
+
+static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+				struct rte_eth_rss_conf *rss_conf)
 {
 	struct qed_update_vport_params vport_update_params;
 	struct qede_dev *qdev = eth_dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
-	uint8_t rss_caps;
 	uint32_t *key = (uint32_t *)rss_conf->rss_key;
 	uint64_t hf = rss_conf->rss_hf;
 	int i;
 
-	if (hf == 0)
-		DP_ERR(edev, "hash function 0 will disable RSS\n");
+	memset(&vport_update_params, 0, sizeof(vport_update_params));
 
-	rss_caps = 0;
-	rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
-	rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
-	rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
-	rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
-	rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
-	rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+	if (hf != 0) {
+		/* Enable RSS */
+		qede_init_rss_caps(&qdev->rss_params.rss_caps, hf);
+		memcpy(&vport_update_params.rss_params, &qdev->rss_params,
+		       sizeof(vport_update_params.rss_params));
+		if (key)
+			memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
+			       rss_conf->rss_key_len);
+		vport_update_params.update_rss_flg = 1;
+		qdev->rss_enabled = 1;
+	} else {
+		/* Disable RSS */
+		qdev->rss_enabled = 0;
+	}
 
 	/* If the mapping doesn't fit any supported, return */
-	if (rss_caps == 0 && hf != 0)
+	if (qdev->rss_params.rss_caps == 0 && hf != 0)
 		return -EINVAL;
 
-	memset(&vport_update_params, 0, sizeof(vport_update_params));
-
-	if (key != NULL)
-		memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
-		       rss_conf->rss_key_len);
+	DP_INFO(edev, "%s\n", (vport_update_params.update_rss_flg) ?
+ "Enabling RSS" : "Disabling RSS"); - qdev->rss_params.rss_caps = rss_caps; - memcpy(&vport_update_params.rss_params, &qdev->rss_params, - sizeof(vport_update_params.rss_params)); - vport_update_params.update_rss_flg = 1; vport_update_params.vport_id = 0; return qdev->ops->vport_update(edev, &vport_update_params); @@ -826,9 +1212,9 @@ int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev, return 0; } -int qede_rss_reta_update(struct rte_eth_dev *eth_dev, - struct rte_eth_rss_reta_entry64 *reta_conf, - uint16_t reta_size) +static int qede_rss_reta_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) { struct qed_update_vport_params vport_update_params; struct qede_dev *qdev = eth_dev->data->dev_private; @@ -885,6 +1271,38 @@ int qede_rss_reta_query(struct rte_eth_dev *eth_dev, return 0; } +int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + uint32_t frame_size; + struct qede_dev *qdev = dev->data->dev_private; + struct rte_eth_dev_info dev_info = {0}; + + qede_dev_info_get(dev, &dev_info); + + /* VLAN_TAG = 4 */ + frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4; + + if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) + return -EINVAL; + + if (!dev->data->scattered_rx && + frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) + return -EINVAL; + + if (frame_size > ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.jumbo_frame = 1; + else + dev->data->dev_conf.rxmode.jumbo_frame = 0; + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + qdev->mtu = mtu; + qede_dev_stop(dev); + qede_dev_start(dev); + + return 0; +} + static const struct eth_dev_ops qede_eth_dev_ops = { .dev_configure = qede_dev_configure, .dev_infos_get = qede_dev_info_get, @@ -904,6 +1322,9 @@ static const struct eth_dev_ops qede_eth_dev_ops = { .dev_close = qede_dev_close, .stats_get = qede_get_stats, .stats_reset = qede_reset_stats, + .xstats_get = qede_get_xstats, + .xstats_reset = qede_reset_xstats, + .xstats_get_names = qede_get_xstats_names, .mac_addr_add = qede_mac_addr_add, .mac_addr_remove = qede_mac_addr_remove, .mac_addr_set = qede_mac_addr_set, @@ -916,6 +1337,7 @@ static const struct eth_dev_ops qede_eth_dev_ops = { .rss_hash_conf_get = qede_rss_hash_conf_get, .reta_update = qede_rss_reta_update, .reta_query = qede_rss_reta_query, + .mtu_set = qede_set_mtu, }; static const struct eth_dev_ops qede_eth_vf_dev_ops = { @@ -937,6 +1359,9 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = { .dev_close = qede_dev_close, .stats_get = qede_get_stats, .stats_reset = qede_reset_stats, + .xstats_get = qede_get_xstats, + .xstats_reset = qede_reset_xstats, + .xstats_get_names = qede_get_xstats_names, .vlan_offload_set = qede_vlan_offload_set, .vlan_filter_set = qede_vlan_filter_set, .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, @@ -944,6 +1369,7 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = { .rss_hash_conf_get = qede_rss_hash_conf_get, .reta_update = qede_rss_reta_update, .reta_query = qede_rss_reta_query, + .mtu_set = qede_set_mtu, }; static void qede_update_pf_params(struct ecore_dev *edev) @@ -963,7 +1389,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) struct ecore_dev *edev; struct qed_dev_eth_info dev_info; struct qed_slowpath_params params; - uint32_t qed_ver; static bool do_once = true; uint8_t bulletin_change; uint8_t vf_mac[ETHER_ADDR_LEN]; @@ -999,8 +1424,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
-	qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
-
 	qed_ops = qed_get_eth_ops();
 	if (!qed_ops) {
 		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
@@ -1030,15 +1453,33 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	/* Start the Slowpath-process */
 	memset(&params, 0, sizeof(struct qed_slowpath_params));
 	params.int_mode = ECORE_INT_MODE_MSIX;
-	params.drv_major = QEDE_MAJOR_VERSION;
-	params.drv_minor = QEDE_MINOR_VERSION;
-	params.drv_rev = QEDE_REVISION_VERSION;
-	params.drv_eng = QEDE_ENGINEERING_VERSION;
-	strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+	params.drv_major = QEDE_PMD_VERSION_MAJOR;
+	params.drv_minor = QEDE_PMD_VERSION_MINOR;
+	params.drv_rev = QEDE_PMD_VERSION_REVISION;
+	params.drv_eng = QEDE_PMD_VERSION_PATCH;
+	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
+		QEDE_PMD_DRV_VER_STR_SIZE);
+
+	/* For CMT mode device do periodic polling for slowpath events.
+	 * This is required since uio device uses only one MSI-x
+	 * interrupt vector but we need one for each engine.
+	 */
+	if (edev->num_hwfns > 1 && IS_PF(edev)) {
+		rc = rte_eal_alarm_set(timer_period * US_PER_S,
+				       qede_poll_sp_sb_cb,
+				       (void *)eth_dev);
+		if (rc != 0) {
+			DP_ERR(edev, "Unable to start periodic"
+				     " timer rc %d\n", rc);
+			return -EINVAL;
+		}
+	}
 
 	rc = qed_ops->common->slowpath_start(edev, &params);
 	if (rc) {
 		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
+		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+				     (void *)eth_dev);
 		return -ENODEV;
 	}
 
@@ -1047,12 +1488,14 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
 		qed_ops->common->slowpath_stop(edev);
 		qed_ops->common->remove(edev);
+		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+				     (void *)eth_dev);
 		return -ENODEV;
 	}
 
 	qede_alloc_etherdev(adapter, &dev_info);
 
-	adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);
+	adapter->ops->common->set_id(edev, edev->name, QEDE_PMD_VERSION);
 
 	if (!is_vf)
 		adapter->dev_info.num_mac_addrs =
@@ -1072,6 +1515,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 		DP_ERR(edev, "Failed to allocate MAC address\n");
 		qed_ops->common->slowpath_stop(edev);
 		qed_ops->common->remove(edev);
+		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
+				     (void *)eth_dev);
 		return -ENOMEM;
 	}
 
@@ -1110,6 +1555,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 		do_once = false;
 	}
 
+	adapter->state = QEDE_DEV_INIT;
+
 	DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
 		  adapter->primary_mac.addr_bytes[0],
 		  adapter->primary_mac.addr_bytes[1],
@@ -1187,16 +1634,20 @@ static struct rte_pci_id pci_id_qede_map[] = {
 	{
 		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
 	},
+	{
+		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
+	},
 	{.vendor_id = 0,}
 };
 
 static struct eth_driver rte_qedevf_pmd = {
 	.pci_drv = {
-		    .name = "rte_qedevf_pmd",
 		    .id_table = pci_id_qedevf_map,
 		    .drv_flags =
 		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
-		    },
+		    .probe = rte_eth_dev_pci_probe,
+		    .remove = rte_eth_dev_pci_remove,
+		    },
 	.eth_dev_init = qedevf_eth_dev_init,
 	.eth_dev_uninit = qedevf_eth_dev_uninit,
 	.dev_private_size = sizeof(struct qede_dev),
@@ -1204,43 +1655,20 @@ static struct eth_driver rte_qede_pmd = {
 	.pci_drv = {
-		    .name = "rte_qede_pmd",
 		    .id_table = pci_id_qede_map,
 		    .drv_flags =
 		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
-		    },
+		    .probe = rte_eth_dev_pci_probe,
+		    .remove = rte_eth_dev_pci_remove,
+		    },
.eth_dev_init = qede_eth_dev_init, .eth_dev_uninit = qede_eth_dev_uninit, .dev_private_size = sizeof(struct qede_dev), }; -static int -rte_qedevf_pmd_init(const char *name __rte_unused, - const char *params __rte_unused) -{ - rte_eth_driver_register(&rte_qedevf_pmd); - - return 0; -} - -static int -rte_qede_pmd_init(const char *name __rte_unused, - const char *params __rte_unused) -{ - rte_eth_driver_register(&rte_qede_pmd); - - return 0; -} - -static struct rte_driver rte_qedevf_driver = { - .type = PMD_PDEV, - .init = rte_qede_pmd_init -}; - -static struct rte_driver rte_qede_driver = { - .type = PMD_PDEV, - .init = rte_qedevf_pmd_init -}; - -PMD_REGISTER_DRIVER(rte_qede_driver); -PMD_REGISTER_DRIVER(rte_qedevf_driver); +RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv); +RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map); +RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio"); +RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv); +RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map); +RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio");
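
Note: the hunks above expose the new counters through the standard ethdev xstats ops (.xstats_get, .xstats_get_names, .xstats_reset) rather than through any driver-private interface. The sketch below shows how an application of the same DPDK generation could dump them; it is illustrative only and assumes a port that has already been configured and started. As in qede_get_xstats() above, a query with a too-small (or NULL/0) array returns the required count, and names and values are index-aligned.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Dump every extended statistic of a port, e.g. the per-queue
 * "rx_q_hw_errors" or the "lro_coalesced_packets" counters that
 * this patch adds to the qede PMD.
 */
static void
dump_xstats(uint8_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *values = NULL;
	int n, i;

	/* First pass: ask the PMD how many stats it exposes. */
	n = rte_eth_xstats_get(port_id, NULL, 0);
	if (n <= 0)
		return;

	names = calloc(n, sizeof(*names));
	values = calloc(n, sizeof(*values));
	if (names == NULL || values == NULL)
		goto out;

	/* Second pass: fetch names and values; both arrays use the
	 * same ordering, so entry i of each describes one counter.
	 */
	if (rte_eth_xstats_get_names(port_id, names, n) != n ||
	    rte_eth_xstats_get(port_id, values, n) != n)
		goto out;

	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, values[i].value);

out:
	free(names);
	free(values);
}

A subsequent rte_eth_xstats_reset(port_id) lands in qede_reset_xstats(), which simply clears the vport counters via ecore_reset_vport_stats().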