X-Git-Url: http://git.droids-corp.org/?p=dpdk.git;a=blobdiff_plain;f=drivers%2Fnet%2Fqede%2Fqede_ethdev.c;h=6f5ba2a9270c754674a578569196bd34ebb07fb6;hp=bd190d073a5396c01ae0dc683f0478b2b06ae7f4;hb=1282943aa05b2f211bd1b831b9d2962859323063;hpb=69d7ba88f1a1a7a5d65003d33d887c75a4f192e4 diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c index bd190d073a..6f5ba2a927 100644 --- a/drivers/net/qede/qede_ethdev.c +++ b/drivers/net/qede/qede_ethdev.c @@ -125,143 +125,199 @@ struct rte_qede_xstats_name_off { }; static const struct rte_qede_xstats_name_off qede_xstats_strings[] = { - {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)}, + {"rx_unicast_bytes", + offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)}, {"rx_multicast_bytes", - offsetof(struct ecore_eth_stats, rx_mcast_bytes)}, + offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)}, {"rx_broadcast_bytes", - offsetof(struct ecore_eth_stats, rx_bcast_bytes)}, - {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)}, + offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)}, + {"rx_unicast_packets", + offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)}, {"rx_multicast_packets", - offsetof(struct ecore_eth_stats, rx_mcast_pkts)}, + offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)}, {"rx_broadcast_packets", - offsetof(struct ecore_eth_stats, rx_bcast_pkts)}, + offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)}, - {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)}, + {"tx_unicast_bytes", + offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)}, {"tx_multicast_bytes", - offsetof(struct ecore_eth_stats, tx_mcast_bytes)}, + offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)}, {"tx_broadcast_bytes", - offsetof(struct ecore_eth_stats, tx_bcast_bytes)}, - {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)}, + offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)}, + {"tx_unicast_packets", + offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)}, {"tx_multicast_packets", - offsetof(struct ecore_eth_stats, tx_mcast_pkts)}, + offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)}, {"tx_broadcast_packets", - offsetof(struct ecore_eth_stats, tx_bcast_pkts)}, + offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)}, {"rx_64_byte_packets", - offsetof(struct ecore_eth_stats, rx_64_byte_packets)}, + offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)}, {"rx_65_to_127_byte_packets", - offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)}, + offsetof(struct ecore_eth_stats_common, + rx_65_to_127_byte_packets)}, {"rx_128_to_255_byte_packets", - offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)}, + offsetof(struct ecore_eth_stats_common, + rx_128_to_255_byte_packets)}, {"rx_256_to_511_byte_packets", - offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)}, + offsetof(struct ecore_eth_stats_common, + rx_256_to_511_byte_packets)}, {"rx_512_to_1023_byte_packets", - offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)}, + offsetof(struct ecore_eth_stats_common, + rx_512_to_1023_byte_packets)}, {"rx_1024_to_1518_byte_packets", - offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)}, - {"rx_1519_to_1522_byte_packets", - offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)}, - {"rx_1519_to_2047_byte_packets", - offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)}, - {"rx_2048_to_4095_byte_packets", - offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)}, - 
{"rx_4096_to_9216_byte_packets", - offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)}, - {"rx_9217_to_16383_byte_packets", - offsetof(struct ecore_eth_stats, - rx_9217_to_16383_byte_packets)}, + offsetof(struct ecore_eth_stats_common, + rx_1024_to_1518_byte_packets)}, {"tx_64_byte_packets", - offsetof(struct ecore_eth_stats, tx_64_byte_packets)}, + offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)}, {"tx_65_to_127_byte_packets", - offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)}, + offsetof(struct ecore_eth_stats_common, + tx_65_to_127_byte_packets)}, {"tx_128_to_255_byte_packets", - offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)}, + offsetof(struct ecore_eth_stats_common, + tx_128_to_255_byte_packets)}, {"tx_256_to_511_byte_packets", - offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)}, + offsetof(struct ecore_eth_stats_common, + tx_256_to_511_byte_packets)}, {"tx_512_to_1023_byte_packets", - offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)}, + offsetof(struct ecore_eth_stats_common, + tx_512_to_1023_byte_packets)}, {"tx_1024_to_1518_byte_packets", - offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)}, - {"trx_1519_to_1522_byte_packets", - offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)}, - {"tx_2048_to_4095_byte_packets", - offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)}, - {"tx_4096_to_9216_byte_packets", - offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)}, - {"tx_9217_to_16383_byte_packets", - offsetof(struct ecore_eth_stats, - tx_9217_to_16383_byte_packets)}, + offsetof(struct ecore_eth_stats_common, + tx_1024_to_1518_byte_packets)}, {"rx_mac_crtl_frames", - offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)}, + offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)}, {"tx_mac_control_frames", - offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)}, - {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)}, - {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)}, + offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)}, + {"rx_pause_frames", + offsetof(struct ecore_eth_stats_common, rx_pause_frames)}, + {"tx_pause_frames", + offsetof(struct ecore_eth_stats_common, tx_pause_frames)}, {"rx_priority_flow_control_frames", - offsetof(struct ecore_eth_stats, rx_pfc_frames)}, + offsetof(struct ecore_eth_stats_common, rx_pfc_frames)}, {"tx_priority_flow_control_frames", - offsetof(struct ecore_eth_stats, tx_pfc_frames)}, + offsetof(struct ecore_eth_stats_common, tx_pfc_frames)}, - {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)}, - {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)}, + {"rx_crc_errors", + offsetof(struct ecore_eth_stats_common, rx_crc_errors)}, + {"rx_align_errors", + offsetof(struct ecore_eth_stats_common, rx_align_errors)}, {"rx_carrier_errors", - offsetof(struct ecore_eth_stats, rx_carrier_errors)}, + offsetof(struct ecore_eth_stats_common, rx_carrier_errors)}, {"rx_oversize_packet_errors", - offsetof(struct ecore_eth_stats, rx_oversize_packets)}, - {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)}, + offsetof(struct ecore_eth_stats_common, rx_oversize_packets)}, + {"rx_jabber_errors", + offsetof(struct ecore_eth_stats_common, rx_jabbers)}, {"rx_undersize_packet_errors", - offsetof(struct ecore_eth_stats, rx_undersize_packets)}, - {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)}, + offsetof(struct ecore_eth_stats_common, 
rx_undersize_packets)}, + {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)}, {"rx_host_buffer_not_available", - offsetof(struct ecore_eth_stats, no_buff_discards)}, + offsetof(struct ecore_eth_stats_common, no_buff_discards)}, /* Number of packets discarded because they are bigger than MTU */ {"rx_packet_too_big_discards", - offsetof(struct ecore_eth_stats, packet_too_big_discard)}, + offsetof(struct ecore_eth_stats_common, + packet_too_big_discard)}, {"rx_ttl_zero_discards", - offsetof(struct ecore_eth_stats, ttl0_discard)}, + offsetof(struct ecore_eth_stats_common, ttl0_discard)}, {"rx_multi_function_tag_filter_discards", - offsetof(struct ecore_eth_stats, mftag_filter_discards)}, + offsetof(struct ecore_eth_stats_common, mftag_filter_discards)}, {"rx_mac_filter_discards", - offsetof(struct ecore_eth_stats, mac_filter_discards)}, + offsetof(struct ecore_eth_stats_common, mac_filter_discards)}, {"rx_hw_buffer_truncates", - offsetof(struct ecore_eth_stats, brb_truncates)}, + offsetof(struct ecore_eth_stats_common, brb_truncates)}, {"rx_hw_buffer_discards", - offsetof(struct ecore_eth_stats, brb_discards)}, - {"tx_lpi_entry_count", - offsetof(struct ecore_eth_stats, tx_lpi_entry_count)}, - {"tx_total_collisions", - offsetof(struct ecore_eth_stats, tx_total_collisions)}, + offsetof(struct ecore_eth_stats_common, brb_discards)}, {"tx_error_drop_packets", - offsetof(struct ecore_eth_stats, tx_err_drop_pkts)}, + offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)}, - {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)}, + {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)}, {"rx_mac_unicast_packets", - offsetof(struct ecore_eth_stats, rx_mac_uc_packets)}, + offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)}, {"rx_mac_multicast_packets", - offsetof(struct ecore_eth_stats, rx_mac_mc_packets)}, + offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)}, {"rx_mac_broadcast_packets", - offsetof(struct ecore_eth_stats, rx_mac_bc_packets)}, + offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)}, {"rx_mac_frames_ok", - offsetof(struct ecore_eth_stats, rx_mac_frames_ok)}, - {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)}, + offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)}, + {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)}, {"tx_mac_unicast_packets", - offsetof(struct ecore_eth_stats, tx_mac_uc_packets)}, + offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)}, {"tx_mac_multicast_packets", - offsetof(struct ecore_eth_stats, tx_mac_mc_packets)}, + offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)}, {"tx_mac_broadcast_packets", - offsetof(struct ecore_eth_stats, tx_mac_bc_packets)}, + offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)}, {"lro_coalesced_packets", - offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)}, + offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)}, {"lro_coalesced_events", - offsetof(struct ecore_eth_stats, tpa_coalesced_events)}, + offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)}, {"lro_aborts_num", - offsetof(struct ecore_eth_stats, tpa_aborts_num)}, + offsetof(struct ecore_eth_stats_common, tpa_aborts_num)}, {"lro_not_coalesced_packets", - offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)}, + offsetof(struct ecore_eth_stats_common, + tpa_not_coalesced_pkts)}, {"lro_coalesced_bytes", - offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)}, + offsetof(struct ecore_eth_stats_common, + 
tpa_coalesced_bytes)}, +}; + +static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = { + {"rx_1519_to_1522_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + rx_1519_to_1522_byte_packets)}, + {"rx_1519_to_2047_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + rx_1519_to_2047_byte_packets)}, + {"rx_2048_to_4095_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + rx_2048_to_4095_byte_packets)}, + {"rx_4096_to_9216_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + rx_4096_to_9216_byte_packets)}, + {"rx_9217_to_16383_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + rx_9217_to_16383_byte_packets)}, + + {"tx_1519_to_2047_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + tx_1519_to_2047_byte_packets)}, + {"tx_2048_to_4095_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + tx_2048_to_4095_byte_packets)}, + {"tx_4096_to_9216_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + tx_4096_to_9216_byte_packets)}, + {"tx_9217_to_16383_byte_packets", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, + tx_9217_to_16383_byte_packets)}, + + {"tx_lpi_entry_count", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)}, + {"tx_total_collisions", + offsetof(struct ecore_eth_stats, bb) + + offsetof(struct ecore_eth_stats_bb, tx_total_collisions)}, +}; + +static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = { + {"rx_1519_to_max_byte_packets", + offsetof(struct ecore_eth_stats, ah) + + offsetof(struct ecore_eth_stats_ah, + rx_1519_to_max_byte_packets)}, + {"tx_1519_to_max_byte_packets", + offsetof(struct ecore_eth_stats, ah) + + offsetof(struct ecore_eth_stats_ah, + tx_1519_to_max_byte_packets)}, }; static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = { @@ -279,14 +335,14 @@ static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) } static void -qede_interrupt_handler(struct rte_intr_handle *handle, void *param) +qede_interrupt_handler(void *param) { struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; qede_interrupt_action(ECORE_LEADING_HWFN(edev)); - if (rte_intr_enable(handle)) + if (rte_intr_enable(eth_dev->intr_handle)) DP_ERR(edev, "rte_intr_enable failed\n"); } @@ -294,10 +350,10 @@ static void qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info) { rte_memcpy(&qdev->dev_info, info, sizeof(*info)); - qdev->num_tc = qdev->dev_info.num_tc; qdev->ops = qed_ops; } +#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO static void qede_print_adapter_info(struct qede_dev *qdev) { struct ecore_dev *edev = &qdev->edev; @@ -307,9 +363,10 @@ static void qede_print_adapter_info(struct qede_dev *qdev) DP_INFO(edev, "*********************************\n"); DP_INFO(edev, " DPDK version:%s\n", rte_version()); - DP_INFO(edev, " Chip details : %s%d\n", + DP_INFO(edev, " Chip details : %s %c%d\n", ECORE_IS_BB(edev) ? "BB" : "AH", - CHIP_REV_IS_A0(edev) ? 
0 : 1); + 'A' + edev->chip_rev, + (int)edev->chip_metal); snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d", info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng); snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s", @@ -326,6 +383,187 @@ static void qede_print_adapter_info(struct qede_dev *qdev) DP_INFO(edev, " Firmware file : %s\n", fw_file); DP_INFO(edev, "*********************************\n"); } +#endif + +static int +qede_start_vport(struct qede_dev *qdev, uint16_t mtu) +{ + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_sp_vport_start_params params; + struct ecore_hwfn *p_hwfn; + int rc; + int i; + + memset(¶ms, 0, sizeof(params)); + params.vport_id = 0; + params.mtu = mtu; + /* @DPDK - Disable FW placement */ + params.zero_placement_offset = 1; + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + params.concrete_fid = p_hwfn->hw_info.concrete_fid; + params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_start(p_hwfn, ¶ms); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Start V-PORT failed %d\n", rc); + return rc; + } + } + ecore_reset_vport_stats(edev); + DP_INFO(edev, "VPORT started with MTU = %u\n", mtu); + + return 0; +} + +static int +qede_stop_vport(struct ecore_dev *edev) +{ + struct ecore_hwfn *p_hwfn; + uint8_t vport_id; + int rc; + int i; + + vport_id = 0; + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, + vport_id); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc); + return rc; + } + } + + return 0; +} + +/* Activate or deactivate vport via vport-update */ +int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_sp_vport_update_params params; + struct ecore_hwfn *p_hwfn; + uint8_t i; + int rc = -1; + + memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); + params.vport_id = 0; + params.update_vport_active_rx_flg = 1; + params.update_vport_active_tx_flg = 1; + params.vport_active_rx_flg = flg; + params.vport_active_tx_flg = flg; +#ifndef RTE_LIBRTE_QEDE_VF_TX_SWITCH + if (IS_VF(edev)) { + params.update_tx_switching_flg = 1; + params.tx_switching_flg = !flg; + DP_INFO(edev, "VF tx-switching is disabled\n"); + } +#endif + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, ¶ms, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to update vport\n"); + break; + } + } + DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated"); + + return rc; +} + +static void +qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params, + uint16_t mtu, bool enable) +{ + /* Enable LRO in split mode */ + sge_tpa_params->tpa_ipv4_en_flg = enable; + sge_tpa_params->tpa_ipv6_en_flg = enable; + sge_tpa_params->tpa_ipv4_tunn_en_flg = false; + sge_tpa_params->tpa_ipv6_tunn_en_flg = false; + /* set if tpa enable changes */ + sge_tpa_params->update_tpa_en_flg = 1; + /* set if tpa parameters should be handled */ + sge_tpa_params->update_tpa_param_flg = enable; + + sge_tpa_params->max_buffers_per_cqe = 20; + /* Enable TPA in split mode. In this mode each TPA segment + * starts on the new BD, so there is one BD per segment. 
+ */ + sge_tpa_params->tpa_pkt_split_flg = 1; + sge_tpa_params->tpa_hdr_data_split_flg = 0; + sge_tpa_params->tpa_gro_consistent_flg = 0; + sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; + sge_tpa_params->tpa_max_size = 0x7FFF; + sge_tpa_params->tpa_min_size_to_start = mtu / 2; + sge_tpa_params->tpa_min_size_to_cont = mtu / 2; +} + +/* Enable/disable LRO via vport-update */ +int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_sp_vport_update_params params; + struct ecore_sge_tpa_params tpa_params; + struct ecore_hwfn *p_hwfn; + int rc; + int i; + + memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); + memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params)); + qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg); + params.vport_id = 0; + params.sge_tpa_params = &tpa_params; + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, ¶ms, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to update LRO\n"); + return -1; + } + } + qdev->enable_lro = flg; + DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled"); + + return 0; +} + +/* Update MTU via vport-update without doing port restart. + * The vport must be deactivated before calling this API. + */ +int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_sp_vport_update_params params; + struct ecore_hwfn *p_hwfn; + int rc; + int i; + + memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); + params.vport_id = 0; + params.mtu = mtu; + params.vport_id = 0; + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, ¶ms, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to update MTU\n"); + return -1; + } + } + DP_INFO(edev, "MTU updated to %u\n", mtu); + + return 0; +} static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast) { @@ -335,15 +573,90 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast) /* ucast->assert_on_error = true; - For debug */ } -static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn, - uint8_t clss, bool mode, bool mask) +static int +qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev, + enum qed_filter_rx_mode_type type) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_filter_accept_flags flags; + + memset(&flags, 0, sizeof(flags)); + + flags.update_rx_mode_config = 1; + flags.update_tx_mode_config = 1; + flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | + ECORE_ACCEPT_MCAST_MATCHED | + ECORE_ACCEPT_BCAST; + + flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | + ECORE_ACCEPT_MCAST_MATCHED | + ECORE_ACCEPT_BCAST; + + if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { + flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; + if (IS_VF(edev)) { + flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; + DP_INFO(edev, "Enabling Tx unmatched flag for VF\n"); + } + } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { + flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED; + } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC | + QED_FILTER_RX_MODE_TYPE_PROMISC)) { + 
flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED | + ECORE_ACCEPT_MCAST_UNMATCHED; + } + + return ecore_filter_accept_cmd(edev, 0, flags, false, false, + ECORE_SPQ_MODE_CB, NULL); +} + +static int +qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss, + bool enable, bool mask) { - memset(p_tunn, 0, sizeof(struct ecore_tunnel_info)); - p_tunn->vxlan.b_update_mode = mode; - p_tunn->vxlan.b_mode_enabled = mask; - p_tunn->b_update_rx_cls = true; - p_tunn->b_update_tx_cls = true; - p_tunn->vxlan.tun_cls = clss; + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + enum _ecore_status_t rc = ECORE_INVAL; + struct ecore_ptt *p_ptt; + struct ecore_tunnel_info tunn; + struct ecore_hwfn *p_hwfn; + int i; + + memset(&tunn, 0, sizeof(struct ecore_tunnel_info)); + tunn.vxlan.b_update_mode = enable; + tunn.vxlan.b_mode_enabled = mask; + tunn.b_update_rx_cls = true; + tunn.b_update_tx_cls = true; + tunn.vxlan.tun_cls = clss; + + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + if (IS_PF(edev)) { + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + } else { + p_ptt = NULL; + } + rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, + &tunn, ECORE_SPQ_MODE_CB, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to update tunn_clss %u\n", + tunn.vxlan.tun_cls); + if (IS_PF(edev)) + ecore_ptt_release(p_hwfn, p_ptt); + break; + } + } + + if (rc == ECORE_SUCCESS) { + qdev->vxlan.enable = enable; + qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0; + DP_INFO(edev, "vxlan is %s\n", enable ? "enabled" : "disabled"); + } + + return rc; } static int @@ -361,6 +674,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { if ((memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) && + ucast->vni == tmp->vni && ucast->vlan == tmp->vlan) { DP_ERR(edev, "Unicast MAC is already added" " with vlan = %u, vni = %u\n", @@ -506,16 +820,18 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, return rc; } -static void +static int qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr, - uint32_t index, __rte_unused uint32_t pool) + __rte_unused uint32_t index, __rte_unused uint32_t pool) { struct ecore_filter_ucast ucast; + int re; qede_set_ucast_cmn_params(&ucast); ucast.type = ECORE_FILTER_MAC; ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac); - (void)qede_mac_int_ops(eth_dev, &ucast, 1); + re = (int)qede_mac_int_ops(eth_dev, &ucast, 1); + return re; } static void @@ -523,9 +839,7 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; - struct ether_addr mac_addr; struct ecore_filter_ucast ucast; - int rc; PMD_INIT_FUNC_TRACE(edev); @@ -551,8 +865,6 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); - struct ecore_filter_ucast ucast; - int rc; if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev), mac_addr->addr_bytes)) { @@ -562,114 +874,61 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr) return; } - /* First remove the primary mac */ - qede_set_ucast_cmn_params(&ucast); - ucast.opcode = ECORE_FILTER_REMOVE; - ucast.type = ECORE_FILTER_MAC; - ether_addr_copy(&qdev->primary_mac, - (struct ether_addr *)&ucast.mac); - rc = 
ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL); - if (rc != 0) { - DP_ERR(edev, "Unable to remove current macaddr" - " Reverting to previous default mac\n"); - ether_addr_copy(&qdev->primary_mac, - ð_dev->data->mac_addrs[0]); - return; - } - - /* Add new MAC */ - ucast.opcode = ECORE_FILTER_ADD; - ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac); - rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL); - if (rc != 0) - DP_ERR(edev, "Unable to add new default mac\n"); - else - ether_addr_copy(mac_addr, &qdev->primary_mac); + qede_mac_addr_add(eth_dev, mac_addr, 0, 0); } -static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action) +static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg) { - struct ecore_dev *edev = &qdev->edev; - struct qed_update_vport_params params = { - .vport_id = 0, - .accept_any_vlan = action, - .update_accept_any_vlan_flg = 1, - }; - int rc; - - /* Proceed only if action actually needs to be performed */ - if (qdev->accept_any_vlan == action) - return; - - rc = qdev->ops->vport_update(edev, ¶ms); - if (rc) { - DP_ERR(edev, "Failed to %s accept-any-vlan\n", - action ? "enable" : "disable"); - } else { - DP_INFO(edev, "%s accept-any-vlan\n", - action ? "enabled" : "disabled"); - qdev->accept_any_vlan = action; - } -} - -static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping) -{ - struct qed_update_vport_params vport_update_params; - struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_sp_vport_update_params params; + struct ecore_hwfn *p_hwfn; + uint8_t i; int rc; - memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.vport_id = 0; - vport_update_params.update_inner_vlan_removal_flg = 1; - vport_update_params.inner_vlan_removal_flg = set_stripping; - rc = qdev->ops->vport_update(edev, &vport_update_params); - if (rc) { - DP_ERR(edev, "Update V-PORT failed %d\n", rc); - return rc; + memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); + params.vport_id = 0; + params.update_accept_any_vlan_flg = 1; + params.accept_any_vlan = flg; + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, ¶ms, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to configure accept-any-vlan\n"); + return; + } } - return 0; + DP_INFO(edev, "%s accept-any-vlan\n", flg ? 
"enabled" : "disabled"); } -static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) +static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); - struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; - - if (mask & ETH_VLAN_STRIP_MASK) { - if (rxmode->hw_vlan_strip) - (void)qede_vlan_stripping(eth_dev, 1); - else - (void)qede_vlan_stripping(eth_dev, 0); - } + struct ecore_sp_vport_update_params params; + struct ecore_hwfn *p_hwfn; + uint8_t i; + int rc; - if (mask & ETH_VLAN_FILTER_MASK) { - /* VLAN filtering kicks in when a VLAN is added */ - if (rxmode->hw_vlan_filter) { - qede_vlan_filter_set(eth_dev, 0, 1); - } else { - if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ - DP_ERR(edev, - " Please remove existing VLAN filters" - " before disabling VLAN filtering\n"); - /* Signal app that VLAN filtering is still - * enabled - */ - rxmode->hw_vlan_filter = true; - } else { - qede_vlan_filter_set(eth_dev, 0, 0); - } + memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); + params.vport_id = 0; + params.update_inner_vlan_removal_flg = 1; + params.inner_vlan_removal_flg = flg; + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, ¶ms, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to update vport\n"); + return -1; } } - if (mask & ETH_VLAN_EXTEND_MASK) - DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q" - " and classification is based on outer tag only\n"); - - DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n", - mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter); + DP_INFO(edev, "VLAN stripping %s\n", flg ? 
"enabled" : "disabled"); + return 0; } static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, @@ -762,29 +1021,44 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, return rc; } -static int qede_init_vport(struct qede_dev *qdev) +static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) { - struct ecore_dev *edev = &qdev->edev; - struct qed_start_vport_params start = {0}; - int rc; + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; - start.remove_inner_vlan = 1; - start.gro_enable = 0; - start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD; - start.vport_id = 0; - start.drop_ttl0 = false; - start.clear_stats = 1; - start.handle_ptp_pkts = 0; + if (mask & ETH_VLAN_STRIP_MASK) { + if (rxmode->hw_vlan_strip) + (void)qede_vlan_stripping(eth_dev, 1); + else + (void)qede_vlan_stripping(eth_dev, 0); + } - rc = qdev->ops->vport_start(edev, &start); - if (rc) { - DP_ERR(edev, "Start V-PORT failed %d\n", rc); - return rc; + if (mask & ETH_VLAN_FILTER_MASK) { + /* VLAN filtering kicks in when a VLAN is added */ + if (rxmode->hw_vlan_filter) { + qede_vlan_filter_set(eth_dev, 0, 1); + } else { + if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ + DP_ERR(edev, + " Please remove existing VLAN filters" + " before disabling VLAN filtering\n"); + /* Signal app that VLAN filtering is still + * enabled + */ + rxmode->hw_vlan_filter = true; + } else { + qede_vlan_filter_set(eth_dev, 0, 0); + } + } } - DP_INFO(edev, - "Start vport ramrod passed, vport_id = %d, MTU = %u\n", - start.vport_id, ETHER_MTU); + if (mask & ETH_VLAN_EXTEND_MASK) + DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q" + " and classification is based on outer tag only\n"); + + DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n", + mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter); return 0; } @@ -798,10 +1072,12 @@ static void qede_prandom_bytes(uint32_t *buff) buff[i] = rand(); } -static int qede_config_rss(struct rte_eth_dev *eth_dev) +int qede_config_rss(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); +#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); +#endif uint32_t def_rss_key[ECORE_RSS_KEY_SIZE]; struct rte_eth_rss_reta_entry64 reta_conf[2]; struct rte_eth_rss_conf rss_conf; @@ -837,124 +1113,202 @@ static int qede_config_rss(struct rte_eth_dev *eth_dev) return 0; } +static void qede_fastpath_start(struct ecore_dev *edev) +{ + struct ecore_hwfn *p_hwfn; + int i; + + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + ecore_hw_start_fastpath(p_hwfn); + } +} + +static int qede_dev_start(struct rte_eth_dev *eth_dev) +{ + struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + + PMD_INIT_FUNC_TRACE(edev); + + /* Update MTU only if it has changed */ + if (qdev->mtu != qdev->new_mtu) { + if (qede_update_mtu(eth_dev, qdev->new_mtu)) + goto err; + qdev->mtu = qdev->new_mtu; + } + + /* Configure TPA parameters */ + if (rxmode->enable_lro) { + if (qede_enable_tpa(eth_dev, true)) + return -EINVAL; + /* Enable scatter mode for LRO */ + if (!rxmode->enable_scatter) + eth_dev->data->scattered_rx = 1; + } + + /* Start queues */ + if (qede_start_queues(eth_dev)) + goto err; + + /* Newer SR-IOV PF driver expects RX/TX queues to be started before + * enabling RSS. Hence RSS configuration is deferred upto this point. 
+ * Also, we would like to retain similar behavior in PF case, so we + * don't do PF/VF specific check here. + */ + if (rxmode->mq_mode == ETH_MQ_RX_RSS) + if (qede_config_rss(eth_dev)) + goto err; + + /* Enable vport*/ + if (qede_activate_vport(eth_dev, true)) + goto err; + + /* Bring-up the link */ + qede_dev_set_link_state(eth_dev, true); + + /* Update link status */ + qede_link_update(eth_dev, 0); + + /* Start/resume traffic */ + qede_fastpath_start(edev); + + DP_INFO(edev, "Device started\n"); + + return 0; +err: + DP_ERR(edev, "Device start fails\n"); + return -1; /* common error code is < 0 */ +} + +static void qede_dev_stop(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + + PMD_INIT_FUNC_TRACE(edev); + + /* Disable vport */ + if (qede_activate_vport(eth_dev, false)) + return; + + if (qdev->enable_lro) + qede_enable_tpa(eth_dev, false); + + /* Stop queues */ + qede_stop_queues(eth_dev); + + /* Disable traffic */ + ecore_hw_stop_fastpath(edev); /* TBD - loop */ + + /* Bring the link down */ + qede_dev_set_link_state(eth_dev, false); + + DP_INFO(edev, "Device is stopped\n"); +} + static int qede_dev_configure(struct rte_eth_dev *eth_dev) { - struct qede_dev *qdev = eth_dev->data->dev_private; - struct ecore_dev *edev = &qdev->edev; + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; - int rc, i, j; + int ret; PMD_INIT_FUNC_TRACE(edev); /* Check requirements for 100G mode */ - if (edev->num_hwfns > 1) { + if (ECORE_IS_CMT(edev)) { if (eth_dev->data->nb_rx_queues < 2 || - eth_dev->data->nb_tx_queues < 2) { + eth_dev->data->nb_tx_queues < 2) { DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n"); return -EINVAL; } if ((eth_dev->data->nb_rx_queues % 2 != 0) || - (eth_dev->data->nb_tx_queues % 2 != 0)) { + (eth_dev->data->nb_tx_queues % 2 != 0)) { DP_ERR(edev, - "100G mode needs even no. of RX/TX queues\n"); + "100G mode needs even no. of RX/TX queues\n"); return -EINVAL; } } /* Sanity checks and throw warnings */ - if (rxmode->enable_scatter == 1) + if (rxmode->enable_scatter) eth_dev->data->scattered_rx = 1; - if (rxmode->enable_lro == 1) { - DP_ERR(edev, "LRO is not supported\n"); - return -EINVAL; - } - if (!rxmode->hw_strip_crc) DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n"); if (!rxmode->hw_ip_checksum) DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled " - "in hw\n"); - - /* Check for the port restart case */ - if (qdev->state != QEDE_DEV_INIT) { - rc = qdev->ops->vport_stop(edev, 0); - if (rc != 0) - return rc; - qede_dealloc_fp_resc(eth_dev); + "in hw\n"); + if (rxmode->header_split) + DP_INFO(edev, "Header split enable is not supported\n"); + if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode == + ETH_MQ_RX_RSS)) { + DP_ERR(edev, "Unsupported multi-queue mode\n"); + return -ENOTSUP; } - - qdev->fp_num_tx = eth_dev->data->nb_tx_queues; - qdev->fp_num_rx = eth_dev->data->nb_rx_queues; - qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx; - - /* Fastpath status block should be initialized before sending - * VPORT-START in the case of VF. Anyway, do it for both VF/PF. - */ - rc = qede_alloc_fp_resc(qdev); - if (rc != 0) - return rc; - - /* Issue VPORT-START with default config values to allow - * other port configurations early on. 
+ /* Flow director mode check */ + if (qede_check_fdir_support(eth_dev)) + return -ENOTSUP; + + /* Deallocate resources if held previously. It is needed only if the + * queue count has been changed from previous configuration. If its + * going to change then it means RX/TX queue setup will be called + * again and the fastpath pointers will be reinitialized there. */ - rc = qede_init_vport(qdev); - if (rc != 0) - return rc; - - /* Do RSS configuration after vport-start */ - switch (rxmode->mq_mode) { - case ETH_MQ_RX_RSS: - rc = qede_config_rss(eth_dev); - if (rc != 0) { - qdev->ops->vport_stop(edev, 0); - qede_dealloc_fp_resc(eth_dev); - return -EINVAL; - } - break; - case ETH_MQ_RX_NONE: - DP_INFO(edev, "RSS is disabled\n"); - break; - default: - DP_ERR(edev, "Unsupported RSS mode\n"); - qdev->ops->vport_stop(edev, 0); + if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues || + qdev->num_rx_queues != eth_dev->data->nb_rx_queues) { qede_dealloc_fp_resc(eth_dev); - return -EINVAL; + /* Proceed with updated queue count */ + qdev->num_tx_queues = eth_dev->data->nb_tx_queues; + qdev->num_rx_queues = eth_dev->data->nb_rx_queues; + if (qede_alloc_fp_resc(qdev)) + return -ENOMEM; } - SLIST_INIT(&qdev->vlan_list_head); + /* VF's MTU has to be set using vport-start where as + * PF's MTU can be updated via vport-update. + */ + if (IS_VF(edev)) { + if (qede_start_vport(qdev, rxmode->max_rx_pkt_len)) + return -1; + } else { + if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len)) + return -1; + } - /* Add primary mac for PF */ - if (IS_PF(edev)) - qede_mac_addr_set(eth_dev, &qdev->primary_mac); + qdev->mtu = rxmode->max_rx_pkt_len; + qdev->new_mtu = qdev->mtu; /* Enable VLAN offloads by default */ - qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | - ETH_VLAN_FILTER_MASK | - ETH_VLAN_EXTEND_MASK); - - qdev->state = QEDE_DEV_CONFIG; + ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | + ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK); + if (ret) + return ret; - DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n", - (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev), - qdev->num_tc); + DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n", + QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)); return 0; } /* Info about HW descriptor ring limitations */ static const struct rte_eth_desc_lim qede_rx_desc_lim = { - .nb_max = NUM_RX_BDS_MAX, + .nb_max = 0x8000, /* 32K */ .nb_min = 128, - .nb_align = 128 /* lowest common multiple */ + .nb_align = 128 /* lowest common multiple */ }; static const struct rte_eth_desc_lim qede_tx_desc_lim = { - .nb_max = NUM_TX_BDS_MAX, + .nb_max = 0x8000, /* 32K */ .nb_min = 256, - .nb_align = 256 + .nb_align = 256, + .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET, + .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET }; static void @@ -968,7 +1322,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev, PMD_INIT_FUNC_TRACE(edev); - dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE; dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; dev_info->rx_desc_lim = qede_rx_desc_lim; @@ -996,12 +1350,16 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev, DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM); + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_TCP_LRO); + dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | 
DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM); + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO); memset(&link, 0, sizeof(struct qed_link_output)); qdev->ops->common->get_link(edev, &link); @@ -1021,7 +1379,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev, } /* return 0 means link status changed, -1 means not changed */ -static int +int qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) { struct qede_dev *qdev = eth_dev->data->dev_private; @@ -1067,10 +1425,12 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev) { +#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; PMD_INIT_FUNC_TRACE(edev); +#endif enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; @@ -1082,10 +1442,12 @@ static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev) static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev) { +#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; PMD_INIT_FUNC_TRACE(edev); +#endif if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) qed_configure_filter_rx_mode(eth_dev, @@ -1117,10 +1479,9 @@ static void qede_poll_sp_sb_cb(void *param) static void qede_dev_close(struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); - int rc; PMD_INIT_FUNC_TRACE(edev); @@ -1129,33 +1490,26 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev) * by the app without reconfiguration. 
However, in dev_close() we * can release all the resources and device can be brought up newly */ - if (qdev->state != QEDE_DEV_STOP) + if (eth_dev->data->dev_started) qede_dev_stop(eth_dev); - else - DP_INFO(edev, "Device is already stopped\n"); - - rc = qdev->ops->vport_stop(edev, 0); - if (rc != 0) - DP_ERR(edev, "Failed to stop VPORT\n"); + qede_stop_vport(edev); + qede_fdir_dealloc_resc(eth_dev); qede_dealloc_fp_resc(eth_dev); - qdev->ops->common->slowpath_stop(edev); + eth_dev->data->nb_rx_queues = 0; + eth_dev->data->nb_tx_queues = 0; + qdev->ops->common->slowpath_stop(edev); qdev->ops->common->remove(edev); - rte_intr_disable(&pci_dev->intr_handle); - rte_intr_callback_unregister(&pci_dev->intr_handle, qede_interrupt_handler, (void *)eth_dev); - - if (edev->num_hwfns > 1) + if (ECORE_IS_CMT(edev)) rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); - - qdev->state = QEDE_DEV_INIT; /* Go back to init state */ } -static void +static int qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) { struct qede_dev *qdev = eth_dev->data->dev_private; @@ -1165,98 +1519,107 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) unsigned int rxq_stat_cntrs, txq_stat_cntrs; struct qede_tx_queue *txq; - qdev->ops->get_vport_stats(edev, &stats); + ecore_get_vport_stats(edev, &stats); /* RX Stats */ - eth_stats->ipackets = stats.rx_ucast_pkts + - stats.rx_mcast_pkts + stats.rx_bcast_pkts; + eth_stats->ipackets = stats.common.rx_ucast_pkts + + stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts; - eth_stats->ibytes = stats.rx_ucast_bytes + - stats.rx_mcast_bytes + stats.rx_bcast_bytes; + eth_stats->ibytes = stats.common.rx_ucast_bytes + + stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes; - eth_stats->ierrors = stats.rx_crc_errors + - stats.rx_align_errors + - stats.rx_carrier_errors + - stats.rx_oversize_packets + - stats.rx_jabbers + stats.rx_undersize_packets; + eth_stats->ierrors = stats.common.rx_crc_errors + + stats.common.rx_align_errors + + stats.common.rx_carrier_errors + + stats.common.rx_oversize_packets + + stats.common.rx_jabbers + stats.common.rx_undersize_packets; - eth_stats->rx_nombuf = stats.no_buff_discards; + eth_stats->rx_nombuf = stats.common.no_buff_discards; - eth_stats->imissed = stats.mftag_filter_discards + - stats.mac_filter_discards + - stats.no_buff_discards + stats.brb_truncates + stats.brb_discards; + eth_stats->imissed = stats.common.mftag_filter_discards + + stats.common.mac_filter_discards + + stats.common.no_buff_discards + + stats.common.brb_truncates + stats.common.brb_discards; /* TX stats */ - eth_stats->opackets = stats.tx_ucast_pkts + - stats.tx_mcast_pkts + stats.tx_bcast_pkts; + eth_stats->opackets = stats.common.tx_ucast_pkts + + stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts; - eth_stats->obytes = stats.tx_ucast_bytes + - stats.tx_mcast_bytes + stats.tx_bcast_bytes; + eth_stats->obytes = stats.common.tx_ucast_bytes + + stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes; - eth_stats->oerrors = stats.tx_err_drop_pkts; + eth_stats->oerrors = stats.common.tx_err_drop_pkts; /* Queue stats */ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), RTE_ETHDEV_QUEUE_STAT_CNTRS); txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev), RTE_ETHDEV_QUEUE_STAT_CNTRS); - if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) || - (txq_stat_cntrs != QEDE_TSS_COUNT(qdev))) + if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) || + (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev))) DP_VERBOSE(edev, 
ECORE_MSG_DEBUG, "Not all the queue stats will be displayed. Set" " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" " appropriately and retry.\n"); - for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) { - if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) { - eth_stats->q_ipackets[i] = - *(uint64_t *)( - ((char *)(qdev->fp_array[(qid)].rxq)) + - offsetof(struct qede_rx_queue, - rcv_pkts)); - eth_stats->q_errors[i] = - *(uint64_t *)( - ((char *)(qdev->fp_array[(qid)].rxq)) + - offsetof(struct qede_rx_queue, - rx_hw_errors)) + - *(uint64_t *)( - ((char *)(qdev->fp_array[(qid)].rxq)) + - offsetof(struct qede_rx_queue, - rx_alloc_errors)); - i++; - } + for_each_rss(qid) { + eth_stats->q_ipackets[i] = + *(uint64_t *)( + ((char *)(qdev->fp_array[qid].rxq)) + + offsetof(struct qede_rx_queue, + rcv_pkts)); + eth_stats->q_errors[i] = + *(uint64_t *)( + ((char *)(qdev->fp_array[qid].rxq)) + + offsetof(struct qede_rx_queue, + rx_hw_errors)) + + *(uint64_t *)( + ((char *)(qdev->fp_array[qid].rxq)) + + offsetof(struct qede_rx_queue, + rx_alloc_errors)); + i++; if (i == rxq_stat_cntrs) break; } - for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) { - if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) { - txq = qdev->fp_array[(qid)].txqs[0]; - eth_stats->q_opackets[j] = - *((uint64_t *)(uintptr_t) - (((uint64_t)(uintptr_t)(txq)) + - offsetof(struct qede_tx_queue, - xmit_pkts))); - j++; - } + for_each_tss(qid) { + txq = qdev->fp_array[qid].txq; + eth_stats->q_opackets[j] = + *((uint64_t *)(uintptr_t) + (((uint64_t)(uintptr_t)(txq)) + + offsetof(struct qede_tx_queue, + xmit_pkts))); + j++; if (j == txq_stat_cntrs) break; } + + return 0; } static unsigned qede_get_xstats_count(struct qede_dev *qdev) { - return RTE_DIM(qede_xstats_strings) + - (RTE_DIM(qede_rxq_xstats_strings) * - RTE_MIN(QEDE_RSS_COUNT(qdev), - RTE_ETHDEV_QUEUE_STAT_CNTRS)); + if (ECORE_IS_BB(&qdev->edev)) + return RTE_DIM(qede_xstats_strings) + + RTE_DIM(qede_bb_xstats_strings) + + (RTE_DIM(qede_rxq_xstats_strings) * + RTE_MIN(QEDE_RSS_COUNT(qdev), + RTE_ETHDEV_QUEUE_STAT_CNTRS)); + else + return RTE_DIM(qede_xstats_strings) + + RTE_DIM(qede_ah_xstats_strings) + + (RTE_DIM(qede_rxq_xstats_strings) * + RTE_MIN(QEDE_RSS_COUNT(qdev), + RTE_ETHDEV_QUEUE_STAT_CNTRS)); } static int -qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev, - struct rte_eth_xstat_name *xstats_names, unsigned limit) +qede_get_xstats_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned int limit) { struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; const unsigned int stat_cnt = qede_get_xstats_count(qdev); unsigned int i, qid, stat_idx = 0; unsigned int rxq_stat_cntrs; @@ -1270,6 +1633,24 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev, stat_idx++; } + if (ECORE_IS_BB(edev)) { + for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { + snprintf(xstats_names[stat_idx].name, + sizeof(xstats_names[stat_idx].name), + "%s", + qede_bb_xstats_strings[i].name); + stat_idx++; + } + } else { + for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { + snprintf(xstats_names[stat_idx].name, + sizeof(xstats_names[stat_idx].name), + "%s", + qede_ah_xstats_strings[i].name); + stat_idx++; + } + } + rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), RTE_ETHDEV_QUEUE_STAT_CNTRS); for (qid = 0; qid < rxq_stat_cntrs; qid++) { @@ -1301,7 +1682,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, if (n < num) return num; - qdev->ops->get_vport_stats(edev, &stats); + 
ecore_get_vport_stats(edev, &stats); for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) + @@ -1310,13 +1691,31 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, stat_idx++; } + if (ECORE_IS_BB(edev)) { + for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { + xstats[stat_idx].value = + *(uint64_t *)(((char *)&stats) + + qede_bb_xstats_strings[i].offset); + xstats[stat_idx].id = stat_idx; + stat_idx++; + } + } else { + for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { + xstats[stat_idx].value = + *(uint64_t *)(((char *)&stats) + + qede_ah_xstats_strings[i].offset); + xstats[stat_idx].id = stat_idx; + stat_idx++; + } + } + rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), RTE_ETHDEV_QUEUE_STAT_CNTRS); for (qid = 0; qid < rxq_stat_cntrs; qid++) { - if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) { + for_each_rss(qid) { for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { xstats[stat_idx].value = *(uint64_t *)( - ((char *)(qdev->fp_array[(qid)].rxq)) + + ((char *)(qdev->fp_array[qid].rxq)) + qede_rxq_xstats_strings[i].offset); xstats[stat_idx].id = stat_idx; stat_idx++; @@ -1458,8 +1857,22 @@ static const uint32_t * qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) { static const uint32_t ptypes[] = { + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L2_ETHER_VLAN, RTE_PTYPE_L3_IPV4, RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_L4_FRAG, + /* Inner */ + RTE_PTYPE_INNER_L2_ETHER, + RTE_PTYPE_INNER_L2_ETHER_VLAN, + RTE_PTYPE_INNER_L3_IPV4, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_INNER_L4_FRAG, RTE_PTYPE_UNKNOWN }; @@ -1478,10 +1891,12 @@ static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf) *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0; *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0; *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0; + *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0; + *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0; } -static int qede_rss_hash_update(struct rte_eth_dev *eth_dev, - struct rte_eth_rss_conf *rss_conf) +int qede_rss_hash_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); @@ -1585,18 +2000,73 @@ static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev, return 0; } -static int qede_rss_reta_update(struct rte_eth_dev *eth_dev, - struct rte_eth_rss_reta_entry64 *reta_conf, - uint16_t reta_size) +static bool qede_update_rss_parm_cmt(struct ecore_dev *edev, + struct ecore_rss_params *rss) +{ + int i, fn; + bool rss_mode = 1; /* enable */ + struct ecore_queue_cid *cid; + struct ecore_rss_params *t_rss; + + /* In regular scenario, we'd simply need to take input handlers. + * But in CMT, we'd have to split the handlers according to the + * engine they were configured on. We'd then have to understand + * whether RSS is really required, since 2-queues on CMT doesn't + * require RSS. 
+ */ + + /* CMT should be round-robin */ + for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { + cid = rss->rss_ind_table[i]; + + if (cid->p_owner == ECORE_LEADING_HWFN(edev)) + t_rss = &rss[0]; + else + t_rss = &rss[1]; + + t_rss->rss_ind_table[i / edev->num_hwfns] = cid; + } + + t_rss = &rss[1]; + t_rss->update_rss_ind_table = 1; + t_rss->rss_table_size_log = 7; + t_rss->update_rss_config = 1; + + /* Make sure RSS is actually required */ + for_each_hwfn(edev, fn) { + for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns; + i++) { + if (rss[fn].rss_ind_table[i] != + rss[fn].rss_ind_table[0]) + break; + } + + if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) { + DP_INFO(edev, + "CMT - 1 queue per-hwfn; Disabling RSS\n"); + rss_mode = 0; + goto out; + } + } + +out: + t_rss->rss_enable = rss_mode; + + return rss_mode; +} + +int qede_rss_reta_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct ecore_sp_vport_update_params vport_update_params; - struct ecore_rss_params params; + struct ecore_rss_params *params; struct ecore_hwfn *p_hwfn; uint16_t i, idx, shift; uint8_t entry; - int rc; + int rc = 0; if (reta_size > ETH_RSS_RETA_SIZE_128) { DP_ERR(edev, "reta_size %d is not supported by hardware\n", @@ -1605,7 +2075,12 @@ static int qede_rss_reta_update(struct rte_eth_dev *eth_dev, } memset(&vport_update_params, 0, sizeof(vport_update_params)); - memset(¶ms, 0, sizeof(params)); + params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns, + RTE_CACHE_LINE_SIZE); + if (params == NULL) { + DP_ERR(edev, "failed to allocate memory\n"); + return -ENOMEM; + } for (i = 0; i < reta_size; i++) { idx = i / RTE_RETA_GROUP_SIZE; @@ -1613,24 +2088,25 @@ static int qede_rss_reta_update(struct rte_eth_dev *eth_dev, if (reta_conf[idx].mask & (1ULL << shift)) { entry = reta_conf[idx].reta[shift]; /* Pass rxq handles to ecore */ - params.rss_ind_table[i] = + params->rss_ind_table[i] = qdev->fp_array[entry].rxq->handle; /* Update the local copy for RETA query command */ qdev->rss_ind_table[i] = entry; } } + params->update_rss_ind_table = 1; + params->rss_table_size_log = 7; + params->update_rss_config = 1; + /* Fix up RETA for CMT mode device */ - if (edev->num_hwfns > 1) - qdev->rss_enable = qed_update_rss_parm_cmt(edev, - params.rss_ind_table[0]); - params.update_rss_ind_table = 1; - params.rss_table_size_log = 7; - params.update_rss_config = 1; + if (ECORE_IS_CMT(edev)) + qdev->rss_enable = qede_update_rss_parm_cmt(edev, + params); vport_update_params.vport_id = 0; /* Use the current value of rss_enable */ - params.rss_enable = qdev->rss_enable; - vport_update_params.rss_params = ¶ms; + params->rss_enable = qdev->rss_enable; + vport_update_params.rss_params = params; for_each_hwfn(edev, i) { p_hwfn = &edev->hwfns[i]; @@ -1639,11 +2115,13 @@ static int qede_rss_reta_update(struct rte_eth_dev *eth_dev, ECORE_SPQ_MODE_EBLOCK, NULL); if (rc) { DP_ERR(edev, "vport-update for RSS failed\n"); - return rc; + goto out; } } - return 0; +out: + rte_free(params); + return rc; } static int qede_rss_reta_query(struct rte_eth_dev *eth_dev, @@ -1673,34 +2151,63 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev, return 0; } -int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) + + +static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { - uint32_t frame_size; - struct qede_dev *qdev = dev->data->dev_private; + struct qede_dev *qdev = 
QEDE_INIT_QDEV(dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct rte_eth_dev_info dev_info = {0}; + struct qede_fastpath *fp; + uint32_t frame_size; + uint16_t rx_buf_size; + uint16_t bufsz; + int i; + PMD_INIT_FUNC_TRACE(edev); qede_dev_info_get(dev, &dev_info); - - /* VLAN_TAG = 4 */ - frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4; - - if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) + frame_size = mtu + QEDE_ETH_OVERHEAD; + if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) { + DP_ERR(edev, "MTU %u out of range\n", mtu); return -EINVAL; - + } if (!dev->data->scattered_rx && - frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) + frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { + DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n", + dev->data->min_rx_buf_size); return -EINVAL; - + } + /* Temporarily replace I/O functions with dummy ones. It cannot + * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. + */ + dev->rx_pkt_burst = qede_rxtx_pkts_dummy; + dev->tx_pkt_burst = qede_rxtx_pkts_dummy; + qede_dev_stop(dev); + rte_delay_ms(1000); + qdev->mtu = mtu; + /* Fix up RX buf size for all queues of the port */ + for_each_rss(i) { + fp = &qdev->fp_array[i]; + bufsz = (uint16_t)rte_pktmbuf_data_room_size( + fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; + if (dev->data->scattered_rx) + rx_buf_size = bufsz + QEDE_ETH_OVERHEAD; + else + rx_buf_size = mtu + QEDE_ETH_OVERHEAD; + rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size); + fp->rxq->rx_buf_size = rx_buf_size; + DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size); + } + qede_dev_start(dev); if (frame_size > ETHER_MAX_LEN) dev->data->dev_conf.rxmode.jumbo_frame = 1; else dev->data->dev_conf.rxmode.jumbo_frame = 0; - /* update max frame size */ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; - qdev->mtu = mtu; - qede_dev_stop(dev); - qede_dev_start(dev); + /* Reassign back */ + dev->rx_pkt_burst = qede_recv_pkts; + dev->tx_pkt_burst = qede_xmit_pkts; return 0; } @@ -1714,38 +2221,89 @@ qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev, struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct ecore_tunnel_info tunn; /* @DPDK */ struct ecore_hwfn *p_hwfn; + struct ecore_ptt *p_ptt; + uint16_t udp_port; int rc, i; PMD_INIT_FUNC_TRACE(edev); memset(&tunn, 0, sizeof(tunn)); if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) { + /* Enable VxLAN tunnel if needed before UDP port update using + * default MAC/VLAN classification. + */ + if (add) { + if (qdev->vxlan.udp_port == tunnel_udp->udp_port) { + DP_INFO(edev, + "UDP port %u was already configured\n", + tunnel_udp->udp_port); + return ECORE_SUCCESS; + } + /* Enable VXLAN if it was not enabled while adding + * VXLAN filter. + */ + if (!qdev->vxlan.enable) { + rc = qede_vxlan_enable(eth_dev, + ECORE_TUNN_CLSS_MAC_VLAN, true, true); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, "Failed to enable VXLAN " + "prior to updating UDP port\n"); + return rc; + } + } + udp_port = tunnel_udp->udp_port; + } else { + if (qdev->vxlan.udp_port != tunnel_udp->udp_port) { + DP_ERR(edev, "UDP port %u doesn't exist\n", + tunnel_udp->udp_port); + return ECORE_INVAL; + } + udp_port = 0; + } + tunn.vxlan_port.b_update_port = true; - tunn.vxlan_port.port = (add) ? 
-					       QEDE_VXLAN_DEF_PORT;
+		tunn.vxlan_port.port = udp_port;
 		for_each_hwfn(edev, i) {
 			p_hwfn = &edev->hwfns[i];
-			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+			if (IS_PF(edev)) {
+				p_ptt = ecore_ptt_acquire(p_hwfn);
+				if (!p_ptt)
+					return -EAGAIN;
+			} else {
+				p_ptt = NULL;
+			}
+			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
 				ECORE_SPQ_MODE_CB, NULL);
 			if (rc != ECORE_SUCCESS) {
 				DP_ERR(edev, "Unable to config UDP port %u\n",
 				       tunn.vxlan_port.port);
+				if (IS_PF(edev))
+					ecore_ptt_release(p_hwfn, p_ptt);
 				return rc;
 			}
 		}
+
+		qdev->vxlan.udp_port = udp_port;
+		/* If the request is to delete UDP port and if the number of
+		 * VXLAN filters has reached 0 then VxLAN offload can be
+		 * disabled.
+		 */
+		if (!add && qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
+			return qede_vxlan_enable(eth_dev,
+					ECORE_TUNN_CLSS_MAC_VLAN, false, true);
 	}
 
 	return 0;
 }
 
-int
+static int
 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
 		      struct rte_eth_udp_tunnel *tunnel_udp)
 {
 	return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
 }
 
-int
+static int
 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 		      struct rte_eth_udp_tunnel *tunnel_udp)
 {
@@ -1822,33 +2380,38 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct ecore_tunnel_info tunn;
-	struct ecore_hwfn *p_hwfn;
 	enum ecore_filter_ucast_type type;
-	enum ecore_tunn_clss clss;
-	struct ecore_filter_ucast ucast;
+	enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
+	struct ecore_filter_ucast ucast = {0};
 	char str[80];
-	uint16_t filter_type;
-	int rc, i;
+	uint16_t filter_type = 0;
+	int rc;
+
+	PMD_INIT_FUNC_TRACE(edev);
 
-	filter_type = conf->filter_type | qdev->vxlan_filter_type;
-	/* First determine if the given filter classification is supported */
-	qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
-	if (clss == MAX_ECORE_TUNN_CLSS) {
-		DP_ERR(edev, "Wrong filter type\n");
-		return -EINVAL;
-	}
-	/* Init tunnel ucast params */
-	rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
-	if (rc != ECORE_SUCCESS) {
-		DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
-		       conf->filter_type);
-		return rc;
-	}
-	DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
-		str, filter_op, ucast.type);
 	switch (filter_op) {
 	case RTE_ETH_FILTER_ADD:
+		if (IS_VF(edev))
+			return qede_vxlan_enable(eth_dev,
+					ECORE_TUNN_CLSS_MAC_VLAN, true, true);
+
+		filter_type = conf->filter_type;
+		/* Determine if the given filter classification is supported */
+		qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
+		if (clss == MAX_ECORE_TUNN_CLSS) {
+			DP_ERR(edev, "Unsupported filter type\n");
+			return -EINVAL;
+		}
+		/* Init tunnel ucast params */
+		rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+		if (rc != ECORE_SUCCESS) {
+			DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
+			       conf->filter_type);
+			return rc;
+		}
+		DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+			str, filter_op, ucast.type);
+
 		ucast.opcode = ECORE_FILTER_ADD;
 
 		/* Skip MAC/VLAN if filter is based on VNI */
@@ -1868,22 +2431,34 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
 		if (rc != ECORE_SUCCESS)
 			return rc;
 
-		qdev->vxlan_filter_type = filter_type;
+		qdev->vxlan.num_filters++;
+		qdev->vxlan.filter_type = filter_type;
+		if (!qdev->vxlan.enable)
+			return qede_vxlan_enable(eth_dev, clss, true, true);
 
-		DP_INFO(edev, "Enabling VXLAN tunneling\n");
-		qede_set_cmn_tunn_param(&tunn, clss, true, true);
-		for_each_hwfn(edev, i) {
-			p_hwfn = &edev->hwfns[i];
-			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
-				&tunn, ECORE_SPQ_MODE_CB, NULL);
-			if (rc != ECORE_SUCCESS) {
-				DP_ERR(edev, "Failed to update tunn_clss %u\n",
-				       tunn.vxlan.tun_cls);
-			}
-		}
-		qdev->num_tunn_filters++; /* Filter added successfully */
 		break;
 	case RTE_ETH_FILTER_DELETE:
+		if (IS_VF(edev))
+			return qede_vxlan_enable(eth_dev,
+					ECORE_TUNN_CLSS_MAC_VLAN, false, true);
+
+		filter_type = conf->filter_type;
+		/* Determine if the given filter classification is supported */
+		qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
+		if (clss == MAX_ECORE_TUNN_CLSS) {
+			DP_ERR(edev, "Unsupported filter type\n");
+			return -EINVAL;
+		}
+		/* Init tunnel ucast params */
+		rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+		if (rc != ECORE_SUCCESS) {
+			DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
+			       conf->filter_type);
+			return rc;
+		}
+		DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+			str, filter_op, ucast.type);
+
 		ucast.opcode = ECORE_FILTER_REMOVE;
 
 		if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
@@ -1897,33 +2472,16 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
 		if (rc != ECORE_SUCCESS)
 			return rc;
 
-		qdev->vxlan_filter_type = filter_type;
-		qdev->num_tunn_filters--;
+		qdev->vxlan.num_filters--;
 
 		/* Disable VXLAN if VXLAN filters become 0 */
-		if (qdev->num_tunn_filters == 0) {
-			DP_INFO(edev, "Disabling VXLAN tunneling\n");
-
-			/* Use 0 as tunnel mode */
-			qede_set_cmn_tunn_param(&tunn, clss, false, true);
-			for_each_hwfn(edev, i) {
-				p_hwfn = &edev->hwfns[i];
-				rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
-					ECORE_SPQ_MODE_CB, NULL);
-				if (rc != ECORE_SUCCESS) {
-					DP_ERR(edev,
-					       "Failed to update tunn_clss %u\n",
-					       tunn.vxlan.tun_cls);
-					break;
-				}
-			}
-		}
+		if (qdev->vxlan.num_filters == 0)
+			return qede_vxlan_enable(eth_dev, clss, false, true);
 		break;
 	default:
 		DP_ERR(edev, "Unsupported operation %d\n", filter_op);
 		return -EINVAL;
 	}
-	DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);
 
 	return 0;
 }
@@ -1962,11 +2520,13 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
 		}
 		break;
 	case RTE_ETH_FILTER_FDIR:
+		return qede_fdir_filter_conf(eth_dev, filter_op, arg);
+	case RTE_ETH_FILTER_NTUPLE:
+		return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
 	case RTE_ETH_FILTER_MACVLAN:
 	case RTE_ETH_FILTER_ETHERTYPE:
 	case RTE_ETH_FILTER_FLEXIBLE:
 	case RTE_ETH_FILTER_SYN:
-	case RTE_ETH_FILTER_NTUPLE:
 	case RTE_ETH_FILTER_HASH:
 	case RTE_ETH_FILTER_L2_TUNNEL:
 	case RTE_ETH_FILTER_MAX:
@@ -2049,6 +2609,8 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
 	.reta_update = qede_rss_reta_update,
 	.reta_query = qede_rss_reta_query,
 	.mtu_set = qede_set_mtu,
+	.udp_tunnel_port_add = qede_udp_dst_port_add,
+	.udp_tunnel_port_del = qede_udp_dst_port_del,
 };
 
 static void qede_update_pf_params(struct ecore_dev *edev)
@@ -2057,6 +2619,7 @@ static void qede_update_pf_params(struct ecore_dev *edev)
 
 	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
 	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
+	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
 	qed_ops->common->update_pf_params(edev, &pf_params);
 }
 
@@ -2076,13 +2639,13 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	/* Fix up ecore debug level */
 	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
 	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
-	uint32_t max_mac_addrs;
 	int rc;
 
 	/* Extract key data structures */
 	adapter = eth_dev->data->dev_private;
+	adapter->ethdev = eth_dev;
 	edev = &adapter->edev;
-	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	pci_addr = pci_dev->addr;
 
 	PMD_INIT_FUNC_TRACE(edev);
@@ -2093,10 +2656,10 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 
 	eth_dev->rx_pkt_burst = qede_recv_pkts;
 	eth_dev->tx_pkt_burst = qede_xmit_pkts;
+	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
-		DP_NOTICE(edev, false,
-			  "Skipping device init from secondary process\n");
+		DP_ERR(edev, "Skipping device init from secondary process\n");
 		return 0;
 	}
 
@@ -2113,20 +2676,15 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	}
 
 	DP_INFO(edev, "Starting qede probe\n");
-
-	rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
-				    dp_module, dp_level, is_vf);
-
+	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
+				    dp_level, is_vf);
 	if (rc != 0) {
 		DP_ERR(edev, "qede probe failed rc %d\n", rc);
 		return -ENODEV;
 	}
-
 	qede_update_pf_params(edev);
-
 	rte_intr_callback_register(&pci_dev->intr_handle,
 				   qede_interrupt_handler, (void *)eth_dev);
-
 	if (rte_intr_enable(&pci_dev->intr_handle)) {
 		DP_ERR(edev, "rte_intr_enable() failed\n");
 		return -ENODEV;
@@ -2146,7 +2704,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	 * This is required since uio device uses only one MSI-x
 	 * interrupt vector but we need one for each engine.
 	 */
-	if (edev->num_hwfns > 1 && IS_PF(edev)) {
+	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
 		rc = rte_eal_alarm_set(timer_period * US_PER_S,
 				       qede_poll_sp_sb_cb,
 				       (void *)eth_dev);
@@ -2224,8 +2782,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 				ether_addr_copy(&eth_dev->data->mac_addrs[0],
 						&adapter->primary_mac);
 			} else {
-				DP_NOTICE(edev, false,
-					  "No VF macaddr assigned\n");
+				DP_ERR(edev, "No VF macaddr assigned\n");
 			}
 		}
 	}
@@ -2233,21 +2790,34 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
 
 	if (do_once) {
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
 		qede_print_adapter_info(adapter);
+#endif
 		do_once = false;
 	}
 
-	adapter->state = QEDE_DEV_INIT;
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	SLIST_INIT(&adapter->fdir_info.fdir_list_head);
+	SLIST_INIT(&adapter->vlan_list_head);
+	SLIST_INIT(&adapter->uc_list_head);
+	adapter->mtu = ETHER_MTU;
+	adapter->new_mtu = ETHER_MTU;
+	if (!is_vf)
+		if (qede_start_vport(adapter, adapter->mtu))
+			return -1;
 
-	DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
-		  adapter->primary_mac.addr_bytes[0],
-		  adapter->primary_mac.addr_bytes[1],
-		  adapter->primary_mac.addr_bytes[2],
-		  adapter->primary_mac.addr_bytes[3],
-		  adapter->primary_mac.addr_bytes[4],
-		  adapter->primary_mac.addr_bytes[5]);
+	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+		adapter->primary_mac.addr_bytes[0],
+		adapter->primary_mac.addr_bytes[1],
+		adapter->primary_mac.addr_bytes[2],
+		adapter->primary_mac.addr_bytes[3],
+		adapter->primary_mac.addr_bytes[4],
+		adapter->primary_mac.addr_bytes[5]);
 
-	return rc;
+	DP_INFO(edev, "Device initialized\n");
+
+	return 0;
 }
 
 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
@@ -2262,6 +2832,13 @@ static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
 
 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
 {
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+	struct qede_dev *qdev = eth_dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
+
+	PMD_INIT_FUNC_TRACE(edev);
+#endif
+
 	/* only uninitialize in the primary process */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
@@ -2340,35 +2917,47 @@ static const struct rte_pci_id pci_id_qede_map[] = {
 	{.vendor_id = 0,}
 };
 
-static struct eth_driver rte_qedevf_pmd = {
-	.pci_drv = {
-		    .id_table = pci_id_qedevf_map,
-		    .drv_flags =
-		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
-		    .probe = rte_eth_dev_pci_probe,
-		    .remove = rte_eth_dev_pci_remove,
-		   },
-	.eth_dev_init = qedevf_eth_dev_init,
-	.eth_dev_uninit = qedevf_eth_dev_uninit,
-	.dev_private_size = sizeof(struct qede_dev),
+static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev,
+		sizeof(struct qede_dev), qedevf_eth_dev_init);
+}
+
+static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qedevf_pmd = {
+	.id_table = pci_id_qedevf_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.probe = qedevf_eth_dev_pci_probe,
+	.remove = qedevf_eth_dev_pci_remove,
 };
 
-static struct eth_driver rte_qede_pmd = {
-	.pci_drv = {
-		    .id_table = pci_id_qede_map,
-		    .drv_flags =
-		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
-		    .probe = rte_eth_dev_pci_probe,
-		    .remove = rte_eth_dev_pci_remove,
-		   },
-	.eth_dev_init = qede_eth_dev_init,
-	.eth_dev_uninit = qede_eth_dev_uninit,
-	.dev_private_size = sizeof(struct qede_dev),
+static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev,
+		sizeof(struct qede_dev), qede_eth_dev_init);
+}
+
+static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qede_pmd = {
+	.id_table = pci_id_qede_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.probe = qede_eth_dev_pci_probe,
+	.remove = qede_eth_dev_pci_remove,
 };
 
-RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio");
-RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
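
Note: the tail of this change replaces the old struct eth_driver wrapper with the plain struct rte_pci_driver registration style. For readers unfamiliar with that pattern, the standalone sketch below shows the same probe/remove wiring for a hypothetical "mypmd" driver; the names, PCI IDs and private-data struct are illustrative assumptions, not part of this patch, and the qede code above remains the authoritative example.

/* Illustrative sketch only: minimal PCI PMD registration using the
 * rte_pci_driver + rte_eth_dev_pci_generic_probe/remove helpers that
 * this patch converts qede to.  All "mypmd" names are hypothetical.
 */
#include <rte_common.h>
#include <rte_pci.h>
#include <rte_ethdev_pci.h>

struct mypmd_dev {
	int dummy;              /* per-port private data would live here */
};

static const struct rte_pci_id pci_id_mypmd_map[] = {
	{ RTE_PCI_DEVICE(0x1234, 0x5678) }, /* hypothetical vendor/device */
	{ .vendor_id = 0, }
};

static int mypmd_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(eth_dev);  /* real drivers set dev_ops, MAC addrs, ... */
	return 0;
}

static int mypmd_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(eth_dev);
	return 0;
}

static int mypmd_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			   struct rte_pci_device *pci_dev)
{
	/* Allocates the ethdev, sizes its private data, calls dev_init */
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct mypmd_dev), mypmd_eth_dev_init);
}

static int mypmd_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, mypmd_eth_dev_uninit);
}

static struct rte_pci_driver rte_mypmd_pmd = {
	.id_table = pci_id_mypmd_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = mypmd_pci_probe,
	.remove = mypmd_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_mypmd, rte_mypmd_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_mypmd, pci_id_mypmd_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mypmd, "* igb_uio | uio_pci_generic | vfio-pci");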