/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */
#include <limits.h>
#include <time.h>

#include <rte_alarm.h>
#include <rte_string_fns.h>

#include "qede_ethdev.h"
/* Buffer for an alternate firmware image path (PATH_MAX bytes).
 * NOTE(review): populated elsewhere — presumably from devargs; confirm
 * against qede_ethdev.c.
 */
char fw_file[PATH_MAX];

/* Default qed init-values firmware image (8.37.7.0). */
const char *QEDE_DEFAULT_FIRMWARE =
	"/lib/firmware/qed/qed_init_values-8.37.7.0.bin";
/* NOTE(review): patch-hunk residue — this function is truncated. The
 * statements below reference 'pci_dev', which is not declared in this
 * scope; context lines from the original hunk (apparently belonging to
 * the probe/setup path that maps PCI BARs) are missing. Do not edit
 * until the full hunk is restored.
 */
static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
{
edev->regview = pci_dev->mem_resource[0].addr;
edev->doorbells = pci_dev->mem_resource[2].addr;
/* '+' line from the patch: record the doorbell BAR length as well. */
+ edev->db_size = pci_dev->mem_resource[2].len;
}
static int
/* NOTE(review): hunk residue — the 'static int' signature above is cut
 * off from its body; the hw_prepare_params statements below belong to a
 * later point in the function (the ecore_hw_prepare() caller). Context
 * lines between them are missing.
 */
hw_prepare_params.chk_reg_fifo = false;
hw_prepare_params.initiate_pf_flr = true;
hw_prepare_params.allow_mdump = false;
/* '+' line from the patch: pacing explicitly disabled in this flow. */
+ hw_prepare_params.b_en_pacing = false;
/* Epoch for the management FW taken from wall-clock time. */
hw_prepare_params.epoch = (u32)time(NULL);
rc = ecore_hw_prepare(edev, &hw_prepare_params);
if (rc) {
/* Start the slowpath */
memset(&hw_init_params, 0, sizeof(hw_init_params));
hw_init_params.b_hw_start = true;
/* NOTE(review): hunk residue — '-' is the pre-patch line, '+' the
 * replacement: the interrupt mode now comes from the caller's params
 * instead of being hard-wired to MSI-X.
 */
- hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
+ hw_init_params.int_mode = params->int_mode;
hw_init_params.allow_npar_tx_switch = true;
hw_init_params.bin_fw_data = data;
/* Driver version packed as major.minor.rev.eng, one byte each. */
drv_version.version = (params->drv_major << 24) |
(params->drv_minor << 16) |
(params->drv_rev << 8) | (params->drv_eng);
/* Patch replaces strncpy (may leave dst unterminated) with strlcpy
 * bounded by the destination size — needs <rte_string_fns.h>.
 */
- /* TBD: strlcpy() */
- strncpy((char *)drv_version.name, (const char *)params->name,
- MCP_DRV_VER_STR_SIZE - 4);
+ strlcpy((char *)drv_version.name, (const char *)params->name,
+ sizeof(drv_version.name));
rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
dev_info->fw_eng = FW_ENGINEERING_VERSION;
if (IS_PF(edev)) {
/* NOTE(review): hunk residue — the single mf_mode field is replaced by
 * per-feature bits tested in edev->mf_bits (inter-PF switching, aRFS).
 */
- dev_info->mf_mode = edev->mf_mode;
+ dev_info->b_inter_pf_switch =
+ OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH, &edev->mf_bits);
+ if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &edev->mf_bits))
+ dev_info->b_arfs_capable = true;
dev_info->tx_switching = false;
dev_info->smart_an = ecore_mcp_is_smart_an_supported(p_hwfn);
/* NOTE(review): the queue/VLAN accounting below appears to come from a
 * different function — hunk context lines are missing in between.
 */
info->num_queues +=
FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE);
/* Open-coded p_iov_info test replaced by the IS_ECORE_SRIOV() macro. */
- if (edev->p_iov_info)
+ if (IS_ECORE_SRIOV(edev))
max_vf_vlan_filters = edev->p_iov_info->total_vfs *
ECORE_ETH_VF_NUM_VLAN_FILTERS;
info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN) -
} else {
/* VF path: query rx queue count; second engine added below. */
ecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev),
&info->num_queues);
/* num_hwfns > 1 test replaced by the ECORE_IS_CMT() macro. */
- if (edev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(edev)) {
ecore_vf_get_num_rxqs(&edev->hwfns[1], &queues);
info->num_queues += queues;
}
}
/* NOTE(review): body truncated by hunk boundary — only the signature
 * (which gains an unused ptt parameter in the patch) and one local
 * declaration are visible here.
 */
static void qed_fill_link(struct ecore_hwfn *hwfn,
+ __rte_unused struct ecore_ptt *ptt,
struct qed_link_output *if_link)
{
struct ecore_mcp_link_params params;
static void
qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link)
{
- qed_fill_link(&edev->hwfns[0], if_link);
+ struct ecore_hwfn *hwfn;
+ struct ecore_ptt *ptt;
-#ifdef CONFIG_QED_SRIOV
- for_each_hwfn(cdev, i)
- qed_inform_vf_link_state(&cdev->hwfns[i]);
-#endif
+ hwfn = &edev->hwfns[0];
+ if (IS_PF(edev)) {
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt)
+ DP_NOTICE(hwfn, true, "Failed to fill link; No PTT\n");
+
+ qed_fill_link(hwfn, ptt, if_link);
+
+ if (ptt)
+ ecore_ptt_release(hwfn, ptt);
+ } else {
+ qed_fill_link(hwfn, NULL, if_link);
+ }
}
/* NOTE(review): qed_set_link body truncated by hunk boundary —
 * signature only is visible here.
 */
static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
void qed_link_update(struct ecore_hwfn *hwfn)
{
- struct qed_link_output if_link;
+ struct ecore_dev *edev = hwfn->p_dev;
+ struct qede_dev *qdev = (struct qede_dev *)edev;
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
- qed_fill_link(hwfn, &if_link);
+ if (!qede_link_update(dev, 0))
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
}
/* NOTE(review): qed_drain body truncated by hunk boundary — signature
 * only is visible here.
 */
static int qed_drain(struct ecore_dev *edev)