#include "qede_ethdev.h"
#include <rte_string_fns.h>
#include <rte_alarm.h>
-#include <rte_version.h>
#include <rte_kvargs.h>
-/* Globals */
-int qede_logtype_init;
-int qede_logtype_driver;
-
static const struct qed_eth_ops *qed_ops;
static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);
offsetof(struct qede_rx_queue, rx_alloc_errors)}
};
+/* Get FW version string based on fw_size */
+/*
+ * rte_eth_dev fw_version_get() callback.
+ * Formats the firmware version into fw_ver (at most fw_size bytes,
+ * NUL-terminated whenever a copy is made).  Returns 0 on success, or
+ * the required buffer length (strlen + 1 for '\0') when fw_size is too
+ * small to hold even the base FW version string.
+ */
+static int
+qede_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
+{
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_dev_info *info = &qdev->dev_info.common;
+ /* NOTE(review): 'static' scratch buffer makes this non-reentrant if
+  * two ports query concurrently -- confirm callers serialize.
+  */
+ static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
+ size_t size;
+
+ /* A NULL destination is treated as a successful no-op query. */
+ if (fw_ver == NULL)
+ return 0;
+
+ /* PF reports the FW version bundled with the PMD; VF reports the
+  * running FW version obtained from the device info.
+  */
+ if (IS_PF(edev))
+ snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
+ QEDE_PMD_FW_VERSION);
+ else
+ snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
+ info->fw_major, info->fw_minor,
+ info->fw_rev, info->fw_eng);
+ size = strlen(ver_str);
+ if (size + 1 <= fw_size) /* Add 1 byte for "\0" */
+ strlcpy(fw_ver, ver_str, fw_size);
+ else
+ return (size + 1);
+
+ /* Append the management FW (MFW) version; copy only if the caller's
+  * buffer can still hold the longer string.
+  */
+ snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size),
+ " MFW: %d.%d.%d.%d",
+ GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_3),
+ GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_2),
+ GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_1),
+ GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_0));
+ size = strlen(ver_str);
+ if (size + 1 <= fw_size)
+ strlcpy(fw_ver, ver_str, fw_size);
+
+ /* NOTE(review): 32 is a magic threshold -- presumably the legacy
+  * fixed fw_ver buffer size below which MBI info is omitted; confirm.
+  */
+ if (fw_size <= 32)
+ goto out;
+
+ /* Append the MBI version for callers with a large enough buffer. */
+ snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size),
+ " MBI: %d.%d.%d",
+ GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_2),
+ GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_1),
+ GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_0));
+ size = strlen(ver_str);
+ if (size + 1 <= fw_size)
+ strlcpy(fw_ver, ver_str, fw_size);
+
+out:
+ return 0;
+}
+
/* Run the slowpath deferred-procedure-call for one HW function.
 * The added spq_lock hold presumably serializes slowpath-queue
 * completion processing against concurrent SPQ users -- confirm
 * against other spq_lock takers (outside this view).
 */
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
+ OSAL_SPIN_LOCK(&p_hwfn->spq_lock);
ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
+ OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock);
}
static void
}
static void
-qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
+qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy)
{
uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
struct qede_dev *qdev = dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
bool use_tx_offload = false;
+ if (is_dummy) {
+ dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+ dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+ return;
+ }
+
if (ECORE_IS_CMT(edev)) {
dev->rx_pkt_burst = qede_recv_pkts_cmt;
dev->tx_pkt_burst = qede_xmit_pkts_cmt;
qdev->ops = qed_ops;
}
-static void qede_print_adapter_info(struct qede_dev *qdev)
+/* Log a one-time banner with DPDK/driver/FW version details.
+ * Signature changed to take the rte_eth_dev so the FW version can be
+ * obtained via qede_fw_version_get() instead of being re-formatted here.
+ */
+static void qede_print_adapter_info(struct rte_eth_dev *dev)
{
+ struct qede_dev *qdev = dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- struct qed_dev_info *info = &qdev->dev_info.common;
static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
DP_INFO(edev, "**************************************************\n");
- DP_INFO(edev, " DPDK version\t\t\t: %s\n", rte_version());
- DP_INFO(edev, " Chip details\t\t\t: %s %c%d\n",
+ /* "%-20s" left-pads the labels for column-aligned output instead of
+  * the old hand-tuned tab sequences.
+  */
+ DP_INFO(edev, " %-20s: %s\n", "DPDK version", rte_version());
+ DP_INFO(edev, " %-20s: %s %c%d\n", "Chip details",
ECORE_IS_BB(edev) ? "BB" : "AH",
'A' + edev->chip_rev,
(int)edev->chip_metal);
snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
QEDE_PMD_DRV_VERSION);
- DP_INFO(edev, " Driver version\t\t\t: %s\n", ver_str);
-
+ DP_INFO(edev, " %-20s: %s\n", "Driver version", ver_str);
snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
QEDE_PMD_BASE_VERSION);
- DP_INFO(edev, " Base version\t\t\t: %s\n", ver_str);
-
- if (!IS_VF(edev))
- snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
- QEDE_PMD_FW_VERSION);
- else
- snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
- info->fw_major, info->fw_minor,
- info->fw_rev, info->fw_eng);
- DP_INFO(edev, " Firmware version\t\t\t: %s\n", ver_str);
-
- snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
- "%d.%d.%d.%d",
- (info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
- QED_MFW_VERSION_3_OFFSET,
- (info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
- QED_MFW_VERSION_2_OFFSET,
- (info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
- QED_MFW_VERSION_1_OFFSET,
- (info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
- QED_MFW_VERSION_0_OFFSET);
- DP_INFO(edev, " Management Firmware version\t: %s\n", ver_str);
- DP_INFO(edev, " Firmware file\t\t\t: %s\n", qede_fw_file);
+ DP_INFO(edev, " %-20s: %s\n", "Base version", ver_str);
+ /* Single "Firmware version" line now covers FW/MFW (and MBI when the
+  * buffer permits) via the shared helper.
+  */
+ qede_fw_version_get(dev, ver_str, sizeof(ver_str));
+ DP_INFO(edev, " %-20s: %s\n", "Firmware version", ver_str);
+ DP_INFO(edev, " %-20s: %s\n", "Firmware file", qede_fw_file);
DP_INFO(edev, "**************************************************\n");
}
ECORE_ACCEPT_BCAST;
if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
- flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ flags.rx_accept_filter |= (ECORE_ACCEPT_UCAST_UNMATCHED |
+ ECORE_ACCEPT_MCAST_UNMATCHED);
if (IS_VF(edev)) {
- flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
- DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
+ flags.tx_accept_filter |=
+ (ECORE_ACCEPT_UCAST_UNMATCHED |
+ ECORE_ACCEPT_MCAST_UNMATCHED);
+ DP_INFO(edev, "Enabling Tx unmatched flags for VF\n");
}
} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
- } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
- QED_FILTER_RX_MODE_TYPE_PROMISC)) {
- flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
- ECORE_ACCEPT_MCAST_UNMATCHED;
}
return ecore_filter_accept_cmd(edev, 0, flags, false, false,
}
}
- if (mask & ETH_VLAN_EXTEND_MASK)
- DP_ERR(edev, "Extend VLAN not supported\n");
-
qdev->vlan_offload_mask = mask;
DP_INFO(edev, "VLAN offload mask %d\n", mask);
qede_reset_queue_stats(qdev, true);
/* Newer SR-IOV PF driver expects RX/TX queues to be started before
- * enabling RSS. Hence RSS configuration is deferred upto this point.
+ * enabling RSS. Hence RSS configuration is deferred up to this point.
* Also, we would like to retain similar behavior in PF case, so we
* don't do PF/VF specific check here.
*/
if (qede_activate_vport(eth_dev, true))
goto err;
+ /* Bring-up the link */
+ qede_dev_set_link_state(eth_dev, true);
+
/* Update link status */
qede_link_update(eth_dev, 0);
/* Start/resume traffic */
qede_fastpath_start(edev);
- qede_assign_rxtx_handlers(eth_dev);
+ /* Assign I/O handlers */
+ qede_assign_rxtx_handlers(eth_dev, false);
+
DP_INFO(edev, "Device started\n");
return 0;
return -1; /* common error code is < 0 */
}
-static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+/* dev_stop callback: return type changed void -> int to match the
+ * updated rte_eth_dev_ops contract; always reports 0 here.
+ */
+static int qede_dev_stop(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
PMD_INIT_FUNC_TRACE(edev);
+ eth_dev->data->dev_started = 0;
+
+ /* Bring the link down */
+ qede_dev_set_link_state(eth_dev, false);
+
+ /* Update link status */
+ qede_link_update(eth_dev, 0);
+
+ /* Replace I/O functions with dummy ones. It cannot
+ * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+ */
+ qede_assign_rxtx_handlers(eth_dev, true);
/* Disable vport */
if (qede_activate_vport(eth_dev, false))
- return;
+ /* NOTE(review): vport-deactivate failure is still reported as
+  * success (0) to the caller -- confirm this is intentional.
+  */
+ return 0;
if (qdev->enable_lro)
qede_enable_tpa(eth_dev, false);
ecore_hw_stop_fastpath(edev); /* TBD - loop */
DP_INFO(edev, "Device is stopped\n");
+
+ return 0;
}
static const char * const valid_args[] = {
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode;
+ uint8_t num_rxqs;
+ uint8_t num_txqs;
int ret;
PMD_INIT_FUNC_TRACE(edev);
if (qede_check_fdir_support(eth_dev))
return -ENOTSUP;
- qede_dealloc_fp_resc(eth_dev);
- qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns;
- qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns;
-
- if (qede_alloc_fp_resc(qdev))
- return -ENOMEM;
+ /* Allocate/reallocate fastpath resources only for new queue config */
+ num_txqs = eth_dev->data->nb_tx_queues * edev->num_hwfns;
+ num_rxqs = eth_dev->data->nb_rx_queues * edev->num_hwfns;
+ if (qdev->num_tx_queues != num_txqs ||
+ qdev->num_rx_queues != num_rxqs) {
+ qede_dealloc_fp_resc(eth_dev);
+ qdev->num_tx_queues = num_txqs;
+ qdev->num_rx_queues = num_rxqs;
+ if (qede_alloc_fp_resc(qdev))
+ return -ENOMEM;
+ }
/* If jumbo enabled adjust MTU */
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
/* Enable promiscuous RX mode.  The old coupling with the allmulticast
 * state is removed here: PROMISC alone now covers unmatched multicast
 * (see the rx-mode hunk that adds ECORE_ACCEPT_MCAST_UNMATCHED for the
 * PROMISC type), so querying rte_eth_allmulticast_get() is unnecessary.
 */
static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
- enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
enum _ecore_status_t ecore_status;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
PMD_INIT_FUNC_TRACE(edev);
- if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
- type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
-
ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
/* Map ecore status to DPDK convention: 0 on success, -EAGAIN else. */
return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}
}
-static void qede_dev_close(struct rte_eth_dev *eth_dev)
+/* dev_close callback: return type changed void -> int per the updated
+ * rte_eth_dev_ops contract; propagates qede_dev_stop()'s status.
+ * The explicit link-down call was removed here because qede_dev_stop()
+ * now brings the link down itself.
+ */
+static int qede_dev_close(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int ret = 0;
PMD_INIT_FUNC_TRACE(edev);
+ /* only close in case of the primary process */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
/* dev_stop() shall cleanup fp resources in hw but without releasing
 * dma memories and sw structures so that dev_start() can be called
 * by the app without reconfiguration. However, in dev_close() we
 * can release all the resources and device can be brought up newly
 */
if (eth_dev->data->dev_started)
- qede_dev_stop(eth_dev);
+ ret = qede_dev_stop(eth_dev);
if (qdev->vport_started)
qede_stop_vport(edev);
eth_dev->data->nb_rx_queues = 0;
eth_dev->data->nb_tx_queues = 0;
- /* Bring the link down */
- qede_dev_set_link_state(eth_dev, false);
qdev->ops->common->slowpath_stop(edev);
qdev->ops->common->remove(edev);
rte_intr_disable(&pci_dev->intr_handle);
+ /* CMT devices poll slowpath via an alarm (set up at init); cancel it. */
if (ECORE_IS_CMT(edev))
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
+
+ return ret;
}
static int
QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
enum _ecore_status_t ecore_status;
- if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
- type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
-
ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
dev->data->min_rx_buf_size);
return -EINVAL;
}
- /* Temporarily replace I/O functions with dummy ones. It cannot
- * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
- */
- dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
- dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
if (dev->data->dev_started) {
dev->data->dev_started = 0;
- qede_dev_stop(dev);
+ rc = qede_dev_stop(dev);
+ if (rc != 0)
+ return rc;
restart = true;
}
rte_delay_ms(1000);
/* update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
- /* Reassign back */
- qede_assign_rxtx_handlers(dev);
- if (ECORE_IS_CMT(edev)) {
- dev->rx_pkt_burst = qede_recv_pkts_cmt;
- dev->tx_pkt_burst = qede_xmit_pkts_cmt;
- } else {
- dev->rx_pkt_burst = qede_recv_pkts;
- dev->tx_pkt_burst = qede_xmit_pkts;
- }
return 0;
}
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
- .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
.filter_ctrl = qede_dev_filter_ctrl,
.udp_tunnel_port_add = qede_udp_dst_port_add,
.udp_tunnel_port_del = qede_udp_dst_port_del,
+ .fw_version_get = qede_fw_version_get,
+ .get_reg = qede_get_regs,
};
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
- .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
.mac_addr_add = qede_mac_addr_add,
.mac_addr_remove = qede_mac_addr_remove,
.mac_addr_set = qede_mac_addr_set,
+ .fw_version_get = qede_fw_version_get,
};
static void qede_update_pf_params(struct ecore_dev *edev)
qed_ops->common->update_pf_params(edev, &pf_params);
}
+/* Fill *mac_addr with a randomly generated, locally administered MAC.
+ * Used when the PF does not assign a MAC to the VF (see the VF init
+ * hunk that calls this when !is_mac_exist).
+ */
+static void qede_generate_random_mac_addr(struct rte_ether_addr *mac_addr)
+{
+ uint64_t random;
+
+ /* Set Organizationally Unique Identifier (OUI) prefix. */
+ mac_addr->addr_bytes[0] = 0x00;
+ mac_addr->addr_bytes[1] = 0x09;
+ mac_addr->addr_bytes[2] = 0xC0;
+
+ /* Force indication of locally assigned MAC address. */
+ mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
+
+ /* Generate the last 3 bytes of the MAC address with a random number. */
+ random = rte_rand();
+
+ /* NOTE(review): copies the first 3 in-memory bytes of 'random', so
+  * which bits are used is endian-dependent -- harmless for random
+  * data, but worth knowing.
+  */
+ memcpy(&mac_addr->addr_bytes[3], &random, 3);
+}
+
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
struct rte_pci_device *pci_dev;
uint8_t bulletin_change;
uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
uint8_t is_mac_forced;
- bool is_mac_exist;
+ bool is_mac_exist = false;
/* Fix up ecore debug level */
uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
uint8_t dp_level = ECORE_LEVEL_VERBOSE;
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* @DPDK */
edev->vendor_id = pci_dev->id.vendor_id;
strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
QEDE_PMD_DRV_VER_STR_SIZE);
- qede_assign_rxtx_handlers(eth_dev);
+ qede_assign_rxtx_handlers(eth_dev, true);
eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
/* For CMT mode device do periodic polling for slowpath events.
qede_alloc_etherdev(adapter, &dev_info);
if (do_once) {
- qede_print_adapter_info(adapter);
+ qede_print_adapter_info(eth_dev);
do_once = false;
}
DP_ERR(edev, "No VF macaddr assigned\n");
}
}
+
+ /* If MAC doesn't exist from PF, generate random one */
+ if (!is_mac_exist) {
+ struct rte_ether_addr *mac_addr;
+
+ mac_addr = (struct rte_ether_addr *)&vf_mac;
+ qede_generate_random_mac_addr(mac_addr);
+
+ rte_ether_addr_copy(mac_addr,
+ ð_dev->data->mac_addrs[0]);
+
+ rte_ether_addr_copy(ð_dev->data->mac_addrs[0],
+ &adapter->primary_mac);
+ }
}
eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
-
- /* Bring-up the link */
- qede_dev_set_link_state(eth_dev, true);
+ eth_dev->rx_descriptor_status = qede_rx_descriptor_status;
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
adapter->vxlan.enable = false;
adapter->geneve.enable = false;
adapter->ipgre.enable = false;
+ qed_ops->sriov_configure(edev, pci_dev->max_vfs);
}
DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
err:
if (do_once) {
- qede_print_adapter_info(adapter);
+ qede_print_adapter_info(eth_dev);
do_once = false;
}
return rc;
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
-
PMD_INIT_FUNC_TRACE(edev);
-
- /* only uninitialize in the primary process */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
- /* safe to close dev here */
qede_dev_close(eth_dev);
-
- eth_dev->dev_ops = NULL;
- eth_dev->rx_pkt_burst = NULL;
- eth_dev->tx_pkt_burst = NULL;
-
return 0;
}
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
-
-RTE_INIT(qede_init_log)
-{
- qede_logtype_init = rte_log_register("pmd.net.qede.init");
- if (qede_logtype_init >= 0)
- rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
- qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
- if (qede_logtype_driver >= 0)
- rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
-}
+/* RTE_LOG_REGISTER replaces the hand-rolled RTE_INIT constructor above:
+ * it both defines the logtype variable and registers it with a default
+ * NOTICE level (hence the removal of the file-scope int globals too).
+ */
+RTE_LOG_REGISTER(qede_logtype_init, pmd.net.qede.init, NOTICE);
+RTE_LOG_REGISTER(qede_logtype_driver, pmd.net.qede.driver, NOTICE);