offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
{"rx_mac_filter_discards",
offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
+ {"rx_gft_filter_drop",
+ offsetof(struct ecore_eth_stats_common, gft_filter_drop)},
{"rx_hw_buffer_truncates",
offsetof(struct ecore_eth_stats_common, brb_truncates)},
{"rx_hw_buffer_discards",
offsetof(struct qede_rx_queue, rx_alloc_errors)}
};
+/* Get FW version string based on fw_size */
+static int
+qede_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
+{
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_dev_info *info = &qdev->dev_info.common;
+ static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
+ size_t size;
+
+ if (fw_ver == NULL)
+ return 0;
+
+ if (IS_PF(edev))
+ snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
+ QEDE_PMD_FW_VERSION);
+ else
+ snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
+ info->fw_major, info->fw_minor,
+ info->fw_rev, info->fw_eng);
+ size = strlen(ver_str);
+ if (size + 1 <= fw_size) /* Add 1 byte for "\0" */
+ strlcpy(fw_ver, ver_str, fw_size);
+ else
+ return (size + 1);
+
+ snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size),
+ " MFW: %d.%d.%d.%d",
+ GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_3),
+ GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_2),
+ GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_1),
+ GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_0));
+ size = strlen(ver_str);
+ if (size + 1 <= fw_size)
+ strlcpy(fw_ver, ver_str, fw_size);
+
+ if (fw_size <= 32)
+ goto out;
+
+ snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size),
+ " MBI: %d.%d.%d",
+ GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_2),
+ GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_1),
+ GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_0));
+ size = strlen(ver_str);
+ if (size + 1 <= fw_size)
+ strlcpy(fw_ver, ver_str, fw_size);
+
+out:
+ return 0;
+}
+
/* Slow-path interrupt bottom half: run the ecore slow-path DPC.
 * NOTE(review): this span is an elided diff fragment -- DP_ERR() references
 * `edev`, which is not declared in the visible lines, so intervening code
 * (including the interrupt re-ack path) is missing from this view; verify
 * against the full file.
 */
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
	DP_ERR(edev, "rte_intr_ack failed\n");
}
+static void
+qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+ struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ bool use_tx_offload = false;
+
+ if (ECORE_IS_CMT(edev)) {
+ dev->rx_pkt_burst = qede_recv_pkts_cmt;
+ dev->tx_pkt_burst = qede_xmit_pkts_cmt;
+ return;
+ }
+
+ if (dev->data->lro || dev->data->scattered_rx) {
+ DP_INFO(edev, "Assigning qede_recv_pkts\n");
+ dev->rx_pkt_burst = qede_recv_pkts;
+ } else {
+ DP_INFO(edev, "Assigning qede_recv_pkts_regular\n");
+ dev->rx_pkt_burst = qede_recv_pkts_regular;
+ }
+
+ use_tx_offload = !!(tx_offloads &
+ (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+ DEV_TX_OFFLOAD_TCP_TSO | /* tso */
+ DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+
+ if (use_tx_offload) {
+ DP_INFO(edev, "Assigning qede_xmit_pkts\n");
+ dev->tx_pkt_burst = qede_xmit_pkts;
+ } else {
+ DP_INFO(edev, "Assigning qede_xmit_pkts_regular\n");
+ dev->tx_pkt_burst = qede_xmit_pkts_regular;
+ }
+}
+
/* Attach the qed eth ops vector to the adapter.
 * NOTE(review): the body is elided in this diff view -- only the ops
 * assignment is visible; the dev_info copy presumably happens in the
 * hidden lines. Verify against the full file.
 */
static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	qdev->ops = qed_ops;
}
/* qede_print_adapter_info(): log chip/driver/firmware details at probe.
 * This span is a unified-diff hunk: '-' lines are the old implementation
 * (taking a qede_dev and formatting fw_major/mfw_rev fields inline); '+'
 * lines are the replacement, which takes the rte_eth_dev, prints aligned
 * "%-20s" labels, and reuses qede_fw_version_get() for the FW string.
 */
-static void qede_print_adapter_info(struct qede_dev *qdev)
+static void qede_print_adapter_info(struct rte_eth_dev *dev)
{
+	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
-	struct qed_dev_info *info = &qdev->dev_info.common;
-	static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
-	DP_INFO(edev, "*********************************\n");
-	DP_INFO(edev, " DPDK version:%s\n", rte_version());
-	DP_INFO(edev, " Chip details : %s %c%d\n",
+	DP_INFO(edev, "**************************************************\n");
+	DP_INFO(edev, " %-20s: %s\n", "DPDK version", rte_version());
+	DP_INFO(edev, " %-20s: %s %c%d\n", "Chip details",
	ECORE_IS_BB(edev) ? "BB" : "AH",
	'A' + edev->chip_rev,
	(int)edev->chip_metal);
-	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
-	info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
-	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
-	ver_str, QEDE_PMD_VERSION);
-	DP_INFO(edev, " Driver version : %s\n", drv_ver);
-	DP_INFO(edev, " Firmware version : %s\n", ver_str);
-
-	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
-	"%d.%d.%d.%d",
-	(info->mfw_rev >> 24) & 0xff,
-	(info->mfw_rev >> 16) & 0xff,
-	(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
-	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
-	DP_INFO(edev, " Firmware file : %s\n", qede_fw_file);
-	DP_INFO(edev, "*********************************\n");
+	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
+	QEDE_PMD_DRV_VERSION);
+	DP_INFO(edev, " %-20s: %s\n", "Driver version", ver_str);
+	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
+	QEDE_PMD_BASE_VERSION);
+	DP_INFO(edev, " %-20s: %s\n", "Base version", ver_str);
+	qede_fw_version_get(dev, ver_str, sizeof(ver_str));
+	DP_INFO(edev, " %-20s: %s\n", "Firmware version", ver_str);
+	DP_INFO(edev, " %-20s: %s\n", "Firmware file", qede_fw_file);
+	DP_INFO(edev, "**************************************************\n");
}
static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
params.update_vport_active_tx_flg = 1;
params.vport_active_rx_flg = flg;
params.vport_active_tx_flg = flg;
- if (~qdev->enable_tx_switching & flg) {
+ if ((qdev->enable_tx_switching == false) && (flg == true)) {
params.update_tx_switching_flg = 1;
params.tx_switching_flg = !flg;
}
}
}
+ qdev->vlan_strip_flg = flg;
+
DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
return 0;
}
PMD_INIT_FUNC_TRACE(edev);
/* Update MTU only if it has changed */
- if (eth_dev->data->mtu != qdev->mtu) {
- if (qede_update_mtu(eth_dev, qdev->mtu))
+ if (qdev->new_mtu && qdev->new_mtu != qdev->mtu) {
+ if (qede_update_mtu(eth_dev, qdev->new_mtu))
goto err;
+ qdev->mtu = qdev->new_mtu;
+ qdev->new_mtu = 0;
}
/* Configure TPA parameters */
qede_reset_queue_stats(qdev, true);
/* Newer SR-IOV PF driver expects RX/TX queues to be started before
- * enabling RSS. Hence RSS configuration is deferred upto this point.
+ * enabling RSS. Hence RSS configuration is deferred up to this point.
* Also, we would like to retain similar behavior in PF case, so we
* don't do PF/VF specific check here.
*/
/* Start/resume traffic */
qede_fastpath_start(edev);
+ qede_assign_rxtx_handlers(eth_dev);
DP_INFO(edev, "Device started\n");
return 0;
PMD_INIT_FUNC_TRACE(edev);
+ if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
+ rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* We need to have min 1 RX queue.There is no min check in
* rte_eth_dev_configure(), so we are checking it here.
*/
.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};
-static void
+static int
qede_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *dev_info)
{
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_STRIP);
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_RSS_HASH);
dev_info->rx_queue_offload_capa = 0;
/* TX offloads are on a per-packet basis, so it is applicable
if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
speed_cap |= ETH_LINK_SPEED_100G;
dev_info->speed_capa = speed_cap;
+
+ return 0;
}
/* return 0 means link status changed, -1 means not changed */
return rte_eth_linkstatus_set(eth_dev, &link);
}
/* .promiscuous_enable ethdev op.
 * Diff hunk: return type changes void -> int so the ecore status can be
 * propagated (0 on success, -EAGAIN on failure).  When all-multicast is
 * already on, the multi-promisc mode bit is kept set alongside promisc.
 */
-static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
+static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
+	enum _ecore_status_t ecore_status;
	PMD_INIT_FUNC_TRACE(edev);
	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
	type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
-	qed_configure_filter_rx_mode(eth_dev, type);
+	ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
+
+	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}
/* .promiscuous_disable ethdev op.
 * Diff hunk: void -> int return, propagating the ecore status (0 on
 * success, -EAGAIN on failure).  Falls back to multi-promisc mode if
 * all-multicast is still enabled, otherwise restores regular filtering.
 */
-static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
+static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
+	enum _ecore_status_t ecore_status;
	PMD_INIT_FUNC_TRACE(edev);
	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
-	qed_configure_filter_rx_mode(eth_dev,
+	ecore_status = qed_configure_filter_rx_mode(eth_dev,
	QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
-	qed_configure_filter_rx_mode(eth_dev,
+	ecore_status = qed_configure_filter_rx_mode(eth_dev,
	QED_FILTER_RX_MODE_TYPE_REGULAR);
+
+	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}
static void qede_poll_sp_sb_cb(void *param)
if (eth_dev->data->dev_started)
qede_dev_stop(eth_dev);
- qede_stop_vport(edev);
+ if (qdev->vport_started)
+ qede_stop_vport(edev);
qdev->vport_started = false;
qede_fdir_dealloc_resc(eth_dev);
qede_dealloc_fp_resc(eth_dev);
return stat_idx;
}
/* .xstats_reset ethdev op: clear vport and per-queue extended stats.
 * Diff hunk: void -> int return (always 0) to match the updated ethdev
 * callback signature.
 * NOTE(review): the edev declaration is elided from this view -- verify
 * against the full file.
 */
-static void
+static int
qede_reset_xstats(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, true);
+
+	return 0;
}
int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
return qede_dev_set_link_state(eth_dev, false);
}
/* .stats_reset ethdev op: clear vport and per-queue basic stats.
 * Diff hunk: void -> int return (always 0) to match the updated ethdev
 * callback signature.
 */
-static void qede_reset_stats(struct rte_eth_dev *eth_dev)
+static int qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, false);
+
+	return 0;
}
/* .allmulticast_enable ethdev op.
 * Diff hunk: void -> int return, propagating the ecore status (0 on
 * success, -EAGAIN on failure).  If promiscuous is already on, the
 * promisc mode bit is kept set alongside multi-promisc.
 */
-static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
+static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+	enum _ecore_status_t ecore_status;
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
	type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
-	qed_configure_filter_rx_mode(eth_dev, type);
+	ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
+
+	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}
/* .allmulticast_disable ethdev op.
 * Diff hunk: void -> int return, propagating the ecore status (0 on
 * success, -EAGAIN on failure).  Falls back to full promisc mode if
 * promiscuous is still enabled, otherwise restores regular filtering.
 */
-static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
+static int qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
+	enum _ecore_status_t ecore_status;
+
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
-	qed_configure_filter_rx_mode(eth_dev,
+	ecore_status = qed_configure_filter_rx_mode(eth_dev,
	QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
-	qed_configure_filter_rx_mode(eth_dev,
+	ecore_status = qed_configure_filter_rx_mode(eth_dev,
	QED_FILTER_RX_MODE_TYPE_REGULAR);
+
+	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}
static int
};
if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
+ eth_dev->rx_pkt_burst == qede_recv_pkts_regular ||
eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
return ptypes;
int i, rc;
PMD_INIT_FUNC_TRACE(edev);
- qede_dev_info_get(dev, &dev_info);
+ rc = qede_dev_info_get(dev, &dev_info);
+ if (rc != 0) {
+ DP_ERR(edev, "Error during getting ethernet device info\n");
+ return rc;
+ }
max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
frame_size = max_rx_pkt_len;
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
restart = true;
}
rte_delay_ms(1000);
- qdev->mtu = mtu;
+ qdev->new_mtu = mtu;
/* Fix up RX buf size for all queues of the port */
for (i = 0; i < qdev->num_rx_queues; i++) {
/* update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
+
/* Reassign back */
+ qede_assign_rxtx_handlers(dev);
if (ECORE_IS_CMT(edev)) {
dev->rx_pkt_burst = qede_recv_pkts_cmt;
dev->tx_pkt_burst = qede_xmit_pkts_cmt;
.filter_ctrl = qede_dev_filter_ctrl,
.udp_tunnel_port_add = qede_udp_dst_port_add,
.udp_tunnel_port_del = qede_udp_dst_port_del,
+ .fw_version_get = qede_fw_version_get,
};
/* VF ethdev ops table.
 * Diff hunk adds the .fw_version_get callback (shared with the PF table).
 * NOTE(review): most entries of this initializer are elided from this view.
 */
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
+	.fw_version_get = qede_fw_version_get,
};
static void qede_update_pf_params(struct ecore_dev *edev)
qed_ops = qed_get_eth_ops();
if (!qed_ops) {
DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto err;
}
DP_INFO(edev, "Starting qede probe\n");
dp_level, is_vf);
if (rc != 0) {
DP_ERR(edev, "qede probe failed rc %d\n", rc);
- return -ENODEV;
+ rc = -ENODEV;
+ goto err;
}
qede_update_pf_params(edev);
if (rte_intr_enable(&pci_dev->intr_handle)) {
DP_ERR(edev, "rte_intr_enable() failed\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto err;
}
/* Start the Slowpath-process */
strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
QEDE_PMD_DRV_VER_STR_SIZE);
- if (ECORE_IS_CMT(edev)) {
- eth_dev->rx_pkt_burst = qede_recv_pkts_cmt;
- eth_dev->tx_pkt_burst = qede_xmit_pkts_cmt;
- } else {
- eth_dev->rx_pkt_burst = qede_recv_pkts;
- eth_dev->tx_pkt_burst = qede_xmit_pkts;
- }
-
+ qede_assign_rxtx_handlers(eth_dev);
eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
/* For CMT mode device do periodic polling for slowpath events.
if (rc != 0) {
DP_ERR(edev, "Unable to start periodic"
" timer rc %d\n", rc);
- return -EINVAL;
+ rc = -EINVAL;
+ goto err;
}
}
DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
(void *)eth_dev);
- return -ENODEV;
+ rc = -ENODEV;
+ goto err;
}
rc = qed_ops->fill_dev_info(edev, &dev_info);
qed_ops->common->remove(edev);
rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
(void *)eth_dev);
- return -ENODEV;
+ rc = -ENODEV;
+ goto err;
}
qede_alloc_etherdev(adapter, &dev_info);
+ if (do_once) {
+ qede_print_adapter_info(eth_dev);
+ do_once = false;
+ }
+
adapter->ops->common->set_name(edev, edev->name);
if (!is_vf)
eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
- if (do_once) {
- qede_print_adapter_info(adapter);
- do_once = false;
- }
-
/* Bring-up the link */
qede_dev_set_link_state(eth_dev, true);
DP_INFO(edev, "Device initialized\n");
return 0;
+
+err:
+ if (do_once) {
+ qede_print_adapter_info(eth_dev);
+ do_once = false;
+ }
+ return rc;
}
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)