#include "qede_ethdev.h"
#include <rte_alarm.h>
+#include <rte_version.h>
/* Globals */
static const struct qed_eth_ops *qed_ops;
-static const char *drivername = "qede pmd";
static int64_t timer_period = 1;
struct rte_qede_xstats_name_off {
offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
};
+static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
+ {"rx_q_segments",
+ offsetof(struct qede_rx_queue, rx_segs)},
+ {"rx_q_hw_errors",
+ offsetof(struct qede_rx_queue, rx_hw_errors)},
+ {"rx_q_allocation_errors",
+ offsetof(struct qede_rx_queue, rx_alloc_errors)}
+};
+
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}
static void
-qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
+qede_interrupt_handler(struct rte_intr_handle *handle, void *param)
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
qede_interrupt_action(ECORE_LEADING_HWFN(edev));
- if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
+ if (rte_intr_enable(handle))
DP_ERR(edev, "rte_intr_enable failed\n");
}
{
struct ecore_dev *edev = &qdev->edev;
struct qed_dev_info *info = &qdev->dev_info.common;
- static char ver_str[QED_DRV_VER_STR_SIZE];
+ static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
+ static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
DP_INFO(edev, "*********************************\n");
+ DP_INFO(edev, " DPDK version:%s\n", rte_version());
DP_INFO(edev, " Chip details : %s%d\n",
- ECORE_IS_BB(edev) ? "BB" : "AH",
- CHIP_REV_IS_A0(edev) ? 0 : 1);
-
- sprintf(ver_str, "%s %s_%d.%d.%d.%d", QEDE_PMD_VER_PREFIX,
- edev->ver_str, QEDE_PMD_VERSION_MAJOR, QEDE_PMD_VERSION_MINOR,
- QEDE_PMD_VERSION_REVISION, QEDE_PMD_VERSION_PATCH);
- strcpy(qdev->drv_ver, ver_str);
- DP_INFO(edev, " Driver version : %s\n", ver_str);
-
- sprintf(ver_str, "%d.%d.%d.%d", info->fw_major, info->fw_minor,
- info->fw_rev, info->fw_eng);
+ ECORE_IS_BB(edev) ? "BB" : "AH",
+ CHIP_REV_IS_A0(edev) ? 0 : 1);
+ snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
+ info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
+ snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
+ ver_str, QEDE_PMD_VERSION);
+ DP_INFO(edev, " Driver version : %s\n", drv_ver);
DP_INFO(edev, " Firmware version : %s\n", ver_str);
- sprintf(ver_str, "%d.%d.%d.%d",
+ snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
+ "%d.%d.%d.%d",
(info->mfw_rev >> 24) & 0xff,
(info->mfw_rev >> 16) & 0xff,
(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
- DP_INFO(edev, " Management firmware version : %s\n", ver_str);
-
+ DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
DP_INFO(edev, " Firmware file : %s\n", fw_file);
-
DP_INFO(edev, "*********************************\n");
}
struct qede_vlan_entry *vlan;
int rc;
- if (qdev->configured_vlans == dev_info->num_vlan_filters) {
- DP_NOTICE(edev, false, "Reached max VLAN filter limit"
- " enabling accept_any_vlan\n");
- qede_config_accept_any_vlan(qdev, true);
- return 0;
- }
-
if (on) {
+ if (qdev->configured_vlans == dev_info->num_vlan_filters) {
+ DP_INFO(edev, "Reached max VLAN filter limit"
+ " enabling accept_any_vlan\n");
+ qede_config_accept_any_vlan(qdev, true);
+ return 0;
+ }
+
SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
if (tmp->vid == vlan_id) {
DP_ERR(edev, "VLAN %u already configured\n",
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
- int rc;
+ int rc, i, j;
PMD_INIT_FUNC_TRACE(edev);
}
}
- qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
- qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
- qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
-
/* Sanity checks and throw warnings */
- if (rxmode->enable_scatter == 1) {
- DP_ERR(edev, "RX scatter packets is not supported\n");
- return -EINVAL;
- }
+ if (rxmode->enable_scatter == 1)
+ eth_dev->data->scattered_rx = 1;
if (rxmode->enable_lro == 1) {
DP_INFO(edev, "LRO is not supported\n");
DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
"in hw\n");
- SLIST_INIT(&qdev->vlan_list_head);
-
/* Check for the port restart case */
if (qdev->state != QEDE_DEV_INIT) {
rc = qdev->ops->vport_stop(edev, 0);
qede_dealloc_fp_resc(eth_dev);
}
+ qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
+ qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
+ qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
+
/* Fastpath status block should be initialized before sending
* VPORT-START in the case of VF. Anyway, do it for both VF/PF.
*/
if (rc != 0)
return rc;
+ SLIST_INIT(&qdev->vlan_list_head);
+
/* Add primary mac for PF */
if (IS_PF(edev))
qede_mac_addr_set(eth_dev, &qdev->primary_mac);
qdev->state = QEDE_DEV_CONFIG;
+ DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
+ (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
+ qdev->num_tc);
+
return 0;
}
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
+ struct qed_link_output link;
+ uint32_t speed_cap = 0;
PMD_INIT_FUNC_TRACE(edev);
+ dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
QEDE_ETH_OVERHEAD);
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->max_vfs = 0;
else
dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
- dev_info->driver_name = qdev->drv_ver;
dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM);
- dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_100G;
+ memset(&link, 0, sizeof(struct qed_link_output));
+ qdev->ops->common->get_link(edev, &link);
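+ /* Map the NVM-advertised speeds onto ETH_LINK_SPEED_* capability flags */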
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ speed_cap |= ETH_LINK_SPEED_1G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ speed_cap |= ETH_LINK_SPEED_10G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ speed_cap |= ETH_LINK_SPEED_25G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ speed_cap |= ETH_LINK_SPEED_40G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ speed_cap |= ETH_LINK_SPEED_50G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+ speed_cap |= ETH_LINK_SPEED_100G;
+ dev_info->speed_capa = speed_cap;
}
/* return 0 means link status changed, -1 means not changed */
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
int rc;
qdev->ops->common->remove(edev);
- rte_intr_disable(&eth_dev->pci_dev->intr_handle);
+ rte_intr_disable(&pci_dev->intr_handle);
- rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
if (edev->num_hwfns > 1)
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct ecore_eth_stats stats;
+ unsigned int i = 0, j = 0, qid;
+ struct qede_tx_queue *txq;
qdev->ops->get_vport_stats(edev, &stats);
stats.tx_mcast_bytes + stats.tx_bcast_bytes;
eth_stats->oerrors = stats.tx_err_drop_pkts;
+
+ /* Queue stats */
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
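+ /* Per-Rx-queue packet and error counters, read via their field offsets */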
+ eth_stats->q_ipackets[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rcv_pkts));
+ eth_stats->q_errors[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_hw_errors)) +
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_alloc_errors));
+ i++;
+ }
+
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
+ txq = qdev->fp_array[(qid)].txqs[0];
+ eth_stats->q_opackets[j] =
+ *((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue,
+ xmit_pkts)));
+ j++;
+ }
+ }
+}
+
+static unsigned
+qede_get_xstats_count(struct qede_dev *qdev)
+{
+ return RTE_DIM(qede_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) * QEDE_RSS_COUNT(qdev));
}
static int
-qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
+qede_get_xstats_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names, unsigned limit)
{
- unsigned int i, stat_cnt = RTE_DIM(qede_xstats_strings);
+ struct qede_dev *qdev = dev->data->dev_private;
+ const unsigned int stat_cnt = qede_get_xstats_count(qdev);
+ unsigned int i, qid, stat_idx = 0;
- if (xstats_names != NULL)
- for (i = 0; i < stat_cnt; i++)
- snprintf(xstats_names[i].name,
- sizeof(xstats_names[i].name),
+ if (xstats_names != NULL) {
+ for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
"%s",
qede_xstats_strings[i].name);
+ stat_idx++;
+ }
+
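+ /* Per-queue names: splice the queue id after the 4-char "rx_q" prefix,
+ * e.g. "rx_q_segments" becomes "rx_q0_segments"
+ */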
+ for (qid = 0; qid < QEDE_RSS_COUNT(qdev); qid++) {
+ for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%.4s%d%s",
+ qede_rxq_xstats_strings[i].name, qid,
+ qede_rxq_xstats_strings[i].name + 4);
+ stat_idx++;
+ }
+ }
+ }
return stat_cnt;
}
struct qede_dev *qdev = dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct ecore_eth_stats stats;
- unsigned int num = RTE_DIM(qede_xstats_strings);
+ const unsigned int num = qede_get_xstats_count(qdev);
+ unsigned int i, qid, stat_idx = 0;
if (n < num)
return num;
qdev->ops->get_vport_stats(edev, &stats);
- for (num = 0; num < n; num++)
- xstats[num].value = *(u64 *)(((char *)&stats) +
- qede_xstats_strings[num].offset);
+ for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+ xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
+ qede_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+
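+ /* Append the per-Rx-queue counters after the port-wide xstats */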
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+ xstats[stat_idx].value = *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ qede_rxq_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ }
+ }
- return num;
+ return stat_idx;
}
static void
return NULL;
}
-int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_conf *rss_conf)
+void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
+{
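+ /* Translate DPDK ETH_RSS_* hash flags into ecore RSS capability bits */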
+ *rss_caps = 0;
+ *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
+ *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
+ *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+}
+
+static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
{
struct qed_update_vport_params vport_update_params;
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- uint8_t rss_caps;
uint32_t *key = (uint32_t *)rss_conf->rss_key;
uint64_t hf = rss_conf->rss_hf;
int i;
- if (hf == 0)
- DP_ERR(edev, "hash function 0 will disable RSS\n");
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
- rss_caps = 0;
- rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
- rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
- rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
- rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
- rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
- rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+ if (hf != 0) {
+ /* Enable RSS */
+ qede_init_rss_caps(&qdev->rss_params.rss_caps, hf);
+ memcpy(&vport_update_params.rss_params, &qdev->rss_params,
+ sizeof(vport_update_params.rss_params));
+ if (key)
+ memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+ vport_update_params.update_rss_flg = 1;
+ qdev->rss_enabled = 1;
+ } else {
+ /* Disable RSS */
+ qdev->rss_enabled = 0;
+ }
/* If the mapping doesn't fit any supported, return */
- if (rss_caps == 0 && hf != 0)
+ if (qdev->rss_params.rss_caps == 0 && hf != 0)
return -EINVAL;
- memset(&vport_update_params, 0, sizeof(vport_update_params));
-
- if (key != NULL)
- memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
- rss_conf->rss_key_len);
+ DP_INFO(edev, "%s\n", (vport_update_params.update_rss_flg) ?
+ "Enabling RSS" : "Disabling RSS");
- qdev->rss_params.rss_caps = rss_caps;
- memcpy(&vport_update_params.rss_params, &qdev->rss_params,
- sizeof(vport_update_params.rss_params));
- vport_update_params.update_rss_flg = 1;
vport_update_params.vport_id = 0;
return qdev->ops->vport_update(edev, &vport_update_params);
return 0;
}
-int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
+static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
{
struct qed_update_vport_params vport_update_params;
struct qede_dev *qdev = eth_dev->data->dev_private;
/* Extract key data structures */
adapter = eth_dev->data->dev_private;
edev = &adapter->edev;
- pci_addr = eth_dev->pci_dev->addr;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_addr = pci_dev->addr;
PMD_INIT_FUNC_TRACE(edev);
return 0;
}
- pci_dev = eth_dev->pci_dev;
-
rte_eth_copy_pci_info(eth_dev, pci_dev);
qed_ops = qed_get_eth_ops();
qede_update_pf_params(edev);
- rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
+ rte_intr_callback_register(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
- if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
+ if (rte_intr_enable(&pci_dev->intr_handle)) {
DP_ERR(edev, "rte_intr_enable() failed\n");
return -ENODEV;
}
/* Start the Slowpath-process */
memset(&params, 0, sizeof(struct qed_slowpath_params));
params.int_mode = ECORE_INT_MODE_MSIX;
- params.drv_major = QEDE_MAJOR_VERSION;
- params.drv_minor = QEDE_MINOR_VERSION;
- params.drv_rev = QEDE_REVISION_VERSION;
- params.drv_eng = QEDE_ENGINEERING_VERSION;
- strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+ params.drv_major = QEDE_PMD_VERSION_MAJOR;
+ params.drv_minor = QEDE_PMD_VERSION_MINOR;
+ params.drv_rev = QEDE_PMD_VERSION_REVISION;
+ params.drv_eng = QEDE_PMD_VERSION_PATCH;
+ strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
+ QEDE_PMD_DRV_VER_STR_SIZE);
/* For CMT mode device do periodic polling for slowpath events.
* This is required since uio device uses only one MSI-x
* interrupt vector but we need one for each engine.
*/
- if (edev->num_hwfns > 1) {
+ if (edev->num_hwfns > 1 && IS_PF(edev)) {
rc = rte_eal_alarm_set(timer_period * US_PER_S,
qede_poll_sp_sb_cb,
(void *)eth_dev);
qede_alloc_etherdev(adapter, &dev_info);
- adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);
+ adapter->ops->common->set_id(edev, edev->name, QEDE_PMD_VERSION);
if (!is_vf)
adapter->dev_info.num_mac_addrs =
RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio");