/* Globals */
static const struct qed_eth_ops *qed_ops;
-static const char *drivername = "qede pmd";
static int64_t timer_period = 1;
struct rte_qede_xstats_name_off {
offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
};
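+/* Per-Rx-queue extended stats; each offset indexes into struct qede_rx_queue */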
+static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
+ {"rx_q_segments",
+ offsetof(struct qede_rx_queue, rx_segs)},
+ {"rx_q_hw_errors",
+ offsetof(struct qede_rx_queue, rx_hw_errors)},
+ {"rx_q_allocation_errors",
+ offsetof(struct qede_rx_queue, rx_alloc_errors)}
+};
+
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}
static void
-qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
+qede_interrupt_handler(struct rte_intr_handle *handle, void *param)
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
qede_interrupt_action(ECORE_LEADING_HWFN(edev));
- if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
+ if (rte_intr_enable(handle))
DP_ERR(edev, "rte_intr_enable failed\n");
}
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
+ struct qed_link_output link;
+ uint32_t speed_cap = 0;
PMD_INIT_FUNC_TRACE(edev);
+ dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
QEDE_ETH_OVERHEAD);
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->max_vfs = 0;
else
dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
- dev_info->driver_name = qdev->drv_ver;
dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM);
- dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
- ETH_LINK_SPEED_100G;
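+ /* Derive speed capabilities from the speeds advertised in NVM config */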
+ memset(&link, 0, sizeof(struct qed_link_output));
+ qdev->ops->common->get_link(edev, &link);
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ speed_cap |= ETH_LINK_SPEED_1G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ speed_cap |= ETH_LINK_SPEED_10G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ speed_cap |= ETH_LINK_SPEED_25G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ speed_cap |= ETH_LINK_SPEED_40G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ speed_cap |= ETH_LINK_SPEED_50G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+ speed_cap |= ETH_LINK_SPEED_100G;
+ dev_info->speed_capa = speed_cap;
}
/* return 0 means link status changed, -1 means not changed */
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
int rc;
qdev->ops->common->remove(edev);
- rte_intr_disable(&eth_dev->pci_dev->intr_handle);
+ rte_intr_disable(&pci_dev->intr_handle);
- rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
if (edev->num_hwfns > 1)
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct ecore_eth_stats stats;
+ unsigned int i = 0, j = 0, qid;
+ struct qede_tx_queue *txq;
qdev->ops->get_vport_stats(edev, &stats);
stats.tx_mcast_bytes + stats.tx_bcast_bytes;
eth_stats->oerrors = stats.tx_err_drop_pkts;
+
+ /* Queue stats */
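+ /* i indexes Rx queues and j indexes Tx queues; each counter is read by
+  * adding its offsetof() to the corresponding queue struct base pointer.
+  */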
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ eth_stats->q_ipackets[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rcv_pkts));
+ eth_stats->q_errors[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_hw_errors)) +
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_alloc_errors));
+ i++;
+ }
+
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
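+ /* Only the first Tx queue of the fastpath (txqs[0]) is accounted */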
+ txq = qdev->fp_array[(qid)].txqs[0];
+ eth_stats->q_opackets[j] =
+ *((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue,
+ xmit_pkts)));
+ j++;
+ }
+ }
+}
+
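+/* Total xstats count: port-wide entries plus per-Rx-queue entries */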
+static unsigned
+qede_get_xstats_count(struct qede_dev *qdev)
+{
+ return RTE_DIM(qede_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) * QEDE_RSS_COUNT(qdev));
}
static int
-qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
+qede_get_xstats_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names, unsigned limit)
{
- unsigned int i, stat_cnt = RTE_DIM(qede_xstats_strings);
+ struct qede_dev *qdev = dev->data->dev_private;
+ const unsigned int stat_cnt = qede_get_xstats_count(qdev);
+ unsigned int i, qid, stat_idx = 0;
- if (xstats_names != NULL)
- for (i = 0; i < stat_cnt; i++)
- snprintf(xstats_names[i].name,
- sizeof(xstats_names[i].name),
+ if (xstats_names != NULL) {
+ for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
"%s",
qede_xstats_strings[i].name);
+ stat_idx++;
+ }
+
+ for (qid = 0; qid < QEDE_RSS_COUNT(qdev); qid++) {
+ for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
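+ /* Splice the queue id after the "rx_q" prefix, e.g.
+  * "rx_q_segments" becomes "rx_q0_segments".
+  */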
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%.4s%d%s",
+ qede_rxq_xstats_strings[i].name, qid,
+ qede_rxq_xstats_strings[i].name + 4);
+ stat_idx++;
+ }
+ }
+ }
return stat_cnt;
}
struct qede_dev *qdev = dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct ecore_eth_stats stats;
- unsigned int num = RTE_DIM(qede_xstats_strings);
+ const unsigned int num = qede_get_xstats_count(qdev);
+ unsigned int i, qid, stat_idx = 0;
if (n < num)
return num;
qdev->ops->get_vport_stats(edev, &stats);
- for (num = 0; num < n; num++)
- xstats[num].value = *(u64 *)(((char *)&stats) +
- qede_xstats_strings[num].offset);
+ for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+ xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
+ qede_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+
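+ /* Per-Rx-queue xstats follow the port-wide entries */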
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+ xstats[stat_idx].value = *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ qede_rxq_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ }
+ }
- return num;
+ return stat_idx;
}
static void
/* Extract key data structures */
adapter = eth_dev->data->dev_private;
edev = &adapter->edev;
- pci_addr = eth_dev->pci_dev->addr;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_addr = pci_dev->addr;
PMD_INIT_FUNC_TRACE(edev);
return 0;
}
- pci_dev = eth_dev->pci_dev;
-
rte_eth_copy_pci_info(eth_dev, pci_dev);
qed_ops = qed_get_eth_ops();
qede_update_pf_params(edev);
- rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
+ rte_intr_callback_register(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
- if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
+ if (rte_intr_enable(&pci_dev->intr_handle)) {
DP_ERR(edev, "rte_intr_enable() failed\n");
return -ENODEV;
}
RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
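+/* Kernel-module dependencies: the device must be bound to one of the listed drivers */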
+RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio");