diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 257e5b21c9..fbad2a6e5f 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -279,14 +279,14 @@ static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
 }
 
 static void
-qede_interrupt_handler(struct rte_intr_handle *handle, void *param)
+qede_interrupt_handler(void *param)
 {
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
 	struct qede_dev *qdev = eth_dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
 
 	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
-	if (rte_intr_enable(handle))
+	if (rte_intr_enable(eth_dev->intr_handle))
 		DP_ERR(edev, "rte_intr_enable failed\n");
 }
 
@@ -769,7 +769,7 @@ static int qede_init_vport(struct qede_dev *qdev)
 	int rc;
 
 	start.remove_inner_vlan = 1;
-	start.gro_enable = 0;
+	start.enable_lro = qdev->enable_lro;
 	start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
 	start.vport_id = 0;
 	start.drop_ttl0 = false;
@@ -798,7 +798,7 @@ static void qede_prandom_bytes(uint32_t *buff)
 		buff[i] = rand();
 }
 
-static int qede_config_rss(struct rte_eth_dev *eth_dev)
+int qede_config_rss(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
@@ -866,11 +866,6 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	if (rxmode->enable_scatter == 1)
 		eth_dev->data->scattered_rx = 1;
 
-	if (rxmode->enable_lro == 1) {
-		DP_ERR(edev, "LRO is not supported\n");
-		return -EINVAL;
-	}
-
 	if (!rxmode->hw_strip_crc)
 		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
 
@@ -878,6 +873,13 @@
 		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
 			"in hw\n");
 
+	if (rxmode->enable_lro) {
+		qdev->enable_lro = true;
+		/* Enable scatter mode for LRO */
+		if (!rxmode->enable_scatter)
+			eth_dev->data->scattered_rx = 1;
+	}
+
 	/* Check for the port restart case */
 	if (qdev->state != QEDE_DEV_INIT) {
 		rc = qdev->ops->vport_stop(edev, 0);
@@ -904,26 +906,23 @@
 	if (rc != 0)
 		return rc;
 
-	/* Do RSS configuration after vport-start */
-	switch (rxmode->mq_mode) {
-	case ETH_MQ_RX_RSS:
-		rc = qede_config_rss(eth_dev);
-		if (rc != 0) {
-			qdev->ops->vport_stop(edev, 0);
-			qede_dealloc_fp_resc(eth_dev);
-			return -EINVAL;
-		}
-		break;
-	case ETH_MQ_RX_NONE:
-		DP_INFO(edev, "RSS is disabled\n");
-		break;
-	default:
+	if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
+	    rxmode->mq_mode == ETH_MQ_RX_NONE)) {
 		DP_ERR(edev, "Unsupported RSS mode\n");
 		qdev->ops->vport_stop(edev, 0);
 		qede_dealloc_fp_resc(eth_dev);
 		return -EINVAL;
 	}
 
+	/* Flow director mode check */
+	rc = qede_check_fdir_support(eth_dev);
+	if (rc) {
+		qdev->ops->vport_stop(edev, 0);
+		qede_dealloc_fp_resc(eth_dev);
+		return -EINVAL;
+	}
+	SLIST_INIT(&qdev->fdir_info.fdir_list_head);
+
 	SLIST_INIT(&qdev->vlan_list_head);
 
 	/* Add primary mac for PF */
@@ -948,13 +947,15 @@
 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
 	.nb_max = NUM_RX_BDS_MAX,
 	.nb_min = 128,
-	.nb_align = 128 /* lowest common multiple */
+	.nb_align = 128 /* lowest common multiple */
 };
 
 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
 	.nb_max = NUM_TX_BDS_MAX,
 	.nb_min = 256,
-	.nb_align = 256
+	.nb_align = 256,
+	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
+	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
 };
 
 static void
@@ -996,12 +997,16 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 				     DEV_RX_OFFLOAD_IPV4_CKSUM |
 				     DEV_RX_OFFLOAD_UDP_CKSUM |
 				     DEV_RX_OFFLOAD_TCP_CKSUM |
-				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM);
+				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     DEV_RX_OFFLOAD_TCP_LRO);
+
 	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
 				     DEV_TX_OFFLOAD_IPV4_CKSUM |
 				     DEV_TX_OFFLOAD_UDP_CKSUM |
 				     DEV_TX_OFFLOAD_TCP_CKSUM |
-				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
+				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				     DEV_TX_OFFLOAD_TCP_TSO |
+				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
 
 	memset(&link, 0, sizeof(struct qed_link_output));
 	qdev->ops->common->get_link(edev, &link);
@@ -1124,6 +1129,8 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE(edev);
 
+	qede_fdir_dealloc_resc(eth_dev);
+
 	/* dev_stop() shall cleanup fp resources in hw but without releasing
 	 * dma memories and sw structures so that dev_start() can be called
 	 * by the app without reconfiguration. However, in dev_close() we
@@ -1478,6 +1485,8 @@ static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
 	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
 	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
 	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
+	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
 }
 
 static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
@@ -1487,11 +1496,11 @@
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct ecore_sp_vport_update_params vport_update_params;
 	struct ecore_rss_params rss_params;
-	struct ecore_rss_params params;
 	struct ecore_hwfn *p_hwfn;
 	uint32_t *key = (uint32_t *)rss_conf->rss_key;
 	uint64_t hf = rss_conf->rss_hf;
 	uint8_t len = rss_conf->rss_key_len;
+	uint8_t idx;
 	uint8_t i;
 	int rc;
 
@@ -1526,6 +1535,11 @@
 	/* tbl_size has to be set with capabilities */
 	rss_params.rss_table_size_log = 7;
 	vport_update_params.vport_id = 0;
+	/* pass the L2 handles instead of qids */
+	for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
+		idx = qdev->rss_ind_table[i];
+		rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
+	}
 	vport_update_params.rss_params = &rss_params;
 
 	for_each_hwfn(edev, i) {
@@ -1607,14 +1621,18 @@
 		shift = i % RTE_RETA_GROUP_SIZE;
 		if (reta_conf[idx].mask & (1ULL << shift)) {
 			entry = reta_conf[idx].reta[shift];
-			params.rss_ind_table[i] = entry;
+			/* Pass rxq handles to ecore */
+			params.rss_ind_table[i] =
+					qdev->fp_array[entry].rxq->handle;
+			/* Update the local copy for RETA query command */
+			qdev->rss_ind_table[i] = entry;
 		}
 	}
 
 	/* Fix up RETA for CMT mode device */
 	if (edev->num_hwfns > 1)
 		qdev->rss_enable = qed_update_rss_parm_cmt(edev,
-					&params.rss_ind_table[0]);
+					params.rss_ind_table[0]);
 	params.update_rss_ind_table = 1;
 	params.rss_table_size_log = 7;
 	params.update_rss_config = 1;
@@ -1634,10 +1652,6 @@
 		}
 	}
 
-	/* Update the local copy for RETA query command */
-	memcpy(qdev->rss_ind_table, params.rss_ind_table,
-	       sizeof(params.rss_ind_table));
-
 	return 0;
 }
 
@@ -1670,32 +1684,61 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
 
 int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
-	uint32_t frame_size;
-	struct qede_dev *qdev = dev->data->dev_private;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct rte_eth_dev_info dev_info = {0};
+	struct qede_fastpath *fp;
+	uint32_t frame_size;
+	uint16_t rx_buf_size;
+	uint16_t bufsz;
+	int i;
 
+	PMD_INIT_FUNC_TRACE(edev);
 	qede_dev_info_get(dev, &dev_info);
-
-	/* VLAN_TAG = 4 */
-	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
-
-	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+	frame_size = mtu + QEDE_ETH_OVERHEAD;
+	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+		DP_ERR(edev, "MTU %u out of range\n", mtu);
 		return -EINVAL;
-
+	}
 	if (!dev->data->scattered_rx &&
-	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
+			dev->data->min_rx_buf_size);
 		return -EINVAL;
-
+	}
+	/* Temporarily replace I/O functions with dummy ones. It cannot
+	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+	 */
+	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+	qede_dev_stop(dev);
+	rte_delay_ms(1000);
+	qdev->mtu = mtu;
+	/* Fix up RX buf size for all queues of the port */
+	for_each_queue(i) {
+		fp = &qdev->fp_array[i];
+		if (fp->type & QEDE_FASTPATH_RX) {
+			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+			if (dev->data->scattered_rx)
+				rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+			else
+				rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
+			rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+			fp->rxq->rx_buf_size = rx_buf_size;
+			DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+		}
+	}
+	qede_dev_start(dev);
 	if (frame_size > ETHER_MAX_LEN)
 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
 	else
 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
-
 	/* update max frame size */
 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-	qdev->mtu = mtu;
-	qede_dev_stop(dev);
-	qede_dev_start(dev);
+	/* Reassign back */
+	dev->rx_pkt_burst = qede_recv_pkts;
+	dev->tx_pkt_burst = qede_xmit_pkts;
 
 	return 0;
 }
@@ -1957,11 +2000,13 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
 		}
 		break;
 	case RTE_ETH_FILTER_FDIR:
+		return qede_fdir_filter_conf(eth_dev, filter_op, arg);
+	case RTE_ETH_FILTER_NTUPLE:
+		return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
 	case RTE_ETH_FILTER_MACVLAN:
 	case RTE_ETH_FILTER_ETHERTYPE:
 	case RTE_ETH_FILTER_FLEXIBLE:
 	case RTE_ETH_FILTER_SYN:
-	case RTE_ETH_FILTER_NTUPLE:
 	case RTE_ETH_FILTER_HASH:
 	case RTE_ETH_FILTER_L2_TUNNEL:
 	case RTE_ETH_FILTER_MAX:
@@ -2052,6 +2097,7 @@ static void qede_update_pf_params(struct ecore_dev *edev)
 
 	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
 	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
+	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
 	qed_ops->common->update_pf_params(edev, &pf_params);
 }
 
@@ -2088,6 +2134,7 @@
 
 	eth_dev->rx_pkt_burst = qede_recv_pkts;
 	eth_dev->tx_pkt_burst = qede_xmit_pkts;
+	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		DP_NOTICE(edev, false,
@@ -2335,35 +2382,47 @@ static const struct rte_pci_id pci_id_qede_map[] = {
 	{.vendor_id = 0,}
 };
 
-static struct eth_driver rte_qedevf_pmd = {
-	.pci_drv = {
-		.id_table = pci_id_qedevf_map,
-		.drv_flags =
-		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
-		.probe = rte_eth_dev_pci_probe,
-		.remove = rte_eth_dev_pci_remove,
-	},
-	.eth_dev_init = qedevf_eth_dev_init,
-	.eth_dev_uninit = qedevf_eth_dev_uninit,
-	.dev_private_size = sizeof(struct qede_dev),
+static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev,
+		sizeof(struct qede_dev), qedevf_eth_dev_init);
+}
+
+static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qedevf_pmd = {
+	.id_table = pci_id_qedevf_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.probe = qedevf_eth_dev_pci_probe,
+	.remove = qedevf_eth_dev_pci_remove,
 };
 
-static struct eth_driver rte_qede_pmd = {
-	.pci_drv = {
-		.id_table = pci_id_qede_map,
-		.drv_flags =
-		    RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
-		.probe = rte_eth_dev_pci_probe,
-		.remove = rte_eth_dev_pci_remove,
-	},
-	.eth_dev_init = qede_eth_dev_init,
-	.eth_dev_uninit = qede_eth_dev_uninit,
-	.dev_private_size = sizeof(struct qede_dev),
+static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev,
+		sizeof(struct qede_dev), qede_eth_dev_init);
+}
+
+static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qede_pmd = {
+	.id_table = pci_id_qede_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.probe = qede_eth_dev_pci_probe,
+	.remove = qede_eth_dev_pci_remove,
 };
 
-RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio");
-RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio");
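
A minimal application-side sketch of exercising what this diff enables, assuming a DPDK 17.05-era ethdev API and an already-initialized port. With the patch, qede accepts rxmode.enable_lro (turning on scattered Rx itself when needed), restricts mq_mode to ETH_MQ_RX_RSS or ETH_MQ_RX_NONE, and performs the stop/fixup/start sequence inside set_mtu. The port id 0, single queue pair, and MTU 9000 below are hypothetical placeholders, not values from the patch.

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical placeholders for illustration only. */
#define EXAMPLE_PORT_ID	0
#define EXAMPLE_MTU	9000

static int example_configure_qede_port(void)
{
	struct rte_eth_conf conf;
	int rc;

	memset(&conf, 0, sizeof(conf));
	/* With this patch, only RSS or none are accepted mq modes. */
	conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
	/* LRO is no longer rejected; the PMD enables scattered Rx
	 * internally when enable_scatter is not set by the app.
	 */
	conf.rxmode.enable_lro = 1;
	conf.rxmode.hw_strip_crc = 1;	/* always enabled in hw anyway */

	rc = rte_eth_dev_configure(EXAMPLE_PORT_ID, 1, 1, &conf);
	if (rc != 0)
		return rc;

	/* qede_set_mtu now swaps in dummy burst functions, stops the
	 * port, fixes up per-queue Rx buffer sizes, and restarts it.
	 */
	return rte_eth_dev_set_mtu(EXAMPLE_PORT_ID, EXAMPLE_MTU);
}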