X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_ether%2Frte_ethdev.c;h=39f65bfadb55c23ff6ce4d7f8a6c5d2b86bf64f2;hb=80a1deb4c77aca0ddc402c888f3c67216373f5e0;hp=f383bcea2e117b6e7dd58f871ae8e349d88fd080;hpb=854d8ad4ef68300894a0f152154aced14de00601;p=dpdk.git diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c index f383bcea2e..39f65bfadb 100644 --- a/lib/librte_ether/rte_ethdev.c +++ b/lib/librte_ether/rte_ethdev.c @@ -142,17 +142,8 @@ static const struct rte_eth_xstats_name_off rte_stats_strings[] = { {"rx_bytes", offsetof(struct rte_eth_stats, ibytes)}, {"tx_bytes", offsetof(struct rte_eth_stats, obytes)}, {"tx_errors", offsetof(struct rte_eth_stats, oerrors)}, - {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)}, - {"rx_crc_errors", offsetof(struct rte_eth_stats, ibadcrc)}, - {"rx_bad_length_errors", offsetof(struct rte_eth_stats, ibadlen)}, {"rx_errors", offsetof(struct rte_eth_stats, ierrors)}, {"alloc_rx_buff_failed", offsetof(struct rte_eth_stats, rx_nombuf)}, - {"fdir_match", offsetof(struct rte_eth_stats, fdirmatch)}, - {"fdir_miss", offsetof(struct rte_eth_stats, fdirmiss)}, - {"tx_flow_control_xon", offsetof(struct rte_eth_stats, tx_pause_xon)}, - {"rx_flow_control_xon", offsetof(struct rte_eth_stats, rx_pause_xon)}, - {"tx_flow_control_xoff", offsetof(struct rte_eth_stats, tx_pause_xoff)}, - {"rx_flow_control_xoff", offsetof(struct rte_eth_stats, rx_pause_xoff)}, }; #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0])) @@ -296,7 +287,7 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev) if (eth_dev == NULL) return -EINVAL; - eth_dev->attached = 0; + eth_dev->attached = DEV_DETACHED; nb_ports--; return 0; } @@ -351,8 +342,7 @@ rte_eth_dev_init(struct rte_pci_driver *pci_drv, (unsigned) pci_dev->id.device_id); if (rte_eal_process_type() == RTE_PROC_PRIMARY) rte_free(eth_dev->data->dev_private); - eth_dev->attached = DEV_DETACHED; - nb_ports--; + rte_eth_dev_release_port(eth_dev); return diag; } @@ -419,7 +409,7 @@ rte_eth_driver_register(struct eth_driver *eth_drv) rte_eal_pci_register(ð_drv->pci_drv); } -static int +int rte_eth_dev_is_valid_port(uint8_t port_id) { if (port_id >= RTE_MAX_ETHPORTS || @@ -515,7 +505,7 @@ rte_eth_dev_is_detachable(uint8_t port_id) { uint32_t drv_flags; - if (port_id >= RTE_MAX_ETHPORTS) { + if (!rte_eth_dev_is_valid_port(port_id)) { PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); return -EINVAL; } @@ -593,9 +583,9 @@ rte_eth_dev_detach_pdev(uint8_t port_id, struct rte_pci_addr *addr) if (rte_eal_compare_pci_addr(&vp, &freed_addr) == 0) goto err; - /* invoke close func of the driver, + /* invoke devuninit func of the pci driver, * also remove the device from pci_device_list */ - if (rte_eal_pci_close_one(&freed_addr)) + if (rte_eal_pci_detach(&freed_addr)) goto err; *addr = freed_addr; @@ -665,7 +655,7 @@ rte_eth_dev_detach_vdev(uint8_t port_id, char *vdevname) if (rte_eth_dev_get_name_by_port(port_id, name)) goto err; /* walk around dev_driver_list to find the driver of the device, - * then invoke close function o the driver */ + * then invoke uninit function of the driver */ if (rte_eal_vdev_uninit(name)) goto err; @@ -890,197 +880,6 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) return 0; } -static int -rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q) -{ - struct rte_eth_dev *dev = &rte_eth_devices[port_id]; - - switch (nb_rx_q) { - case 1: - case 2: - RTE_ETH_DEV_SRIOV(dev).active = - ETH_64_POOLS; - break; - case 
4: - RTE_ETH_DEV_SRIOV(dev).active = - ETH_32_POOLS; - break; - default: - return -EINVAL; - } - - RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q; - RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = - dev->pci_dev->max_vfs * nb_rx_q; - - return 0; -} - -static int -rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, - const struct rte_eth_conf *dev_conf) -{ - struct rte_eth_dev *dev = &rte_eth_devices[port_id]; - - if (RTE_ETH_DEV_SRIOV(dev).active != 0) { - /* check multi-queue mode */ - if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) || - (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) || - (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) { - /* SRIOV only works in VMDq enable mode */ - PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8 - " SRIOV active, " - "wrong VMDQ mq_mode rx %u tx %u\n", - port_id, - dev_conf->rxmode.mq_mode, - dev_conf->txmode.mq_mode); - return -EINVAL; - } - - switch (dev_conf->rxmode.mq_mode) { - case ETH_MQ_RX_VMDQ_DCB: - case ETH_MQ_RX_VMDQ_DCB_RSS: - /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ - PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8 - " SRIOV active, " - "unsupported VMDQ mq_mode rx %u\n", - port_id, dev_conf->rxmode.mq_mode); - return -EINVAL; - case ETH_MQ_RX_RSS: - PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8 - " SRIOV active, " - "Rx mq mode is changed from:" - "mq_mode %u into VMDQ mq_mode %u\n", - port_id, - dev_conf->rxmode.mq_mode, - dev->data->dev_conf.rxmode.mq_mode); - case ETH_MQ_RX_VMDQ_RSS: - dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS; - if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) - if (rte_eth_dev_check_vf_rss_rxq_num(port_id, nb_rx_q) != 0) { - PMD_DEBUG_TRACE("ethdev port_id=%d" - " SRIOV active, invalid queue" - " number for VMDQ RSS, allowed" - " value are 1, 2 or 4\n", - port_id); - return -EINVAL; - } - break; - default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */ - /* if nothing mq mode configure, use default scheme */ - dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; - if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1) - RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1; - break; - } - - switch (dev_conf->txmode.mq_mode) { - case ETH_MQ_TX_VMDQ_DCB: - /* DCB VMDQ in SRIOV mode, not implement yet */ - PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8 - " SRIOV active, " - "unsupported VMDQ mq_mode tx %u\n", - port_id, dev_conf->txmode.mq_mode); - return -EINVAL; - default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ - /* if nothing mq mode configure, use default scheme */ - dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; - break; - } - - /* check valid queue number */ - if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || - (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { - PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, " - "queue number must less equal to %d\n", - port_id, - RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); - return -EINVAL; - } - } else { - /* For vmdb+dcb mode check our configuration before we go further */ - if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { - const struct rte_eth_vmdq_dcb_conf *conf; - - if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) { - PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q " - "!= %d\n", - port_id, ETH_VMDQ_DCB_NUM_QUEUES); - return -EINVAL; - } - conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf); - if (!(conf->nb_queue_pools == ETH_16_POOLS || - conf->nb_queue_pools == ETH_32_POOLS)) { - PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, " - "nb_queue_pools must be %d or %d\n", - port_id, ETH_16_POOLS, ETH_32_POOLS); - return -EINVAL; - } - } - if 
(dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { - const struct rte_eth_vmdq_dcb_tx_conf *conf; - - if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) { - PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q " - "!= %d\n", - port_id, ETH_VMDQ_DCB_NUM_QUEUES); - return -EINVAL; - } - conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf); - if (!(conf->nb_queue_pools == ETH_16_POOLS || - conf->nb_queue_pools == ETH_32_POOLS)) { - PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, " - "nb_queue_pools != %d or nb_queue_pools " - "!= %d\n", - port_id, ETH_16_POOLS, ETH_32_POOLS); - return -EINVAL; - } - } - - /* For DCB mode check our configuration before we go further */ - if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { - const struct rte_eth_dcb_rx_conf *conf; - - if (nb_rx_q != ETH_DCB_NUM_QUEUES) { - PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q " - "!= %d\n", - port_id, ETH_DCB_NUM_QUEUES); - return -EINVAL; - } - conf = &(dev_conf->rx_adv_conf.dcb_rx_conf); - if (!(conf->nb_tcs == ETH_4_TCS || - conf->nb_tcs == ETH_8_TCS)) { - PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, " - "nb_tcs != %d or nb_tcs " - "!= %d\n", - port_id, ETH_4_TCS, ETH_8_TCS); - return -EINVAL; - } - } - - if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { - const struct rte_eth_dcb_tx_conf *conf; - - if (nb_tx_q != ETH_DCB_NUM_QUEUES) { - PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q " - "!= %d\n", - port_id, ETH_DCB_NUM_QUEUES); - return -EINVAL; - } - conf = &(dev_conf->tx_adv_conf.dcb_tx_conf); - if (!(conf->nb_tcs == ETH_4_TCS || - conf->nb_tcs == ETH_8_TCS)) { - PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, " - "nb_tcs != %d or nb_tcs " - "!= %d\n", - port_id, ETH_4_TCS, ETH_8_TCS); - return -EINVAL; - } - } - } - return 0; -} - int rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, const struct rte_eth_conf *dev_conf) @@ -1192,14 +991,6 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, ETHER_MAX_LEN; } - /* multiple queue mode checking */ - diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf); - if (diag != 0) { - PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n", - port_id, diag); - return diag; - } - /* * Setup new number of RX/TX queues and reconfigure device. 
*/ @@ -1391,6 +1182,11 @@ rte_eth_dev_close(uint8_t port_id) FUNC_PTR_OR_RET(*dev->dev_ops->dev_close); dev->data->dev_started = 0; (*dev->dev_ops->dev_close)(dev); + + rte_free(dev->data->rx_queues); + dev->data->rx_queues = NULL; + rte_free(dev->data->tx_queues); + dev->data->tx_queues = NULL; } int @@ -1452,6 +1248,19 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id, return -EINVAL; } + if (nb_rx_desc > dev_info.rx_desc_lim.nb_max || + nb_rx_desc < dev_info.rx_desc_lim.nb_min || + nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { + + PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), " + "should be: <= %hu, = %hu, and a product of %hu\n", + nb_rx_desc, + dev_info.rx_desc_lim.nb_max, + dev_info.rx_desc_lim.nb_min, + dev_info.rx_desc_lim.nb_align); + return -EINVAL; + } + if (rx_conf == NULL) rx_conf = &dev_info.default_rxconf; @@ -1661,26 +1470,35 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats, { struct rte_eth_stats eth_stats; struct rte_eth_dev *dev; - unsigned count, i, q; + unsigned count = 0, i, q; + signed xcount = 0; uint64_t val, *stats_ptr; VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); dev = &rte_eth_devices[port_id]; - /* implemented by the driver */ - if (dev->dev_ops->xstats_get != NULL) - return (*dev->dev_ops->xstats_get)(dev, xstats, n); - - /* else, return generic statistics */ + /* Return generic statistics */ count = RTE_NB_STATS; count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS; count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS; - if (n < count) - return count; - /* now fill the xstats structure */ + /* implemented by the driver */ + if (dev->dev_ops->xstats_get != NULL) { + /* Retrieve the xstats from the driver at the end of the + * xstats struct. + */ + xcount = (*dev->dev_ops->xstats_get)(dev, &xstats[count], + (n > count) ? 
n - count : 0); + + if (xcount < 0) + return xcount; + } + if (n < count + xcount) + return count + xcount; + + /* now fill the xstats structure */ count = 0; rte_eth_stats_get(port_id, ð_stats); @@ -1722,7 +1540,7 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstats *xstats, } } - return count; + return count + xcount; } /* reset ethdev extended statistics */ @@ -1782,11 +1600,18 @@ void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info) { struct rte_eth_dev *dev; + const struct rte_eth_desc_lim lim = { + .nb_max = UINT16_MAX, + .nb_min = 0, + .nb_align = 1, + }; VALID_PORTID_OR_RET(port_id); dev = &rte_eth_devices[port_id]; memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); + dev_info->rx_desc_lim = lim; + dev_info->tx_desc_lim = lim; FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); (*dev->dev_ops->dev_infos_get)(dev, dev_info); @@ -1965,234 +1790,6 @@ rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on) return 0; } -int -rte_eth_dev_fdir_add_signature_filter(uint8_t port_id, - struct rte_fdir_filter *fdir_filter, - uint8_t queue) -{ - struct rte_eth_dev *dev; - - VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - dev = &rte_eth_devices[port_id]; - - if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) { - PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n", - port_id, dev->data->dev_conf.fdir_conf.mode); - return -ENOSYS; - } - - if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP - || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) - && (fdir_filter->port_src || fdir_filter->port_dst)) { - PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " - "None l4type, source & destinations ports " - "should be null!\n"); - return -EINVAL; - } - - FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP); - return (*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter, - queue); -} - -int -rte_eth_dev_fdir_update_signature_filter(uint8_t port_id, - struct rte_fdir_filter *fdir_filter, - uint8_t queue) -{ - struct rte_eth_dev *dev; - - VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - dev = &rte_eth_devices[port_id]; - - if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) { - PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n", - port_id, dev->data->dev_conf.fdir_conf.mode); - return -ENOSYS; - } - - if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP - || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) - && (fdir_filter->port_src || fdir_filter->port_dst)) { - PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " - "None l4type, source & destinations ports " - "should be null!\n"); - return -EINVAL; - } - - FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP); - return (*dev->dev_ops->fdir_update_signature_filter)(dev, fdir_filter, - queue); - -} - -int -rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id, - struct rte_fdir_filter *fdir_filter) -{ - struct rte_eth_dev *dev; - - VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - dev = &rte_eth_devices[port_id]; - - if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) { - PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n", - port_id, dev->data->dev_conf.fdir_conf.mode); - return -ENOSYS; - } - - if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP - || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) - && (fdir_filter->port_src || fdir_filter->port_dst)) { - PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " - "None l4type source & destinations ports " - "should be null!\n"); - return -EINVAL; - } - - 
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP); - return (*dev->dev_ops->fdir_remove_signature_filter)(dev, fdir_filter); -} - -int -rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir) -{ - struct rte_eth_dev *dev; - - VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - dev = &rte_eth_devices[port_id]; - if (!(dev->data->dev_conf.fdir_conf.mode)) { - PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id); - return -ENOSYS; - } - - FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP); - - (*dev->dev_ops->fdir_infos_get)(dev, fdir); - return 0; -} - -int -rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id, - struct rte_fdir_filter *fdir_filter, - uint16_t soft_id, uint8_t queue, - uint8_t drop) -{ - struct rte_eth_dev *dev; - - VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - dev = &rte_eth_devices[port_id]; - - if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) { - PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n", - port_id, dev->data->dev_conf.fdir_conf.mode); - return -ENOSYS; - } - - if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP - || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) - && (fdir_filter->port_src || fdir_filter->port_dst)) { - PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " - "None l4type, source & destinations ports " - "should be null!\n"); - return -EINVAL; - } - - /* For now IPv6 is not supported with perfect filter */ - if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6) - return -ENOTSUP; - - FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP); - return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter, - soft_id, queue, - drop); -} - -int -rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id, - struct rte_fdir_filter *fdir_filter, - uint16_t soft_id, uint8_t queue, - uint8_t drop) -{ - struct rte_eth_dev *dev; - - VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - dev = &rte_eth_devices[port_id]; - - if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) { - PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n", - port_id, dev->data->dev_conf.fdir_conf.mode); - return -ENOSYS; - } - - if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP - || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) - && (fdir_filter->port_src || fdir_filter->port_dst)) { - PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " - "None l4type, source & destinations ports " - "should be null!\n"); - return -EINVAL; - } - - /* For now IPv6 is not supported with perfect filter */ - if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6) - return -ENOTSUP; - - FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP); - return (*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter, - soft_id, queue, drop); -} - -int -rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id, - struct rte_fdir_filter *fdir_filter, - uint16_t soft_id) -{ - struct rte_eth_dev *dev; - - VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - dev = &rte_eth_devices[port_id]; - - if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) { - PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n", - port_id, dev->data->dev_conf.fdir_conf.mode); - return -ENOSYS; - } - - if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP - || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) - && (fdir_filter->port_src || fdir_filter->port_dst)) { - PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " - "None l4type, source & destinations ports " - "should be null!\n"); - return -EINVAL; - } - - /* For now IPv6 is not supported with perfect filter */ - if 
(fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6) - return -ENOTSUP; - - FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP); - return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter, - soft_id); -} - -int -rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask) -{ - struct rte_eth_dev *dev; - - VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - dev = &rte_eth_devices[port_id]; - if (!(dev->data->dev_conf.fdir_conf.mode)) { - PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id); - return -ENOSYS; - } - - FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP); - return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask); -} - int rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf) { @@ -2950,9 +2547,10 @@ rte_eth_dev_callback_register(uint8_t port_id, } /* create a new callback. */ - if (user_cb == NULL && - (user_cb = rte_zmalloc("INTR_USER_CALLBACK", - sizeof(struct rte_eth_dev_callback), 0))) { + if (user_cb == NULL) + user_cb = rte_zmalloc("INTR_USER_CALLBACK", + sizeof(struct rte_eth_dev_callback), 0); + if (user_cb != NULL) { user_cb->cb_fn = cb_fn; user_cb->cb_arg = cb_arg; user_cb->event = event; @@ -3027,6 +2625,113 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev, } rte_spinlock_unlock(&rte_eth_dev_cb_lock); } + +int +rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data) +{ + uint32_t vec; + struct rte_eth_dev *dev; + struct rte_intr_handle *intr_handle; + uint16_t qid; + int rc; + + if (!rte_eth_dev_is_valid_port(port_id)) { + PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id); + return -ENODEV; + } + + dev = &rte_eth_devices[port_id]; + intr_handle = &dev->pci_dev->intr_handle; + if (!intr_handle->intr_vec) { + PMD_DEBUG_TRACE("RX Intr vector unset\n"); + return -EPERM; + } + + for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { + vec = intr_handle->intr_vec[qid]; + rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); + if (rc && rc != -EEXIST) { + PMD_DEBUG_TRACE("p %u q %u rx ctl error" + " op %d epfd %d vec %u\n", + port_id, qid, op, epfd, vec); + } + } + + return 0; +} + +int +rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id, + int epfd, int op, void *data) +{ + uint32_t vec; + struct rte_eth_dev *dev; + struct rte_intr_handle *intr_handle; + int rc; + + if (!rte_eth_dev_is_valid_port(port_id)) { + PMD_DEBUG_TRACE("Invalid port_id=%u\n", port_id); + return -ENODEV; + } + + dev = &rte_eth_devices[port_id]; + if (queue_id >= dev->data->nb_rx_queues) { + PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id); + return -EINVAL; + } + + intr_handle = &dev->pci_dev->intr_handle; + if (!intr_handle->intr_vec) { + PMD_DEBUG_TRACE("RX Intr vector unset\n"); + return -EPERM; + } + + vec = intr_handle->intr_vec[queue_id]; + rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); + if (rc && rc != -EEXIST) { + PMD_DEBUG_TRACE("p %u q %u rx ctl error" + " op %d epfd %d vec %u\n", + port_id, queue_id, op, epfd, vec); + return rc; + } + + return 0; +} + +int +rte_eth_dev_rx_intr_enable(uint8_t port_id, + uint16_t queue_id) +{ + struct rte_eth_dev *dev; + + if (!rte_eth_dev_is_valid_port(port_id)) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return -ENODEV; + } + + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP); + return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id); +} + +int +rte_eth_dev_rx_intr_disable(uint8_t port_id, + uint16_t queue_id) +{ + struct rte_eth_dev *dev; + + if 
(!rte_eth_dev_is_valid_port(port_id)) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return -ENODEV; + } + + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP); + return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id); +} + #ifdef RTE_NIC_BYPASS int rte_eth_dev_bypass_init(uint8_t port_id) { @@ -3336,6 +3041,54 @@ rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id, return -EINVAL; } +int +rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct rte_eth_dev *dev; + + VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + if (qinfo == NULL) + return -EINVAL; + + dev = &rte_eth_devices[port_id]; + if (queue_id >= dev->data->nb_rx_queues) { + PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id); + return -EINVAL; + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP); + + memset(qinfo, 0, sizeof(*qinfo)); + dev->dev_ops->rxq_info_get(dev, queue_id, qinfo); + return 0; +} + +int +rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct rte_eth_dev *dev; + + VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + if (qinfo == NULL) + return -EINVAL; + + dev = &rte_eth_devices[port_id]; + if (queue_id >= dev->data->nb_tx_queues) { + PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id); + return -EINVAL; + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP); + + memset(qinfo, 0, sizeof(*qinfo)); + dev->dev_ops->txq_info_get(dev, queue_id, qinfo); + return 0; +} + int rte_eth_dev_set_mc_addr_list(uint8_t port_id, struct ether_addr *mc_addr_set, @@ -3398,3 +3151,81 @@ rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp) FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP); return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp); } + +int +rte_eth_dev_get_reg_length(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP); + return (*dev->dev_ops->get_reg_length)(dev); +} + +int +rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info) +{ + struct rte_eth_dev *dev; + + VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP); + return (*dev->dev_ops->get_reg)(dev, info); +} + +int +rte_eth_dev_get_eeprom_length(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP); + return (*dev->dev_ops->get_eeprom_length)(dev); +} + +int +rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info) +{ + struct rte_eth_dev *dev; + + VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP); + return (*dev->dev_ops->get_eeprom)(dev, info); +} + +int +rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info) +{ + struct rte_eth_dev *dev; + + VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP); + return (*dev->dev_ops->set_eeprom)(dev, info); +} + +int +rte_eth_dev_get_dcb_info(uint8_t port_id, + struct rte_eth_dcb_info *dcb_info) +{ + struct rte_eth_dev *dev; + + if (!rte_eth_dev_is_valid_port(port_id)) { + 
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return -ENODEV; + } + + dev = &rte_eth_devices[port_id]; + memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info)); + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP); + return (*dev->dev_ops->get_dcb_info)(dev, dcb_info); +}
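
Note on the revised rte_eth_xstats_get() semantics shown in the diff above: generic statistics are now always placed first in the caller's array and any driver-specific statistics are appended through the PMD's xstats_get callback; when the supplied array is too small, the function returns the combined count (count + xcount) instead of filling it. A minimal usage sketch follows, assuming rte_eal_init() has already run and the port is configured and started; the NULL/0 probe for the required size is an assumption about intended usage rather than something stated in this diff, and a conservative caller could pass a small scratch array instead.

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
print_port_xstats(uint8_t port_id)
{
	struct rte_eth_xstats *xstats;
	int len, ret, i;

	/* Probe for the number of entries (generic stats plus any
	 * driver-specific xstats). With n == 0 the function returns
	 * the required array size (assumed probing pattern). */
	len = rte_eth_xstats_get(port_id, NULL, 0);
	if (len <= 0)
		return;

	xstats = calloc(len, sizeof(*xstats));
	if (xstats == NULL)
		return;

	/* Second call fills the array; the return value is the number
	 * of valid entries when it is <= the supplied size. */
	ret = rte_eth_xstats_get(port_id, xstats, len);
	if (ret > 0 && ret <= len) {
		for (i = 0; i < ret; i++)
			printf("%s: %" PRIu64 "\n",
			       xstats[i].name, xstats[i].value);
	}

	free(xstats);
}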