diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index cfb450a13d..7aadbd3159 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -126,6 +126,11 @@ struct rte_eth_dev_callback {
 	enum rte_eth_event_type event; /**< Interrupt event type */
 };
 
+enum {
+	STAT_QMAP_TX = 0,
+	STAT_QMAP_RX
+};
+
 static inline void
 rte_eth_dev_data_alloc(void)
 {
@@ -241,6 +246,82 @@ rte_eth_dev_count(void)
 	return (nb_ports);
 }
 
+static int
+rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+	uint16_t old_nb_queues = dev->data->nb_rx_queues;
+	void **rxq;
+	unsigned i;
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+
+	if (dev->data->rx_queues == NULL) {
+		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
+				sizeof(dev->data->rx_queues[0]) * nb_queues,
+				CACHE_LINE_SIZE);
+		if (dev->data->rx_queues == NULL) {
+			dev->data->nb_rx_queues = 0;
+			return -(ENOMEM);
+		}
+	} else {
+		rxq = dev->data->rx_queues;
+
+		for (i = nb_queues; i < old_nb_queues; i++)
+			(*dev->dev_ops->rx_queue_release)(rxq[i]);
+		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
+				CACHE_LINE_SIZE);
+		if (rxq == NULL)
+			return -(ENOMEM);
+
+		if (nb_queues > old_nb_queues)
+			memset(rxq + old_nb_queues, 0,
+				sizeof(rxq[0]) * (nb_queues - old_nb_queues));
+
+		dev->data->rx_queues = rxq;
+
+	}
+	dev->data->nb_rx_queues = nb_queues;
+	return (0);
+}
+
+static int
+rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+	uint16_t old_nb_queues = dev->data->nb_tx_queues;
+	void **txq;
+	unsigned i;
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+
+	if (dev->data->tx_queues == NULL) {
+		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
+				sizeof(dev->data->tx_queues[0]) * nb_queues,
+				CACHE_LINE_SIZE);
+		if (dev->data->tx_queues == NULL) {
+			dev->data->nb_tx_queues = 0;
+			return -(ENOMEM);
+		}
+	} else {
+		txq = dev->data->tx_queues;
+
+		for (i = nb_queues; i < old_nb_queues; i++)
+			(*dev->dev_ops->tx_queue_release)(txq[i]);
+		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
+				CACHE_LINE_SIZE);
+		if (txq == NULL)
+			return -(ENOMEM);
+
+		if (nb_queues > old_nb_queues)
+			memset(txq + old_nb_queues, 0,
+				sizeof(txq[0]) * (nb_queues - old_nb_queues));
+
+		dev->data->tx_queues = txq;
+
+	}
+	dev->data->nb_tx_queues = nb_queues;
+	return (0);
+}
+
 int
 rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		      const struct rte_eth_conf *dev_conf)
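
The two helpers above follow a shrink-release / grow-zero pattern: when the queue count shrinks, the trailing driver queues are released through the PMD's rx/tx_queue_release op before the pointer array is reallocated; when it grows, the new tail slots are zeroed so no stale pointers survive. A minimal standalone sketch of the same pattern, with libc malloc()/realloc()/free() standing in for rte_zmalloc()/rte_realloc() and for the PMD release op (stand-ins for illustration, not the patch's code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for a per-queue object release; the real code calls the
 * PMD's rx/tx_queue_release op. */
static void release_queue(void *q) { free(q); }

/* Same grow/shrink logic as rte_eth_dev_rx_queue_config(), using libc
 * calloc()/realloc() in place of rte_zmalloc()/rte_realloc(). */
static int queue_array_config(void ***queues, unsigned *nb, unsigned nb_new)
{
	unsigned i, nb_old = *nb;
	void **q = *queues;

	if (q == NULL) {
		q = calloc(nb_new, sizeof(q[0]));
		if (q == NULL)
			return -1;
	} else {
		/* Shrinking: release the queues that fall off the end
		 * before the array itself is shrunk. */
		for (i = nb_new; i < nb_old; i++)
			release_queue(q[i]);
		q = realloc(q, nb_new * sizeof(q[0]));
		if (q == NULL)
			return -1;
		/* Growing: zero the new tail so no stale pointers leak. */
		if (nb_new > nb_old)
			memset(q + nb_old, 0,
			       (nb_new - nb_old) * sizeof(q[0]));
	}
	*queues = q;
	*nb = nb_new;
	return 0;
}

int main(void)
{
	void **queues = NULL;
	unsigned nb = 0;

	queue_array_config(&queues, &nb, 4);	/* grow: 4 zeroed slots */
	queue_array_config(&queues, &nb, 2);	/* shrink: slots 2..3 released */
	printf("nb_queues=%u\n", nb);
	free(queues);
	return 0;
}
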
@@ -311,6 +392,14 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 				(unsigned)dev_info.max_rx_pktlen);
 			return (-EINVAL);
 		}
+		else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
+			PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
+				" < min valid value %u\n",
+				port_id,
+				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
+				(unsigned)ETHER_MIN_LEN);
+			return (-EINVAL);
+		}
 	} else
 		/* Use default value */
 		dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
@@ -334,13 +423,95 @@
 			return (-EINVAL);
 		}
 	}
+	if (dev_conf->txmode.mq_mode == ETH_VMDQ_DCB_TX) {
+		const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
-	diag = (*dev->dev_ops->dev_configure)(dev, nb_rx_q, nb_tx_q);
+		if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
+			PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
+					"!= %d\n",
+					port_id, ETH_VMDQ_DCB_NUM_QUEUES);
+			return (-EINVAL);
+		}
+		conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
+		if (! (conf->nb_queue_pools == ETH_16_POOLS ||
+		       conf->nb_queue_pools == ETH_32_POOLS)) {
+			PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
+					"nb_queue_pools must be "
+					"%d or %d\n",
+					port_id, ETH_16_POOLS, ETH_32_POOLS);
+			return (-EINVAL);
+		}
+	}
+
+	/* For DCB mode check our configuration before we go further */
+	if (dev_conf->rxmode.mq_mode == ETH_DCB_RX) {
+		const struct rte_eth_dcb_rx_conf *conf;
+
+		if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
+			PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
+					"!= %d\n",
+					port_id, ETH_DCB_NUM_QUEUES);
+			return (-EINVAL);
+		}
+		conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
+		if (! (conf->nb_tcs == ETH_4_TCS ||
+		       conf->nb_tcs == ETH_8_TCS)) {
+			PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
+					"nb_tcs must be "
+					"%d or %d\n",
+					port_id, ETH_4_TCS, ETH_8_TCS);
+			return (-EINVAL);
+		}
+	}
+
+	if (dev_conf->txmode.mq_mode == ETH_DCB_TX) {
+		const struct rte_eth_dcb_tx_conf *conf;
+
+		if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
+			PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
+					"!= %d\n",
+					port_id, ETH_DCB_NUM_QUEUES);
+			return (-EINVAL);
+		}
+		conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
+		if (! (conf->nb_tcs == ETH_4_TCS ||
+		       conf->nb_tcs == ETH_8_TCS)) {
+			PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
+					"nb_tcs must be "
+					"%d or %d\n",
+					port_id, ETH_4_TCS, ETH_8_TCS);
+			return (-EINVAL);
+		}
+	}
+
+	/*
+	 * Setup new number of RX/TX queues and reconfigure device.
+	 */
+	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
+	if (diag != 0) {
+		PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
+				port_id, diag);
+		return diag;
+	}
+
+	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
 	if (diag != 0) {
-		rte_free(dev->data->rx_queues);
-		rte_free(dev->data->tx_queues);
+		PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
+				port_id, diag);
+		rte_eth_dev_rx_queue_config(dev, 0);
+		return diag;
 	}
-	return diag;
+
+	diag = (*dev->dev_ops->dev_configure)(dev);
+	if (diag != 0) {
+		PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
+				port_id, diag);
+		rte_eth_dev_rx_queue_config(dev, 0);
+		rte_eth_dev_tx_queue_config(dev, 0);
+		return diag;
+	}
+
+	return 0;
 }
 
 static void
@@ -532,7 +703,7 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
 	 * in a multi-process setup*/
 	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
 
-	if (port_id >= nb_ports) {
+	if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
 		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
 		return (-EINVAL);
 	}
@@ -730,6 +901,43 @@ rte_eth_stats_reset(uint8_t port_id)
 	(*dev->dev_ops->stats_reset)(dev);
 }
 
+
+static int
+set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
+		uint8_t is_rx)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return -ENODEV;
+	}
+	dev = &rte_eth_devices[port_id];
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
+	return (*dev->dev_ops->queue_stats_mapping_set)
+			(dev, queue_id, stat_idx, is_rx);
+}
+
+
+int
+rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
+		uint8_t stat_idx)
+{
+	return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
+			STAT_QMAP_TX);
+}
+
+
+int
+rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
+		uint8_t stat_idx)
+{
+	return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
+			STAT_QMAP_RX);
+}
+
+
 void
 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
 {
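
With the mapping helpers above in place, an application can pin individual queues to the NIC's per-queue statistics registers and read the counters back through the q_ipackets/q_opackets arrays of struct rte_eth_stats. A minimal usage sketch (a hypothetical helper, assuming a port that is already configured and started and a PMD that implements the queue_stats_mapping_set op; on igb/ixgbe-class NICs stat_idx is limited by the small number of hardware stat registers, an assumption to verify per driver):

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void show_queue0_stats(uint8_t port_id)
{
	struct rte_eth_stats stats;
	int ret;

	/* Route RX queue 0 and TX queue 0 into per-queue stat slot 0. */
	ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 0, 0);
	if (ret != 0)
		printf("rx mapping failed: %d\n", ret);	/* e.g. -ENOTSUP */
	ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, 0, 0);
	if (ret != 0)
		printf("tx mapping failed: %d\n", ret);

	/* Slot 0 of the per-queue arrays now reflects queue 0. */
	rte_eth_stats_get(port_id, &stats);
	printf("q0: %" PRIu64 " rx pkts, %" PRIu64 " tx pkts\n",
	       stats.q_ipackets[0], stats.q_opackets[0]);
}
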
@@ -785,6 +993,118 @@ rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
 	return (0);
 }
 
+int
+rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (rx_queue_id >= dev->data->nb_rx_queues) {
+		PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
+		return (-EINVAL);
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
+
+	return (0);
+}
+
+int
+rte_eth_dev_set_vlan_ether_type(uint8_t port_id, uint16_t tpid)
+{
+	struct rte_eth_dev *dev;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
+	(*dev->dev_ops->vlan_tpid_set)(dev, tpid);
+
+	return (0);
+}
+
+int
+rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
+{
+	struct rte_eth_dev *dev;
+	int ret = 0;
+	int mask = 0;
+	int cur, org = 0;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+
+	/* check which options were changed by the application */
+	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
+	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
+	if (cur != org) {
+		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
+		mask |= ETH_VLAN_STRIP_MASK;
+	}
+
+	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
+	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
+	if (cur != org) {
+		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
+		mask |= ETH_VLAN_FILTER_MASK;
+	}
+
+	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
+	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
+	if (cur != org) {
+		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
+		mask |= ETH_VLAN_EXTEND_MASK;
+	}
+
+	/* no change */
+	if (mask == 0)
+		return ret;
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
+	(*dev->dev_ops->vlan_offload_set)(dev, mask);
+
+	return ret;
+}
+
+int
+rte_eth_dev_get_vlan_offload(uint8_t port_id)
+{
+	struct rte_eth_dev *dev;
+	int ret = 0;
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return (-ENODEV);
+	}
+
+	dev = &rte_eth_devices[port_id];
+
+	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		ret |= ETH_VLAN_STRIP_OFFLOAD;
+
+	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		ret |= ETH_VLAN_FILTER_OFFLOAD;
+
+	if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		ret |= ETH_VLAN_EXTEND_OFFLOAD;
+
+	return ret;
+}
+
+
 int
 rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
 				      struct rte_fdir_filter *fdir_filter,
@@ -1041,10 +1361,6 @@ rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
 		return (-ENOSYS);
 	}
 
-	/* IPv6 mask are not supported */
-	if (fdir_mask->src_ipv6_mask)
-		return (-ENOTSUP);
-
 	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
 	return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask);
 }
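
The rte_eth_dev_set_vlan_offload() call added in the VLAN hunk above takes the desired state of all three VLAN offloads at once and forwards only the bits that differ from the current rxmode configuration down to the driver, so re-applying an identical mask is a no-op. A small sketch of runtime toggling built on the two new calls (a hypothetical helper, assuming a valid port whose PMD implements vlan_offload_set):

#include <rte_ethdev.h>

static int enable_vlan_strip_only(uint8_t port_id)
{
	/* Start from the current offload state so untouched bits
	 * (e.g. ETH_VLAN_EXTEND_OFFLOAD) are preserved. */
	int mask = rte_eth_dev_get_vlan_offload(port_id);
	if (mask < 0)
		return mask;	/* -ENODEV: invalid port */

	mask |= ETH_VLAN_STRIP_OFFLOAD;		/* turn stripping on */
	mask &= ~ETH_VLAN_FILTER_OFFLOAD;	/* turn filtering off */

	/* Only the bits that changed are pushed to the driver. */
	return rte_eth_dev_set_vlan_offload(port_id, mask);
}
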