diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 06fc25106b..d10982fa9a 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1,35 +1,34 @@
 /*-
  * BSD LICENSE
- * 
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without 
- * modification, are permitted provided that the following conditions 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
  * are met:
- * 
- *   * Redistributions of source code must retain the above copyright 
+ *
+ *   * Redistributions of source code must retain the above copyright
  *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright 
- *     notice, this list of conditions and the following disclaimer in 
- *     the documentation and/or other materials provided with the 
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
  *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its 
- *     contributors may be used to endorse or promote products derived 
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * 
  */

 #include <sys/types.h>
@@ -153,7 +152,7 @@ rte_eth_dev_data_alloc(void)
 			RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
 }

-static inline struct rte_eth_dev *
+struct rte_eth_dev *
 rte_eth_dev_allocate(void)
 {
 	struct rte_eth_dev *eth_dev;
@@ -262,9 +261,7 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 	void **rxq;
 	unsigned i;

-	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
-
-	if (dev->data->rx_queues == NULL) {
+	if (dev->data->rx_queues == NULL) { /* first time configuration */
 		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
 				sizeof(dev->data->rx_queues[0]) * nb_queues,
 				CACHE_LINE_SIZE);
@@ -272,7 +269,9 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 			dev->data->nb_rx_queues = 0;
 			return -(ENOMEM);
 		}
-	} else {
+	} else { /* re-configure */
+		FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+
 		rxq = dev->data->rx_queues;

 		for (i = nb_queues; i < old_nb_queues; i++)
@@ -293,6 +292,110 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 	return (0);
 }

+int
+rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
+{
+	struct rte_eth_dev *dev;
+
+	/* This function is only safe when called from the primary process
+	 * in a multi-process setup */
+	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (rx_queue_id >= dev->data->nb_rx_queues) {
+		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+		return -EINVAL;
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
+
+	return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
+
+}
+
+int
+rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
+{
+	struct rte_eth_dev *dev;
+
+	/* This function is only safe when called from the primary process
+	 * in a multi-process setup */
+	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (rx_queue_id >= dev->data->nb_rx_queues) {
+		PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+		return -EINVAL;
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
+
+	return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
+
+}
+
+int
+rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
+{
+	struct rte_eth_dev *dev;
+
+	/* This function is only safe when called from the primary process
+	 * in a multi-process setup */
+	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (tx_queue_id >= dev->data->nb_tx_queues) {
+		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+		return -EINVAL;
+	}
+
+	FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
+
+	return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
+
+}
+
+int
+rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
+{
+	struct rte_eth_dev *dev;
+
+	/* This function is only safe when called from the primary process
+	 * in a multi-process setup */
+	PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+	if (port_id >= nb_ports) {
+		PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_eth_devices[port_id];
+	if (tx_queue_id >= dev->data->nb_tx_queues) {
+		PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+		return -EINVAL;
+	}
+
+
+
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP); + + return dev->dev_ops->tx_queue_stop(dev, tx_queue_id); + +} + static int rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) { @@ -300,9 +403,7 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) void **txq; unsigned i; - FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP); - - if (dev->data->tx_queues == NULL) { + if (dev->data->tx_queues == NULL) { /* first time configuration */ dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues", sizeof(dev->data->tx_queues[0]) * nb_queues, CACHE_LINE_SIZE); @@ -310,7 +411,9 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) dev->data->nb_tx_queues = 0; return -(ENOMEM); } - } else { + } else { /* re-configure */ + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP); + txq = dev->data->tx_queues; for (i = nb_queues; i < old_nb_queues; i++) @@ -331,6 +434,151 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues) return (0); } +static int +rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, + const struct rte_eth_conf *dev_conf) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + + if (RTE_ETH_DEV_SRIOV(dev).active != 0) { + /* check multi-queue mode */ + if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) || + (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) || + (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) || + (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) { + /* SRIOV only works in VMDq enable mode */ + PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, " + "wrong VMDQ mq_mode rx %u tx %u\n", + port_id, + dev_conf->rxmode.mq_mode, + dev_conf->txmode.mq_mode); + return (-EINVAL); + } + + switch (dev_conf->rxmode.mq_mode) { + case ETH_MQ_RX_VMDQ_RSS: + case ETH_MQ_RX_VMDQ_DCB: + case ETH_MQ_RX_VMDQ_DCB_RSS: + /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ + PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, " + "unsupported VMDQ mq_mode rx %u\n", + port_id, dev_conf->rxmode.mq_mode); + return (-EINVAL); + default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */ + /* if nothing mq mode configure, use default scheme */ + dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; + if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1) + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1; + break; + } + + switch (dev_conf->txmode.mq_mode) { + case ETH_MQ_TX_VMDQ_DCB: + /* DCB VMDQ in SRIOV mode, not implement yet */ + PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, " + "unsupported VMDQ mq_mode tx %u\n", + port_id, dev_conf->txmode.mq_mode); + return (-EINVAL); + default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ + /* if nothing mq mode configure, use default scheme */ + dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; + if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1) + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1; + break; + } + + /* check valid queue number */ + if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || + (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { + PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, " + "queue number must less equal to %d\n", + port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); + return (-EINVAL); + } + } else { + /* For vmdb+dcb mode check our configuration before we go further */ + if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { + const struct rte_eth_vmdq_dcb_conf *conf; + + if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) { + PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q " + "!= %d\n", + port_id, 
ETH_VMDQ_DCB_NUM_QUEUES); + return (-EINVAL); + } + conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf); + if (! (conf->nb_queue_pools == ETH_16_POOLS || + conf->nb_queue_pools == ETH_32_POOLS)) { + PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, " + "nb_queue_pools must be %d or %d\n", + port_id, ETH_16_POOLS, ETH_32_POOLS); + return (-EINVAL); + } + } + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { + const struct rte_eth_vmdq_dcb_tx_conf *conf; + + if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) { + PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q " + "!= %d\n", + port_id, ETH_VMDQ_DCB_NUM_QUEUES); + return (-EINVAL); + } + conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf); + if (! (conf->nb_queue_pools == ETH_16_POOLS || + conf->nb_queue_pools == ETH_32_POOLS)) { + PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, " + "nb_queue_pools != %d or nb_queue_pools " + "!= %d\n", + port_id, ETH_16_POOLS, ETH_32_POOLS); + return (-EINVAL); + } + } + + /* For DCB mode check our configuration before we go further */ + if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { + const struct rte_eth_dcb_rx_conf *conf; + + if (nb_rx_q != ETH_DCB_NUM_QUEUES) { + PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q " + "!= %d\n", + port_id, ETH_DCB_NUM_QUEUES); + return (-EINVAL); + } + conf = &(dev_conf->rx_adv_conf.dcb_rx_conf); + if (! (conf->nb_tcs == ETH_4_TCS || + conf->nb_tcs == ETH_8_TCS)) { + PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, " + "nb_tcs != %d or nb_tcs " + "!= %d\n", + port_id, ETH_4_TCS, ETH_8_TCS); + return (-EINVAL); + } + } + + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { + const struct rte_eth_dcb_tx_conf *conf; + + if (nb_tx_q != ETH_DCB_NUM_QUEUES) { + PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q " + "!= %d\n", + port_id, ETH_DCB_NUM_QUEUES); + return (-EINVAL); + } + conf = &(dev_conf->tx_adv_conf.dcb_tx_conf); + if (! (conf->nb_tcs == ETH_4_TCS || + conf->nb_tcs == ETH_8_TCS)) { + PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, " + "nb_tcs != %d or nb_tcs " + "!= %d\n", + port_id, ETH_4_TCS, ETH_8_TCS); + return (-EINVAL); + } + } + } + return 0; +} + int rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, const struct rte_eth_conf *dev_conf) @@ -413,84 +661,12 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, /* Use default value */ dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN; - /* For vmdb+dcb mode check our configuration before we go further */ - if (dev_conf->rxmode.mq_mode == ETH_VMDQ_DCB) { - const struct rte_eth_vmdq_dcb_conf *conf; - - if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) { - PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q " - "!= %d\n", - port_id, ETH_VMDQ_DCB_NUM_QUEUES); - return (-EINVAL); - } - conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf); - if (! (conf->nb_queue_pools == ETH_16_POOLS || - conf->nb_queue_pools == ETH_32_POOLS)) { - PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, " - "nb_queue_pools must be %d or %d\n", - port_id, ETH_16_POOLS, ETH_32_POOLS); - return (-EINVAL); - } - } - if (dev_conf->txmode.mq_mode == ETH_VMDQ_DCB_TX) { - const struct rte_eth_vmdq_dcb_tx_conf *conf; - - if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) { - PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q " - "!= %d\n", - port_id, ETH_VMDQ_DCB_NUM_QUEUES); - return (-EINVAL); - } - conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf); - if (! 
(conf->nb_queue_pools == ETH_16_POOLS || - conf->nb_queue_pools == ETH_32_POOLS)) { - PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, " - "nb_queue_pools != %d or nb_queue_pools " - "!= %d\n", - port_id, ETH_16_POOLS, ETH_32_POOLS); - return (-EINVAL); - } - } - - /* For DCB mode check our configuration before we go further */ - if (dev_conf->rxmode.mq_mode == ETH_DCB_RX) { - const struct rte_eth_dcb_rx_conf *conf; - - if (nb_rx_q != ETH_DCB_NUM_QUEUES) { - PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q " - "!= %d\n", - port_id, ETH_DCB_NUM_QUEUES); - return (-EINVAL); - } - conf = &(dev_conf->rx_adv_conf.dcb_rx_conf); - if (! (conf->nb_tcs == ETH_4_TCS || - conf->nb_tcs == ETH_8_TCS)) { - PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, " - "nb_tcs != %d or nb_tcs " - "!= %d\n", - port_id, ETH_4_TCS, ETH_8_TCS); - return (-EINVAL); - } - } - - if (dev_conf->txmode.mq_mode == ETH_DCB_TX) { - const struct rte_eth_dcb_tx_conf *conf; - - if (nb_tx_q != ETH_DCB_NUM_QUEUES) { - PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q " - "!= %d\n", - port_id, ETH_DCB_NUM_QUEUES); - return (-EINVAL); - } - conf = &(dev_conf->tx_adv_conf.dcb_tx_conf); - if (! (conf->nb_tcs == ETH_4_TCS || - conf->nb_tcs == ETH_8_TCS)) { - PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, " - "nb_tcs != %d or nb_tcs " - "!= %d\n", - port_id, ETH_4_TCS, ETH_8_TCS); - return (-EINVAL); - } + /* multipe queue mode checking */ + diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf); + if (diag != 0) { + PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n", + port_id, diag); + return diag; } /* @@ -530,11 +706,15 @@ rte_eth_dev_config_restore(uint8_t port_id) struct rte_eth_dev_info dev_info; struct ether_addr addr; uint16_t i; + uint32_t pool = 0; dev = &rte_eth_devices[port_id]; rte_eth_dev_info_get(port_id, &dev_info); + if (RTE_ETH_DEV_SRIOV(dev).active) + pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx; + /* replay MAC address configuration */ for (i = 0; i < dev_info.max_mac_addrs; i++) { addr = dev->data->mac_addrs[i]; @@ -545,7 +725,7 @@ rte_eth_dev_config_restore(uint8_t port_id) /* add address to the hardware */ if (*dev->dev_ops->mac_addr_add) - (*dev->dev_ops->mac_addr_add)(dev, &addr, i, 0); + (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool); else { PMD_DEBUG_TRACE("port %d: MAC address array not supported\n", port_id); @@ -681,8 +861,7 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id, (int) sizeof(struct rte_pktmbuf_pool_private)); return (-ENOSPC); } - mbp_priv = (struct rte_pktmbuf_pool_private *) - ((char *)mp + sizeof(struct rte_mempool)); + mbp_priv = rte_mempool_get_priv(mp); if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) { PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d " @@ -889,6 +1068,7 @@ rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats) return; } dev = &rte_eth_devices[port_id]; + memset(stats, 0, sizeof(*stats)); FUNC_PTR_OR_RET(*dev->dev_ops->stats_get); (*dev->dev_ops->stats_get)(dev, stats); @@ -958,10 +1138,15 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info) } dev = &rte_eth_devices[port_id]; + /* Default device offload capabilities to zero */ + dev_info->rx_offload_capa = 0; + dev_info->tx_offload_capa = 0; + dev_info->if_index = 0; FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); (*dev->dev_ops->dev_infos_get)(dev, dev_info); dev_info->pci_dev = dev->pci_dev; - dev_info->driver_name = dev->driver->pci_drv.name; + if (dev->driver) + dev_info->driver_name = 
dev->driver->pci_drv.name; } void @@ -1048,7 +1233,7 @@ rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask) int ret = 0; int mask = 0; int cur, org = 0; - + if (port_id >= nb_ports) { PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); return (-ENODEV); @@ -1063,7 +1248,7 @@ rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask) dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur; mask |= ETH_VLAN_STRIP_MASK; } - + cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter); if (cur != org){ @@ -1081,7 +1266,7 @@ rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask) /*no change*/ if(mask == 0) return ret; - + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP); (*dev->dev_ops->vlan_offload_set)(dev, mask); @@ -1420,6 +1605,7 @@ int rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf) { struct rte_eth_dev *dev; + uint16_t max_rxq; uint8_t i,j; if (port_id >= nb_ports) { @@ -1433,26 +1619,29 @@ rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf) return (-EINVAL); } + dev = &rte_eth_devices[port_id]; + max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ? + dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE; if (reta_conf->mask_lo != 0) { for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) { if ((reta_conf->mask_lo & (1ULL << i)) && - (reta_conf->reta[i] >= ETH_RSS_RETA_MAX_QUEUE)) { + (reta_conf->reta[i] >= max_rxq)) { PMD_DEBUG_TRACE("RETA hash index output" "configration for port=%d,invalid" "queue=%d\n",port_id,reta_conf->reta[i]); return (-EINVAL); - } + } } } if (reta_conf->mask_hi != 0) { - for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) { + for (i = 0; i< ETH_RSS_RETA_NUM_ENTRIES/2; i++) { j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2); /* Check if the max entry >= 128 */ - if ((reta_conf->mask_hi & (1ULL << i)) && - (reta_conf->reta[j] >= ETH_RSS_RETA_MAX_QUEUE)) { + if ((reta_conf->mask_hi & (1ULL << i)) && + (reta_conf->reta[j] >= max_rxq)) { PMD_DEBUG_TRACE("RETA hash index output" "configration for port=%d,invalid" "queue=%d\n",port_id,reta_conf->reta[j]); @@ -1462,17 +1651,15 @@ rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf) } } - dev = &rte_eth_devices[port_id]; - FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP); return (*dev->dev_ops->reta_update)(dev, reta_conf); } -int +int rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf) { struct rte_eth_dev *dev; - + if (port_id >= nb_ports) { PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); return (-ENODEV); @@ -1488,6 +1675,43 @@ rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf) return (*dev->dev_ops->reta_query)(dev, reta_conf); } +int +rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf) +{ + struct rte_eth_dev *dev; + uint16_t rss_hash_protos; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + rss_hash_protos = rss_conf->rss_hf; + if ((rss_hash_protos != 0) && + ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) { + PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n", + rss_hash_protos); + return (-EINVAL); + } + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP); + return (*dev->dev_ops->rss_hash_update)(dev, rss_conf); +} + +int +rte_eth_dev_rss_hash_conf_get(uint8_t port_id, + struct rte_eth_rss_conf *rss_conf) +{ + struct rte_eth_dev *dev; + + if (port_id >= 
nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP); + return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf); +} + int rte_eth_led_on(uint8_t port_id) { @@ -1542,10 +1766,11 @@ static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}}; int rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr, - uint32_t pool) + uint32_t pool) { struct rte_eth_dev *dev; int index; + uint64_t pool_mask; if (port_id >= nb_ports) { PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); @@ -1555,19 +1780,29 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr, FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); if (is_zero_ether_addr(addr)) { - PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", port_id); + PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", + port_id); + return (-EINVAL); + } + if (pool >= ETH_64_POOLS) { + PMD_DEBUG_TRACE("pool id must be 0-%d\n",ETH_64_POOLS - 1); return (-EINVAL); } - /* Check if it's already there, and do nothing */ index = get_mac_addr_index(port_id, addr); - if (index >= 0) - return 0; - - index = get_mac_addr_index(port_id, &null_mac_addr); if (index < 0) { - PMD_DEBUG_TRACE("port %d: MAC address array full\n", port_id); - return (-ENOSPC); + index = get_mac_addr_index(port_id, &null_mac_addr); + if (index < 0) { + PMD_DEBUG_TRACE("port %d: MAC address array full\n", + port_id); + return (-ENOSPC); + } + } else { + pool_mask = dev->data->mac_pool_sel[index]; + + /* Check if both MAC address and pool is alread there, and do nothing */ + if (pool_mask & (1ULL << pool)) + return 0; } /* Update NIC */ @@ -1576,6 +1811,9 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr, /* Update address in NIC data structure */ ether_addr_copy(addr, &dev->data->mac_addrs[index]); + /* Update pool bitmap in NIC data structure */ + dev->data->mac_pool_sel[index] |= (1ULL << pool); + return 0; } @@ -1608,47 +1846,352 @@ rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr) return 0; } -#ifdef RTE_LIBRTE_ETHDEV_DEBUG -uint16_t -rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id, - struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +int +rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf, + uint16_t rx_mode, uint8_t on) { + uint16_t num_vfs; struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; if (port_id >= nb_ports) { - PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); - return 0; + PMD_DEBUG_TRACE("set VF RX mode:Invalid port_id=%d\n", + port_id); + return (-ENODEV); } + dev = &rte_eth_devices[port_id]; - FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP); - if (queue_id >= dev->data->nb_rx_queues) { - PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id); - return 0; + rte_eth_dev_info_get(port_id, &dev_info); + + num_vfs = dev_info.max_vfs; + if (vf > num_vfs) + { + PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf); + return (-EINVAL); } - return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id], - rx_pkts, nb_pkts); + if (rx_mode == 0) + { + PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n"); + return (-EINVAL); + } + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP); + return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on); } -uint16_t -rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id, - struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +/* + * Returns index into MAC address array of addr. 
Use 00:00:00:00:00:00 to find + * an empty spot. + */ +static inline int +get_hash_mac_addr_index(uint8_t port_id, struct ether_addr *addr) { - struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + unsigned i; - if (port_id >= nb_ports) { - PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); - return 0; - } - dev = &rte_eth_devices[port_id]; + rte_eth_dev_info_get(port_id, &dev_info); + if (!dev->data->hash_mac_addrs) + return -1; - FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP); - if (queue_id >= dev->data->nb_tx_queues) { - PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id); - return 0; + for (i = 0; i < dev_info.max_hash_mac_addrs; i++) + if (memcmp(addr, &dev->data->hash_mac_addrs[i], + ETHER_ADDR_LEN) == 0) + return i; + + return -1; +} + +int +rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr, + uint8_t on) +{ + int index; + int ret; + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n", + port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + if (is_zero_ether_addr(addr)) { + PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", + port_id); + return (-EINVAL); + } + + index = get_hash_mac_addr_index(port_id, addr); + /* Check if it's already there, and do nothing */ + if ((index >= 0) && (on)) + return 0; + + if (index < 0) { + if (!on) { + PMD_DEBUG_TRACE("port %d: the MAC address was not" + "set in UTA\n", port_id); + return (-EINVAL); + } + + index = get_hash_mac_addr_index(port_id, &null_mac_addr); + if (index < 0) { + PMD_DEBUG_TRACE("port %d: MAC address array full\n", + port_id); + return (-ENOSPC); + } + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP); + ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on); + if (ret == 0) { + /* Update address in NIC data structure */ + if (on) + ether_addr_copy(addr, + &dev->data->hash_mac_addrs[index]); + else + ether_addr_copy(&null_mac_addr, + &dev->data->hash_mac_addrs[index]); + } + + return ret; +} + +int +rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("unicast hash setting:Invalid port_id=%d\n", + port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP); + return (*dev->dev_ops->uc_all_hash_table_set)(dev, on); +} + +int +rte_eth_dev_set_vf_rx(uint8_t port_id,uint16_t vf, uint8_t on) +{ + uint16_t num_vfs; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + rte_eth_dev_info_get(port_id, &dev_info); + + num_vfs = dev_info.max_vfs; + if (vf > num_vfs) + { + PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id); + return (-EINVAL); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP); + return (*dev->dev_ops->set_vf_rx)(dev, vf,on); +} + +int +rte_eth_dev_set_vf_tx(uint8_t port_id,uint16_t vf, uint8_t on) +{ + uint16_t num_vfs; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("set pool tx:Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + rte_eth_dev_info_get(port_id, &dev_info); + + num_vfs = dev_info.max_vfs; + if (vf > num_vfs) + { + PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", 
vf); + return (-EINVAL); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP); + return (*dev->dev_ops->set_vf_tx)(dev, vf,on); +} + +int +rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id, + uint64_t vf_mask,uint8_t vlan_on) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("VF VLAN filter:invalid port id=%d\n", + port_id); + return (-ENODEV); + } + dev = &rte_eth_devices[port_id]; + + if(vlan_id > ETHER_MAX_VLAN_ID) + { + PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n", + vlan_id); + return (-EINVAL); + } + if (vf_mask == 0) + { + PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n"); + return (-EINVAL); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP); + return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id, + vf_mask,vlan_on); +} + +int +rte_eth_mirror_rule_set(uint8_t port_id, + struct rte_eth_vmdq_mirror_conf *mirror_conf, + uint8_t rule_id, uint8_t on) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if (mirror_conf->rule_type_mask == 0) { + PMD_DEBUG_TRACE("mirror rule type can not be 0.\n"); + return (-EINVAL); + } + + if (mirror_conf->dst_pool >= ETH_64_POOLS) { + PMD_DEBUG_TRACE("Invalid dst pool, pool id must" + "be 0-%d\n",ETH_64_POOLS - 1); + return (-EINVAL); + } + + if ((mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) && + (mirror_conf->pool_mask == 0)) { + PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not" + "be 0.\n"); + return (-EINVAL); + } + + if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE) + { + PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n", + ETH_VMDQ_NUM_MIRROR_RULE - 1); + return (-EINVAL); + } + + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP); + + return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on); +} + +int +rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if(rule_id >= ETH_VMDQ_NUM_MIRROR_RULE) + { + PMD_DEBUG_TRACE("Invalid rule_id, rule_id must be 0-%d\n", + ETH_VMDQ_NUM_MIRROR_RULE-1); + return (-EINVAL); + } + + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP); + + return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id); +} + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG +uint16_t +rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return 0; + } + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP); + if (queue_id >= dev->data->nb_rx_queues) { + PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id); + return 0; + } + return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id], + rx_pkts, nb_pkts); +} + +uint16_t +rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return 0; + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP); + if (queue_id >= dev->data->nb_tx_queues) { + PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id); + return 0; } return 
(*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts); } + +uint32_t +rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return 0; + } + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP); + return (*dev->dev_ops->rx_queue_count)(dev, queue_id); +} + +int +rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP); + return (*dev->dev_ops->rx_descriptor_done)( \ + dev->data->rx_queues[queue_id], offset); +} #endif int @@ -1756,3 +2299,182 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev, } rte_spinlock_unlock(&rte_eth_dev_cb_lock); } +#ifdef RTE_NIC_BYPASS +int rte_eth_dev_bypass_init(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP); + (*dev->dev_ops->bypass_init)(dev); + return 0; +} + +int +rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP); + (*dev->dev_ops->bypass_state_show)(dev, state); + return 0; +} + +int +rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP); + (*dev->dev_ops->bypass_state_set)(dev, new_state); + return 0; +} + +int +rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP); + (*dev->dev_ops->bypass_event_show)(dev, event, state); + return 0; +} + +int +rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP); + (*dev->dev_ops->bypass_event_set)(dev, event, state); + return 0; +} + +int +rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if 
((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP); + (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout); + return 0; +} + +int +rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP); + (*dev->dev_ops->bypass_ver_show)(dev, ver); + return 0; +} + +int +rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP); + (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout); + return 0; +} + +int +rte_eth_dev_bypass_wd_reset(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((dev= &rte_eth_devices[port_id]) == NULL) { + PMD_DEBUG_TRACE("Invalid port device\n"); + return (-ENODEV); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP); + (*dev->dev_ops->bypass_wd_reset)(dev); + return 0; +} +#endif
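
Usage sketch (not part of the patch): the per-queue start/stop entry points added above let an application bounce one queue without stopping the whole port. The snippet assumes EAL is initialized, the port is configured and started, and the caller is the primary process (the wrappers return -E_RTE_SECONDARY otherwise); port and queue ids are illustrative.

#include <rte_ethdev.h>

static int
restart_rx_queue(uint8_t port_id, uint16_t queue_id)
{
	int ret;

	/* Stop only this Rx queue; the rest of the port keeps running.
	 * Returns -EINVAL for a bad port/queue id and -ENOTSUP when the
	 * PMD does not provide rx_queue_stop. */
	ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;

	/* ... rework whatever needed the queue quiesced ... */

	/* Bring the queue back into service. */
	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}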
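A sketch of runtime RSS reconfiguration through the new rte_eth_dev_rss_hash_update()/rte_eth_dev_rss_hash_conf_get() pair, assuming the rte_eth_rss_conf layout of this release (an rss_key pointer plus the rss_hf protocol mask that the wrapper above validates) and a PMD that implements both callbacks.

#include <rte_ethdev.h>

static int
enable_ip_rss(uint8_t port_id)
{
	struct rte_eth_rss_conf rss_conf;
	int ret;

	/* Read back the current setting; a NULL rss_key asks the PMD
	 * to skip copying the hash key out. */
	rss_conf.rss_key = NULL;
	ret = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (ret != 0)
		return ret;

	/* Hash on IPv4/IPv6 only. rss_hf must stay within
	 * ETH_RSS_PROTO_MASK, otherwise the update fails with -EINVAL. */
	rss_conf.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6;
	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}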
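The reworked rte_eth_dev_mac_addr_add() now takes effect per VMDq pool: re-adding an address that is already present only sets another bit in mac_pool_sel instead of returning early. A minimal sketch, with an illustrative locally administered address:

#include <rte_ethdev.h>
#include <rte_ether.h>

static int
add_mac_to_pool(uint8_t port_id, uint32_t pool)
{
	/* Locally administered address, purely illustrative. */
	struct ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
	};

	/* pool must be below ETH_64_POOLS or the call fails with
	 * -EINVAL; adding the same address to a second pool merely
	 * sets the extra bit in mac_pool_sel. */
	return rte_eth_dev_mac_addr_add(port_id, &mac, pool);
}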
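The VF helpers added above (rte_eth_dev_set_vf_rx/tx and friends) toggle traffic for a single virtual function on an SR-IOV port. A sketch of quiescing one VF, assuming a PMD with VF support:

#include <rte_ethdev.h>

static int
quiesce_vf(uint8_t port_id, uint16_t vf)
{
	int ret;

	/* Switch off Rx first so no new frames reach the VF, then Tx.
	 * Both calls return -EINVAL for a VF id above max_vfs and
	 * -ENOTSUP when the PMD has no VF support. */
	ret = rte_eth_dev_set_vf_rx(port_id, vf, 0);
	if (ret != 0)
		return ret;

	return rte_eth_dev_set_vf_tx(port_id, vf, 0);
}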
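Finally, a sketch of the Rx-queue introspection calls added at the end of the diff. In this layout the wrappers are compiled only when RTE_LIBRTE_ETHDEV_DEBUG is enabled (per the #ifdef above); the offset argument of rte_eth_rx_descriptor_done() is interpreted by the PMD relative to the next descriptor to be processed, and the value 32 here is illustrative.

#include <stdio.h>
#include <stdint.h>
#include <rte_ethdev.h>

static void
report_rx_backlog(uint8_t port_id, uint16_t queue_id)
{
	uint32_t used;
	int done;

	/* Descriptors the NIC has already filled on this queue. */
	used = rte_eth_rx_queue_count(port_id, queue_id);

	/* Whether the descriptor 32 entries ahead has completed:
	 * 1 = done, 0 = pending, negative errno (e.g. -ENOTSUP) on
	 * failure. */
	done = rte_eth_rx_descriptor_done(port_id, queue_id, 32);

	printf("port %u rxq %u: %u used, +32 done=%d\n",
	       port_id, queue_id, used, done);
}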